aboutsummaryrefslogtreecommitdiffstats
path: root/testhal/STM32/STM32L1xx/UART/main.c
blob: 106f246d5b607cbe84269f0a0c65d7facec9a014 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
/*
    ChibiOS - Copyright (C) 2006..2015 Giovanni Di Sirio

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "ch.h"
#include "hal.h"

/* One-shot virtual timers: vt1 re-triggers the periodic transmission
   (armed in txend2()), vt2 times the RX activity LED flash (armed in
   rxchar()).*/
static virtual_timer_t vt1, vt2;

/*
 * Virtual timer callback (vt1): queues another transmission of the
 * greeting string. Runs in ISR context, hence the I-class UART call
 * inside a chSysLockFromISR()/chSysUnlockFromISR() window.
 */
static void restart(void *p) {
  static const char greeting[] = "Hello World!\r\n";

  (void)p;

  chSysLockFromISR();
  uartStartSendI(&UARTD1, sizeof greeting - 1, greeting);
  chSysUnlockFromISR();
}

/*
 * Virtual timer callback (vt2): switches LED4 off, ending the flash
 * started by rxchar() 200 ms earlier.
 */
static void ledoff(void *p) {

  (void)p;
  palClearPad(GPIOB, GPIOB_LED4);
}

/*
 * This callback is invoked when a transmission buffer has been completely
 * read by the driver.
 * Lights LED4; it is cleared again by txend2() once all bits are on the
 * wire.
 */
static void txend1(UARTDriver *uartp) {

  (void)uartp;
  palSetPad(GPIOB, GPIOB_LED4);
}

/*
 * This callback is invoked when a transmission has physically completed.
 * Clears LED4 and (re)arms vt1 so that restart() sends the greeting again
 * in 5 seconds.
 */
static void txend2(UARTDriver *uartp) {

  (void)uartp;
  palClearPad(GPIOB, GPIOB_LED4);
  chSysLockFromISR();
  /* Reset-then-set guarantees a single pending timeout even if the timer
     is already armed.*/
  chVTResetI(&vt1);
  chVTDoSetI(&vt1, MS2ST(5000), restart, NULL);
  chSysUnlockFromISR();
}

/*
 * This callback is invoked on a receive error, the errors mask is passed
 * as parameter.
 * Errors are deliberately ignored in this demo.
 */
static void rxerr(UARTDriver *uartp, uartflags_t e) {

  (void)uartp;
  (void)e;
}

/*
 * This callback is invoked when a character is received but the application
 * was not ready to receive it, the character is passed as parameter.
 * Flashes LED4 for 200 ms per received character; rapid input keeps it lit.
 */
static void rxchar(UARTDriver *uartp, uint16_t c) {

  (void)uartp;
  (void)c;
  /* Flashing the LED each time a character is received.*/
  palSetPad(GPIOB, GPIOB_LED4);
  chSysLockFromISR();
  /* Re-arm the LED-off timeout so the flash period restarts on every
     character.*/
  chVTResetI(&vt2);
  chVTDoSetI(&vt2, MS2ST(200), ledoff, NULL);
  chSysUnlockFromISR();
}

/*
 * This callback is invoked when a receive buffer has been completely written.
 * Unused in this demo (no buffered receive operation is ever started).
 */
static void rxend(UARTDriver *uartp) {

  (void)uartp;
}

/*
 * UART driver configuration structure.
 * Positional fields: the five callbacks (TX buffer released, TX physically
 * complete, RX buffer full, RX char with no active receive, RX error),
 * then the baud rate and the raw USART CR1/CR2/CR3 initialization values.
 */
static UARTConfig uart_cfg_1 = {
  txend1,
  txend2,
  rxend,
  rxchar,
  rxerr,
  38400,             /* Baud rate.*/
  0,                 /* CR1.*/
  USART_CR2_LINEN,   /* CR2. NOTE(review): enables LIN mode -- presumably
                        intentional for this test; confirm against the
                        STM32 reference manual.*/
  0                  /* CR3.*/
};

/*
 * Application entry point.
 *
 * Sets up UART1 and starts one transmission; from then on the demo is
 * driven entirely by the UART callbacks and virtual timers while this
 * thread idles.
 */
int main(void) {

  /*
   * System initializations.
   * - HAL initialization, this also initializes the configured device drivers
   *   and performs the board-specific initializations.
   * - Kernel initialization, the main() function becomes a thread and the
   *   RTOS is active.
   */
  halInit();
  chSysInit();

  /* Bring up UART1; PA9 (TX) and PA10 (RX) are switched to alternate
     function 7 (USART1).*/
  uartStart(&UARTD1, &uart_cfg_1);
  palSetPadMode(GPIOA, 9, PAL_MODE_ALTERNATE(7));
  palSetPadMode(GPIOA, 10, PAL_MODE_ALTERNATE(7));

  /* Kick off the first transmission, it is handled entirely in the
     background; completion callbacks keep the demo running.*/
  uartStartSend(&UARTD1, 13, "Starting...\r\n");

  /* Idle loop, all the work happens in ISR context.*/
  for (;;) {
    chThdSleepMilliseconds(500);
  }
}
cpf"><linux/ip.h> //remove this. #include <net/sock.h> #define NET_TX_IRQ _EVENT_NET_TX #define NET_RX_IRQ _EVENT_NET_RX #define NET_TX_IRQ_FOR_VIF(x) _EVENT_NET_TX_FOR_VIF(x) #define NET_RX_IRQ_FOR_VIF(x) _EVENT_NET_RX_FOR_VIF(x) #define TX_MAX_ENTRIES (TX_RING_SIZE - 2) #define RX_MAX_ENTRIES (RX_RING_SIZE - 2) #define TX_RING_INC(_i) (((_i)+1) & (TX_RING_SIZE-1)) #define RX_RING_INC(_i) (((_i)+1) & (RX_RING_SIZE-1)) #define TX_RING_ADD(_i,_j) (((_i)+(_j)) & (TX_RING_SIZE-1)) #define RX_RING_ADD(_i,_j) (((_i)+(_j)) & (RX_RING_SIZE-1)) #define RX_BUF_SIZE 1600 /* Ethernet MTU + plenty of slack! */ static void network_rx_int(int irq, void *dev_id, struct pt_regs *ptregs); static void network_tx_int(int irq, void *dev_id, struct pt_regs *ptregs); static void network_tx_buf_gc(struct net_device *dev); static void network_alloc_rx_buffers(struct net_device *dev); static void network_free_rx_buffers(struct net_device *dev); static void cleanup_module(void); static struct list_head dev_list; /* * RX RING: RX_IDX <= rx_cons <= rx_prod * TX RING: TX_IDX <= tx_cons <= tx_prod * (*_IDX allocated privately here, *_cons & *_prod shared with hypervisor) */ struct net_private { struct list_head list; struct net_device *dev; struct net_device_stats stats; struct sk_buff **tx_skb_ring; struct sk_buff **rx_skb_ring; atomic_t tx_entries; unsigned int rx_idx, tx_idx, tx_full; net_ring_t *net_ring; spinlock_t tx_lock; unsigned int id; }; static int network_open(struct net_device *dev) { struct net_private *np = dev->priv; int error; char *rxlabel, *txlabel; // This is inevitably not the right way to allocate a couple of static strings. 
rxlabel = kmalloc(sizeof("net-rx- "), GFP_KERNEL); txlabel = kmalloc(sizeof("net-tx- "), GFP_KERNEL); if ((rxlabel == NULL) || (txlabel == NULL)) goto fail; sprintf(rxlabel, "net-rx-%d", np->id); sprintf(txlabel, "net-tx-%d", np->id); np->rx_idx = np->tx_idx = np->tx_full = 0; memset(&np->stats, 0, sizeof(np->stats)); spin_lock_init(&np->tx_lock); atomic_set(&np->tx_entries, 0); np->net_ring->tx_prod = np->net_ring->tx_cons = np->net_ring->tx_event = 0; np->net_ring->rx_prod = np->net_ring->rx_cons = np->net_ring->rx_event = 0; np->net_ring->tx_ring = NULL; np->net_ring->rx_ring = NULL; np->tx_skb_ring = kmalloc(TX_RING_SIZE * sizeof(struct sk_buff *), GFP_KERNEL); np->rx_skb_ring = kmalloc(RX_RING_SIZE * sizeof(struct sk_buff *), GFP_KERNEL); np->net_ring->tx_ring = kmalloc(TX_RING_SIZE * sizeof(tx_entry_t), GFP_KERNEL); np->net_ring->rx_ring = kmalloc(RX_RING_SIZE * sizeof(rx_entry_t), GFP_KERNEL); if ( (np->tx_skb_ring == NULL) || (np->rx_skb_ring == NULL) || (np->net_ring->tx_ring == NULL) || (np->net_ring->rx_ring == NULL) ) { printk(KERN_WARNING "%s; Could not allocate ring memory\n", dev->name); error = -ENOBUFS; goto fail; } network_alloc_rx_buffers(dev); error = request_irq(NET_RX_IRQ_FOR_VIF(np->id), network_rx_int, 0, rxlabel, dev); if ( error ) { printk(KERN_WARNING "%s: Could not allocate receive interrupt\n", dev->name); goto fail; } error = request_irq(NET_TX_IRQ_FOR_VIF(np->id), network_tx_int, 0, txlabel, dev); if ( error ) { printk(KERN_WARNING "%s: Could not allocate transmit interrupt\n", dev->name); free_irq(NET_RX_IRQ_FOR_VIF(np->id), dev); goto fail; } printk("XenoLinux Virtual Network Driver installed as %s\n", dev->name); netif_start_queue(dev); MOD_INC_USE_COUNT; return 0; fail: if ( rxlabel ) kfree(rxlabel); if ( txlabel ) kfree(txlabel); if ( np->net_ring->rx_ring ) kfree(np->net_ring->rx_ring); if ( np->net_ring->tx_ring ) kfree(np->net_ring->tx_ring); if ( np->rx_skb_ring ) kfree(np->rx_skb_ring); if ( np->tx_skb_ring ) 
kfree(np->tx_skb_ring); kfree(np); return error; } static void network_tx_buf_gc(struct net_device *dev) { unsigned int i; struct net_private *np = dev->priv; struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&np->tx_lock, flags); for ( i = np->tx_idx; i != np->net_ring->tx_cons; i = TX_RING_INC(i) ) { skb = np->tx_skb_ring[i]; dev_kfree_skb_any(skb); atomic_dec(&np->tx_entries); } np->tx_idx = i; if ( np->tx_full && (atomic_read(&np->tx_entries) < TX_MAX_ENTRIES) ) { np->tx_full = 0; netif_wake_queue(dev); } spin_unlock_irqrestore(&np->tx_lock, flags); } static void network_alloc_rx_buffers(struct net_device *dev) { unsigned int i; struct net_private *np = dev->priv; struct sk_buff *skb; unsigned int end = RX_RING_ADD(np->rx_idx, RX_MAX_ENTRIES); for ( i = np->net_ring->rx_prod; i != end; i = RX_RING_INC(i) ) { skb = dev_alloc_skb(RX_BUF_SIZE); if ( skb == NULL ) break; skb->dev = dev; skb_reserve(skb, 2); /* word align the IP header */ np->rx_skb_ring[i] = skb; np->net_ring->rx_ring[i].addr = (unsigned long)skb->data; np->net_ring->rx_ring[i].size = RX_BUF_SIZE - 16; /* arbitrary */ } np->net_ring->rx_prod = i; np->net_ring->rx_event = RX_RING_INC(np->rx_idx); HYPERVISOR_net_update(); } static void network_free_rx_buffers(struct net_device *dev) { unsigned int i; struct net_private *np = dev->priv; struct sk_buff *skb; for ( i = np->rx_idx; i != np->net_ring->rx_prod; i = RX_RING_INC(i) ) { skb = np->rx_skb_ring[i]; dev_kfree_skb(skb); } } static int network_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int i; struct net_private *np = (struct net_private *)dev->priv; if ((np->id > 0) || ((skb->len > 20) && (skb->nh.iph != NULL) && (skb->nh.iph->protocol == 1))) printk(KERN_WARNING "TX on vif %d (dev:%p)\n", np->id, dev); if ( np->tx_full ) { printk(KERN_WARNING "%s: full queue wasn't stopped!\n", dev->name); netif_stop_queue(dev); return -ENOBUFS; } i = np->net_ring->tx_prod; np->tx_skb_ring[i] = skb; np->net_ring->tx_ring[i].addr = 
(unsigned long)skb->data; np->net_ring->tx_ring[i].size = skb->len; np->net_ring->tx_prod = TX_RING_INC(i); atomic_inc(&np->tx_entries); np->stats.tx_bytes += skb->len; np->stats.tx_packets++; spin_lock_irq(&np->tx_lock); if ( atomic_read(&np->tx_entries) >= TX_MAX_ENTRIES ) { np->tx_full = 1; netif_stop_queue(dev); np->net_ring->tx_event = TX_RING_ADD(np->tx_idx, atomic_read(&np->tx_entries) >> 1); } else { /* Avoid unnecessary tx interrupts. */ np->net_ring->tx_event = TX_RING_INC(np->net_ring->tx_prod); } spin_unlock_irq(&np->tx_lock); /* Must do this after setting tx_event: race with updates of tx_cons. */ network_tx_buf_gc(dev); HYPERVISOR_net_update(); return 0; } static void network_rx_int(int irq, void *dev_id, struct pt_regs *ptregs) { unsigned int i; struct net_device *dev = (struct net_device *)dev_id; struct net_private *np = dev->priv; struct sk_buff *skb; again: for ( i = np->rx_idx; i != np->net_ring->rx_cons; i = RX_RING_INC(i) ) { skb = np->rx_skb_ring[i]; skb_put(skb, np->net_ring->rx_ring[i].size); skb->protocol = eth_type_trans(skb, dev); np->stats.rx_packets++; np->stats.rx_bytes += np->net_ring->rx_ring[i].size; if (((skb->len > 20) && ((*(unsigned char *)(skb->data + 9) == 1) || (np->id > 0)) )) printk(KERN_WARNING "RX on vif %d (dev:%p)\n", np->id, dev); if ((skb != NULL) && (skb->data != NULL) && (skb->len > 20) && ntohl(*(unsigned long *)(skb->data + 16)) == 167903489) printk(KERN_WARNING "RX INT (driver): pkt_type is %d.!", skb->pkt_type); netif_rx(skb); dev->last_rx = jiffies; } np->rx_idx = i; network_alloc_rx_buffers(dev); /* Deal with hypervisor racing our resetting of rx_event. 
*/ smp_mb(); if ( np->net_ring->rx_cons != i ) { //printk("redoing network rx...\n"); goto again; } } static void network_tx_int(int irq, void *dev_id, struct pt_regs *ptregs) { struct net_device *dev = (struct net_device *)dev_id; network_tx_buf_gc(dev); } int network_close(struct net_device *dev) { struct net_private *np = dev->priv; netif_stop_queue(dev); free_irq(NET_RX_IRQ_FOR_VIF(np->id), dev); free_irq(NET_TX_IRQ_FOR_VIF(np->id), dev); network_free_rx_buffers(dev); kfree(np->net_ring->rx_ring); kfree(np->net_ring->tx_ring); kfree(np->rx_skb_ring); kfree(np->tx_skb_ring); MOD_DEC_USE_COUNT; return 0; } static struct net_device_stats *network_get_stats(struct net_device *dev) { struct net_private *np = (struct net_private *)dev->priv; return &np->stats; } int __init init_module(void) { int i, err; struct net_device *dev; struct net_private *np; INIT_LIST_HEAD(&dev_list); for ( i = 0; i < start_info.num_net_rings; i++ ) { dev = alloc_etherdev(sizeof(struct net_private)); if ( dev == NULL ) { err = -ENOMEM; goto fail; } np = dev->priv; np->net_ring = start_info.net_rings + i; SET_MODULE_OWNER(dev); dev->open = network_open; dev->hard_start_xmit = network_start_xmit; dev->stop = network_close; dev->get_stats = network_get_stats; memset(dev->dev_addr, 0, ETH_ALEN); *(unsigned int *)(dev->dev_addr) = i; if ( (err = register_netdev(dev)) != 0 ) { kfree(dev); goto fail; } np->dev = dev; np->id = i; list_add(&np->list, &dev_list); printk(KERN_WARNING "Added VIF, ifindex is %d.\n", dev->ifindex); } return 0; fail: cleanup_module(); return err; } static void cleanup_module(void) { struct net_private *np; struct net_device *dev; while ( !list_empty(&dev_list) ) { np = list_entry(dev_list.next, struct net_private, list); list_del(&np->list); dev = np->dev; unregister_netdev(dev); kfree(dev); } } module_init(init_module); module_exit(cleanup_module);