component/bt : remove all GKI references

1. remove GKI (osi_free_and_reset is not used); GKI buffer and queue calls become osi/fixed_queue calls, see the mapping sketch below
2. move mutex/semaphore into their own directory
3. implement osi_malloc with malloc (it previously used calloc)
4. update osi_free in the osi allocator debug code
5. fix the rebase of the GKI removal
Tian Hao, 2017-08-17 21:13:45 +08:00
parent fc85cb683d, commit e4f63819a1
179 changed files with 2565 additions and 3599 deletions
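For reference, a minimal sketch of how the removed GKI primitives map onto the osi/fixed_queue calls used throughout this diff. The GKI and osi names are the ones that actually appear in the hunks below; the header paths and the drain_queue() helper itself are illustrative assumptions, not code from the tree.

/*
 * GKI -> OSI replacement pattern (sketch, assumed header paths):
 *   PORT_SCHEDULE_LOCK / PORT_SCHEDULE_UNLOCK -> osi_mutex_global_lock() / osi_mutex_global_unlock()
 *   GKI_queue_length(&q)                      -> fixed_queue_length(q)
 *   GKI_dequeue(&q) / GKI_enqueue(&q, p)      -> fixed_queue_try_dequeue(q) / fixed_queue_enqueue(q, p)
 *   GKI_getpoolbuf(POOL_ID) / GKI_freebuf(p)  -> osi_malloc(BUF_SIZE) / osi_free(p)
 */
#include "bt_target.h"      /* BT_HDR (exact header is an assumption) */
#include "allocator.h"      /* osi_malloc / osi_free (path is an assumption) */
#include "fixed_queue.h"    /* fixed_queue_t and helpers (path is an assumption) */
#include "mutex.h"          /* osi_mutex_global_lock/unlock (path is an assumption) */

/* Illustrative helper (not in the tree): drain and free every buffer in a
 * queue under the global mutex, the way PORT_Purge now does it. */
static void drain_queue(fixed_queue_t *queue)
{
    BT_HDR *p_buf;

    osi_mutex_global_lock();
    while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(queue)) != NULL) {
        osi_free(p_buf);
    }
    osi_mutex_global_unlock();
}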


@@ -24,7 +24,6 @@
#include <string.h>
#include "bt_target.h"
#include "gki.h"
#include "rfcdefs.h"
#include "port_api.h"
#include "port_int.h"
@@ -1091,17 +1090,17 @@ int PORT_Purge (UINT16 handle, UINT8 purge_flags)
}
if (purge_flags & PORT_PURGE_RXCLEAR) {
-PORT_SCHEDULE_LOCK; /* to prevent missing credit */
+osi_mutex_global_lock(); /* to prevent missing credit */
-count = GKI_queue_length(&p_port->rx.queue);
+count = fixed_queue_length(p_port->rx.queue);
-while ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->rx.queue)) != NULL) {
-GKI_freebuf (p_buf);
+while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue)) != NULL) {
+osi_free (p_buf);
}
p_port->rx.queue_size = 0;
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
/* If we flowed controlled peer based on rx_queue size enable data again */
if (count) {
@@ -1110,15 +1109,15 @@ int PORT_Purge (UINT16 handle, UINT8 purge_flags)
}
if (purge_flags & PORT_PURGE_TXCLEAR) {
-PORT_SCHEDULE_LOCK; /* to prevent tx.queue_size from being negative */
+osi_mutex_global_lock(); /* to prevent tx.queue_size from being negative */
-while ((p_buf = (BT_HDR *)GKI_dequeue (&p_port->tx.queue)) != NULL) {
-GKI_freebuf (p_buf);
+while ((p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->tx.queue)) != NULL) {
+osi_free (p_buf);
}
p_port->tx.queue_size = 0;
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
events = PORT_EV_TXEMPTY;
@@ -1174,14 +1173,19 @@ int PORT_ReadData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len)
return (PORT_LINE_ERR);
}
-p_buf = (BT_HDR *)GKI_getfirst (&p_port->rx.queue);
+if (fixed_queue_is_empty(p_port->rx.queue))
-if (!p_buf) {
return (PORT_SUCCESS);
-}
count = 0;
-while (max_len && p_buf) {
+while (max_len)
+{
+p_buf = (BT_HDR *)fixed_queue_try_peek_first(p_port->rx.queue);
+if (p_buf == NULL)
+break;
if (p_buf->len > max_len) {
memcpy (p_data, (UINT8 *)(p_buf + 1) + p_buf->offset, max_len);
p_buf->offset += max_len;
@@ -1189,11 +1193,11 @@ int PORT_ReadData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len)
*p_len += max_len;
-PORT_SCHEDULE_LOCK;
+osi_mutex_global_lock();
p_port->rx.queue_size -= max_len;
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
break;
} else {
@@ -1202,18 +1206,17 @@ int PORT_ReadData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len)
*p_len += p_buf->len;
max_len -= p_buf->len;
-PORT_SCHEDULE_LOCK;
+osi_mutex_global_lock();
p_port->rx.queue_size -= p_buf->len;
if (max_len) {
p_data += p_buf->len;
-p_buf = (BT_HDR *)GKI_getnext (p_buf);
}
-GKI_freebuf (GKI_dequeue (&p_port->rx.queue));
+osi_free(fixed_queue_try_dequeue(p_port->rx.queue));
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
count++;
}
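Taken together, the PORT_ReadData hunks above replace the GKI_getfirst()/GKI_getnext() walk over rx.queue with a peek-then-dequeue loop. A condensed sketch of the resulting control flow, reusing the variable names from the diff; this is a paraphrase for readability, not the file's exact body:

/* Copy up to max_len bytes out of rx.queue into p_data, freeing each
 * fully-consumed buffer; flow-control handling is omitted here. */
while (max_len) {
    p_buf = (BT_HDR *)fixed_queue_try_peek_first(p_port->rx.queue);
    if (p_buf == NULL) {
        break;
    }

    if (p_buf->len > max_len) {
        /* Partial read: consume the front of this buffer and stop. */
        memcpy(p_data, (UINT8 *)(p_buf + 1) + p_buf->offset, max_len);
        p_buf->offset += max_len;
        p_buf->len    -= max_len;
        *p_len        += max_len;

        osi_mutex_global_lock();
        p_port->rx.queue_size -= max_len;
        osi_mutex_global_unlock();
        break;
    } else {
        /* Whole buffer consumed: copy it, then dequeue and free it. */
        memcpy(p_data, (UINT8 *)(p_buf + 1) + p_buf->offset, p_buf->len);
        *p_len  += p_buf->len;
        max_len -= p_buf->len;
        p_data  += p_buf->len;

        osi_mutex_global_lock();
        p_port->rx.queue_size -= p_buf->len;
        osi_free(fixed_queue_try_dequeue(p_port->rx.queue));
        osi_mutex_global_unlock();

        count++;
    }
}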
@@ -1265,19 +1268,19 @@ int PORT_Read (UINT16 handle, BT_HDR **pp_buf)
return (PORT_LINE_ERR);
}
-PORT_SCHEDULE_LOCK;
+osi_mutex_global_lock();
-p_buf = (BT_HDR *)GKI_dequeue (&p_port->rx.queue);
+p_buf = (BT_HDR *)fixed_queue_try_dequeue(p_port->rx.queue);
if (p_buf) {
p_port->rx.queue_size -= p_buf->len;
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
/* If rfcomm suspended traffic from the peer based on the rx_queue_size */
/* check if it can be resumed now */
port_flow_control_peer (p_port, TRUE, 1);
} else {
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
}
*pp_buf = p_buf;
@@ -1300,7 +1303,7 @@ static int port_write (tPORT *p_port, BT_HDR *p_buf)
{
/* We should not allow to write data in to server port when connection is not opened */
if (p_port->is_server && (p_port->rfc.state != RFC_STATE_OPENED)) {
-GKI_freebuf (p_buf);
+osi_free (p_buf);
return (PORT_CLOSED);
}
@@ -1314,11 +1317,11 @@ static int port_write (tPORT *p_port, BT_HDR *p_buf)
|| ((p_port->port_ctrl & (PORT_CTRL_REQ_SENT | PORT_CTRL_IND_RECEIVED)) !=
(PORT_CTRL_REQ_SENT | PORT_CTRL_IND_RECEIVED))) {
if ((p_port->tx.queue_size > PORT_TX_CRITICAL_WM)
-|| (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_CRITICAL_WM)) {
+|| (fixed_queue_length(p_port->tx.queue) > PORT_TX_BUF_CRITICAL_WM)) {
RFCOMM_TRACE_WARNING ("PORT_Write: Queue size: %d",
p_port->tx.queue_size);
-GKI_freebuf (p_buf);
+osi_free (p_buf);
if ((p_port->p_callback != NULL) && (p_port->ev_mask & PORT_EV_ERR)) {
p_port->p_callback (PORT_EV_ERR, p_port->inx);
@@ -1333,7 +1336,7 @@ static int port_write (tPORT *p_port, BT_HDR *p_buf)
p_port->rfc.state,
p_port->port_ctrl);
-GKI_enqueue (&p_port->tx.queue, p_buf);
+fixed_queue_enqueue(p_port->tx.queue, p_buf);
p_port->tx.queue_size += p_buf->len;
return (PORT_CMD_PENDING);
@@ -1366,21 +1369,21 @@ int PORT_Write (UINT16 handle, BT_HDR *p_buf)
/* Check if handle is valid to avoid crashing */
if ((handle == 0) || (handle > MAX_RFC_PORTS)) {
-GKI_freebuf (p_buf);
+osi_free (p_buf);
return (PORT_BAD_HANDLE);
}
p_port = &rfc_cb.port.port[handle - 1];
if (!p_port->in_use || (p_port->state == PORT_STATE_CLOSED)) {
-GKI_freebuf (p_buf);
+osi_free (p_buf);
return (PORT_NOT_OPENED);
}
if (p_port->line_status) {
RFCOMM_TRACE_WARNING ("PORT_Write: Data dropped line_status:0x%x",
p_port->line_status);
-GKI_freebuf (p_buf);
+osi_free (p_buf);
return (PORT_LINE_ERR);
}
@@ -1456,14 +1459,14 @@ int PORT_WriteDataCO (UINT16 handle, int *p_len)
return PORT_SUCCESS;
}
/* Length for each buffer is the smaller of GKI buffer, peer MTU, or max_len */
-length = RFCOMM_DATA_POOL_BUF_SIZE -
+length = RFCOMM_DATA_BUF_SIZE -
(UINT16)(sizeof(BT_HDR) + L2CAP_MIN_OFFSET + RFCOMM_DATA_OVERHEAD);
/* If there are buffers scheduled for transmission check if requested */
/* data fits into the end of the queue */
-PORT_SCHEDULE_LOCK;
+osi_mutex_global_lock();
-if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL)
+if (((p_buf = (BT_HDR *)fixed_queue_try_peek_last(p_port->tx.queue)) != NULL)
&& (((int)p_buf->len + available) <= (int)p_port->peer_mtu)
&& (((int)p_buf->len + available) <= (int)length)) {
//if(recv(fd, (UINT8 *)(p_buf + 1) + p_buf->offset + p_buf->len, available, 0) != available)
@@ -1472,7 +1475,7 @@ int PORT_WriteDataCO (UINT16 handle, int *p_len)
{
RFCOMM_TRACE_ERROR("p_data_co_callback DATA_CO_CALLBACK_TYPE_OUTGOING failed, available:%d", available);
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
return (PORT_UNKNOWN_ERROR);
}
//memcpy ((UINT8 *)(p_buf + 1) + p_buf->offset + p_buf->len, p_data, max_len);
@@ -1481,12 +1484,12 @@ int PORT_WriteDataCO (UINT16 handle, int *p_len)
*p_len = available;
p_buf->len += (UINT16)available;
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
return (PORT_SUCCESS);
}
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
//int max_read = length < p_port->peer_mtu ? length : p_port->peer_mtu;
@@ -1495,16 +1498,16 @@ int PORT_WriteDataCO (UINT16 handle, int *p_len)
while (available) {
/* if we're over buffer high water mark, we're done */
if ((p_port->tx.queue_size > PORT_TX_HIGH_WM)
-|| (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) {
+|| (fixed_queue_length(p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) {
port_flow_control_user(p_port);
event |= PORT_EV_FC;
RFCOMM_TRACE_EVENT ("tx queue is full,tx.queue_size:%d,tx.queue.count:%d,available:%d",
-p_port->tx.queue_size, GKI_queue_length(&p_port->tx.queue), available);
+p_port->tx.queue_size, fixed_queue_length(p_port->tx.queue), available);
break;
}
/* continue with rfcomm data write */
-p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_DATA_POOL_ID);
+p_buf = (BT_HDR *)osi_malloc(RFCOMM_DATA_BUF_SIZE);
if (!p_buf) {
break;
}
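With the GKI pools gone, the write paths above size their transmit buffers explicitly and take them from the heap. A minimal sketch of the allocation pattern, assuming the constants referenced in this diff; the wrapper function itself is hypothetical:

/* Hypothetical wrapper for the GKI_getpoolbuf(RFCOMM_DATA_POOL_ID) replacement:
 * allocate a full-size RFCOMM data buffer and prime its BT_HDR header before
 * the caller copies payload in behind the reserved L2CAP/RFCOMM headroom. */
static BT_HDR *alloc_rfcomm_data_buf(void)
{
    BT_HDR *p_buf = (BT_HDR *)osi_malloc(RFCOMM_DATA_BUF_SIZE);
    if (p_buf == NULL) {
        return NULL;    /* heap exhausted; the write loops above simply break */
    }
    p_buf->offset = L2CAP_MIN_OFFSET + RFCOMM_MIN_OFFSET;  /* headroom, as used elsewhere in this file */
    p_buf->len    = 0;
    return p_buf;
}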
@@ -1612,9 +1615,9 @@ int PORT_WriteData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len)
/* If there are buffers scheduled for transmission check if requested */
/* data fits into the end of the queue */
-PORT_SCHEDULE_LOCK;
+osi_mutex_global_lock();
-if (((p_buf = (BT_HDR *)GKI_getlast(&p_port->tx.queue)) != NULL)
+if (((p_buf = (BT_HDR *)fixed_queue_try_peek_last(p_port->tx.queue)) != NULL)
&& ((p_buf->len + max_len) <= p_port->peer_mtu)
&& ((p_buf->len + max_len) <= length)) {
memcpy ((UINT8 *)(p_buf + 1) + p_buf->offset + p_buf->len, p_data, max_len);
@@ -1623,22 +1626,22 @@ int PORT_WriteData (UINT16 handle, char *p_data, UINT16 max_len, UINT16 *p_len)
*p_len = max_len;
p_buf->len += max_len;
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
return (PORT_SUCCESS);
}
-PORT_SCHEDULE_UNLOCK;
+osi_mutex_global_unlock();
while (max_len) {
/* if we're over buffer high water mark, we're done */
if ((p_port->tx.queue_size > PORT_TX_HIGH_WM)
-|| (GKI_queue_length(&p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) {
+|| (fixed_queue_length(p_port->tx.queue) > PORT_TX_BUF_HIGH_WM)) {
break;
}
/* continue with rfcomm data write */
-p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_DATA_POOL_ID);
+p_buf = (BT_HDR *)osi_malloc(RFCOMM_DATA_BUF_SIZE);
if (!p_buf) {
break;
}
@@ -1724,7 +1727,7 @@ int PORT_Test (UINT16 handle, UINT8 *p_data, UINT16 len)
return (PORT_UNKNOWN_ERROR);
}
-if ((p_buf = (BT_HDR *)GKI_getpoolbuf (RFCOMM_CMD_POOL_ID)) != NULL) {
+if ((p_buf = (BT_HDR *)osi_malloc(RFCOMM_CMD_BUF_SIZE)) != NULL) {
p_buf->offset = L2CAP_MIN_OFFSET + RFCOMM_MIN_OFFSET + 2;
p_buf->len = len;