cregit-Linux: how code gets into the kernel

Release: 4.8 — File: net/bluetooth/hci_request.c

Directory: net/bluetooth
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"


#define HCI_REQ_DONE	  0

#define HCI_REQ_PEND	  1

#define HCI_REQ_CANCELED  2


/* Prepare a request object for queueing HCI commands against @hdev. */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	req->hdev = hdev;
	req->err = 0;
	skb_queue_head_init(&req->cmd_q);
}

Contributors

Person          | Tokens | Prop    | Commits | CommitProp
johan hedberg   | 35     | 100.00% | 1       | 100.00%
Total           | 35     | 100.00% | 1       | 100.00%


/* Hand the accumulated command queue over to the controller's command
 * worker.
 *
 * At most one of @complete / @complete_skb is recorded on the last
 * command so the event path can signal completion of the whole request.
 * Returns 0 on success, the stored build error if request construction
 * failed, or -ENODATA for an empty request.
 */
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *last;
	unsigned long irq_flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* A build error invalidates the whole request: drop every queued
	 * command and report the error.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Refuse to run a request containing no commands at all. */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	last = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(last)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(last)->hci.req_complete_skb = complete_skb;
		bt_cb(last)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, irq_flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, irq_flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

Contributors

Person           | Tokens | Prop    | Commits | CommitProp
johan hedberg    | 189    | 97.93%  | 4       | 80.00%
marcel holtmann  | 4      | 2.07%   | 1       | 20.00%
Total            | 193    | 100.00% | 5       | 100.00%


/* Run the request, invoking @complete (status-only callback) when done. */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg23100.00%1100.00%
Total23100.00%1100.00%


/* Run the request, invoking @complete with the response skb when done. */
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg23100.00%1100.00%
Total23100.00%1100.00%


/* Completion callback for the synchronous request helpers: store the
 * result (and, if present, a reference to the response skb) and wake the
 * sleeping waiter.  Does nothing unless a request is actually pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;
	if (skb)
		hdev->req_skb = skb_get(skb);
	wake_up_interruptible(&hdev->req_wait_q);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg76100.00%1100.00%
Total76100.00%1100.00%


/* Abort a pending synchronous request with @err and wake its waiter. */
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = err;
	hdev->req_status = HCI_REQ_CANCELED;
	wake_up_interruptible(&hdev->req_wait_q);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg54100.00%2100.00%
Total54100.00%2100.00%


/* Send a single HCI command and sleep until the matching @event arrives
 * (event 0 selects the default Command Complete/Status for @opcode).
 *
 * Returns the response skb on success, otherwise ERR_PTR():
 *   -EINTR if a signal woke us, -ETIMEDOUT if @timeout (jiffies) expired,
 *   -ENODATA if the request completed without a response skb, or the
 *   controller status mapped through bt_to_errno().
 *
 * hdev->req_status/req_result/req_skb are the rendezvous shared with
 * hci_req_sync_complete(); the task state is set to TASK_INTERRUPTIBLE
 * *before* hci_req_run_skb() so a completion firing before
 * schedule_timeout() is not lost.
 *
 * NOTE(review): signal_pending() is checked first after waking, so a
 * signal arriving after a successful completion still returns -EINTR and
 * leaves hdev->req_skb set for the next sync request to free — confirm
 * this is acceptable to all callers.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout) { DECLARE_WAITQUEUE(wait, current); struct hci_request req; struct sk_buff *skb; int err = 0; BT_DBG("%s", hdev->name); hci_req_init(&req, hdev); hci_req_add_ev(&req, opcode, plen, param, event); hdev->req_status = HCI_REQ_PEND; add_wait_queue(&hdev->req_wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); err = hci_req_run_skb(&req, hci_req_sync_complete); if (err < 0) { remove_wait_queue(&hdev->req_wait_q, &wait); set_current_state(TASK_RUNNING); return ERR_PTR(err); } schedule_timeout(timeout); remove_wait_queue(&hdev->req_wait_q, &wait); if (signal_pending(current)) return ERR_PTR(-EINTR); switch (hdev->req_status) { case HCI_REQ_DONE: err = -bt_to_errno(hdev->req_result); break; case HCI_REQ_CANCELED: err = -hdev->req_result; break; default: err = -ETIMEDOUT; break; } hdev->req_status = hdev->req_result = 0; skb = hdev->req_skb; hdev->req_skb = NULL; BT_DBG("%s end: err %d", hdev->name, err); if (err < 0) { kfree_skb(skb); return ERR_PTR(err); } if (!skb) return ERR_PTR(-ENODATA); return skb; }

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg280100.00%1100.00%
Total280100.00%1100.00%

EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper around __hci_cmd_sync_ev() waiting for the default
 * Command Complete/Status event.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg42100.00%1100.00%
Total42100.00%1100.00%

EXPORT_SYMBOL(__hci_cmd_sync); /* Execute request and wait for completion. */
/* Build a request via @func and execute it, sleeping until completion.
 *
 * @func fills the request (may queue zero commands); @opt is passed
 * through to it.  @timeout is in jiffies.  On return, *hci_status (if
 * non-NULL) holds the controller status byte on success, 0 for the
 * empty-request case, or HCI_ERROR_UNSPECIFIED for local failures.
 *
 * Returns 0 on success; -ENODATA from req_run is translated to success
 * because a request with conditionals may legitimately queue nothing.
 * The task state is set to TASK_INTERRUPTIBLE *before* hci_req_run_skb()
 * so a completion firing before schedule_timeout() is not lost.
 *
 * NOTE(review): signal_pending() is checked first after waking and
 * returns -EINTR without resetting hdev->req_status or freeing
 * hdev->req_skb; the next sync request's kfree_skb() cleans that up —
 * confirm callers tolerate the stale state in between.
 */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, unsigned long opt), unsigned long opt, u32 timeout, u8 *hci_status) { struct hci_request req; DECLARE_WAITQUEUE(wait, current); int err = 0; BT_DBG("%s start", hdev->name); hci_req_init(&req, hdev); hdev->req_status = HCI_REQ_PEND; err = func(&req, opt); if (err) { if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; return err; } add_wait_queue(&hdev->req_wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); err = hci_req_run_skb(&req, hci_req_sync_complete); if (err < 0) { hdev->req_status = 0; remove_wait_queue(&hdev->req_wait_q, &wait); set_current_state(TASK_RUNNING); /* ENODATA means the HCI request command queue is empty. * This can happen when a request with conditionals doesn't * trigger any commands to be sent. This is normal behavior * and should not trigger an error return. */ if (err == -ENODATA) { if (hci_status) *hci_status = 0; return 0; } if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; return err; } schedule_timeout(timeout); remove_wait_queue(&hdev->req_wait_q, &wait); if (signal_pending(current)) return -EINTR; switch (hdev->req_status) { case HCI_REQ_DONE: err = -bt_to_errno(hdev->req_result); if (hci_status) *hci_status = hdev->req_result; break; case HCI_REQ_CANCELED: err = -hdev->req_result; if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; break; default: err = -ETIMEDOUT; if (hci_status) *hci_status = HCI_ERROR_UNSPECIFIED; break; } kfree_skb(hdev->req_skb); hdev->req_skb = NULL; hdev->req_status = hdev->req_result = 0; BT_DBG("%s end: err %d", hdev->name, err); return err; }

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg31496.02%480.00%
frederic dalleaufrederic dalleau133.98%120.00%
Total327100.00%5100.00%


/* Build a request via @req and execute it synchronously, serialized
 * against other synchronous requests on @hdev.  Fails with -ENETDOWN
 * when the device is not up.  On failure *hci_status (if non-NULL)
 * carries an HCI error code.
 */
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	err = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg86100.00%4100.00%
Total86100.00%4100.00%


/* Allocate and fill an skb holding a single HCI command packet (command
 * header plus @plen parameter bytes).  Returns NULL on allocation
 * failure.
 */
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + plen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	/* Tag the buffer so the TX path knows what it is carrying. */
	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg12898.46%150.00%
marcel holtmannmarcel holtmann21.54%150.00%
Total130100.00%2100.00%

/* Queue a command to an asynchronous HCI request */
/* Queue a command on an asynchronous HCI request.
 *
 * @event selects the HCI event that terminates the command (0 for the
 * default Command Complete/Status).  On allocation failure req->err is
 * set so the eventual req_run() aborts the whole request.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *cmd;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* Once building has failed there is no point queueing more. */
	if (req->err)
		return;

	cmd = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!cmd) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command queued marks the start of the request. */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(cmd)->hci.req_flags |= HCI_REQ_START;

	bt_cb(cmd)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, cmd);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg13797.86%375.00%
marcel holtmannmarcel holtmann32.14%125.00%
Total140100.00%4100.00%


/* Queue a command terminated by the default completion event. */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg34100.00%1100.00%
Total34100.00%1100.00%


/* Toggle "fast connectable" page scan parameters on a BR/EDR capable
 * controller (Bluetooth 1.2 or later).  Commands are only queued when
 * the requested values differ from the controller's current settings.
 */
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		/* Interlaced scan with a 160 msec scan interval. */
		type = PAGE_SCAN_TYPE_INTERLACED;
		acp.interval = cpu_to_le16(0x0100);
	} else {
		/* Standard scan with the default 1.28 sec interval. */
		type = PAGE_SCAN_TYPE_STANDARD;
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg155100.00%1100.00%
Total155100.00%1100.00%

/* This function controls the background scanning based on hdev->pend_le_conns * list. If there are pending LE connection we start the background scanning, * otherwise we stop it. * * This function requires the caller holds hdev->lock. */
static void __hci_update_background_scan(struct hci_request *req) { struct hci_dev *hdev = req->hdev; if (!test_bit(HCI_UP, &hdev->flags) || test_bit(HCI_INIT, &hdev->flags) || hci_dev_test_flag(hdev, HCI_SETUP) || hci_dev_test_flag(hdev, HCI_CONFIG) || hci_dev_test_flag(hdev, HCI_AUTO_OFF) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return; /* No point in doing scanning if LE support hasn't been enabled */ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return; /* If discovery is active don't interfere with it */ if (hdev->discovery.state != DISCOVERY_STOPPED) return; /* Reset RSSI and UUID filters when starting background scanning * since these filters are meant for service discovery only. * * The Start Discovery and Start Service Discovery operations * ensure to set proper values for RSSI threshold and UUID * filter list. So it is safe to just reset them here. */ hci_discovery_filter_clear(hdev); if (list_empty(&hdev->pend_le_conns) && list_empty(&hdev->pend_le_reports)) { /* If there is no pending LE connections or devices * to be scanned for, we should stop the background * scanning. */ /* If controller is not scanning we are done. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; hci_req_add_le_scan_disable(req); BT_DBG("%s stopping background scanning", hdev->name); } else { /* If there is at least one pending LE connection, we should * keep the background scan running. */ /* If controller is connecting, we should not start scanning * since some controllers are not able to scan and connect at * the same time. */ if (hci_lookup_le_connect(hdev)) return; /* If controller is currently scanning, we stop it to ensure we * don't miss any advertising (due to duplicates filter). */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) hci_req_add_le_scan_disable(req); hci_req_add_le_passive_scan(req); BT_DBG("%s starting background scanning", hdev->name); } }

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg191100.00%1100.00%
Total191100.00%1100.00%


/* Queue a Write Local Name command carrying hdev->dev_name. */
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, req->hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg56100.00%1100.00%
Total56100.00%1100.00%

#define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, writing at most @len bytes.  Reserved UUIDs (< 0x1100) and the
 * PnP UUID are skipped.  If space runs out the field type is downgraded
 * from "complete" to "some".  Returns the advanced write pointer.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	struct bt_uuid *uuid;
	u8 *out = data;
	u8 *field = NULL;

	if (len < 4)
		return out;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		/* The PnP UUID is advertised via EIR_DEVICE_ID instead. */
		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Open the field header lazily on the first match. */
		if (!field) {
			field = out;
			field[0] = 1;
			field[1] = EIR_UUID16_ALL;
			out += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((out - data) + sizeof(u16) > len) {
			field[1] = EIR_UUID16_SOME;
			break;
		}

		*out++ = (uuid16 & 0x00ff);
		*out++ = (uuid16 & 0xff00) >> 8;
		field[0] += sizeof(uuid16);
	}

	return out;
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg183100.00%1100.00%
Total183100.00%1100.00%


/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data, writing at most @len bytes.  If space runs out the field type
 * is downgraded from "complete" to "some".  Returns the advanced write
 * pointer.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	struct bt_uuid *uuid;
	u8 *out = data;
	u8 *field = NULL;

	if (len < 6)
		return out;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Open the field header lazily on the first match. */
		if (!field) {
			field = out;
			field[0] = 1;
			field[1] = EIR_UUID32_ALL;
			out += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((out - data) + sizeof(u32) > len) {
			field[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(out, &uuid->uuid[12], sizeof(u32));
		out += sizeof(u32);
		field[0] += sizeof(u32);
	}

	return out;
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg156100.00%1100.00%
Total156100.00%1100.00%


/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data, writing at most @len bytes.  If space runs out the field type
 * is downgraded from "complete" to "some".  Returns the advanced write
 * pointer.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	struct bt_uuid *uuid;
	u8 *out = data;
	u8 *field = NULL;

	if (len < 18)
		return out;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Open the field header lazily on the first match. */
		if (!field) {
			field = out;
			field[0] = 1;
			field[1] = EIR_UUID128_ALL;
			out += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((out - data) + 16 > len) {
			field[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(out, uuid->uuid, 16);
		out += 16;
		field[0] += 16;
	}

	return out;
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg140100.00%1100.00%
Total140100.00%1100.00%


/* Serialize the Extended Inquiry Response payload into @data: local name
 * (shortened beyond 48 bytes), TX power, Device ID, then the 16/32/128
 * bit service UUID lists, each bounded by HCI_MAX_EIR_LENGTH.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else {
			ptr[1] = EIR_NAME_COMPLETE;
		}

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += name_len + 2;
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;
		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;
		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);
		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg263100.00%1100.00%
Total263100.00%1100.00%


void __hci_req_update_eir(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct hci_cp_write_eir cp; if (!hdev_is_powered(hdev)) return; if (!lmp_ext_inq_capable(hdev)) return; if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return; if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) return; memset(&cp, 0, sizeof(cp)); create_eir(hdev, cp.data); if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) return; memcpy(hdev->eir, cp.data, sizeof(cp.data)); hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); }

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg140100.00%1100.00%
Total140100.00%1100.00%


/* Queue a LE Set Scan Enable command that turns scanning off. */
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

Contributors

PersonTokensPropCommitsCommitProp
johan hedbergjohan hedberg48100.00%1100.00%
Total48100.00%1100.00%


static void add_to_white_list(struct hci_request *req, struct hci_conn_params *params) { struct hci_cp_le_add_to_white_list cp; cp.bdaddr_type = params->addr_type; bacpy(&cp.bdaddr, &params->addr); hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST