// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <[email protected]>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

#include "tb.h"

/* Switch NVM support */

struct nvm_auth_status { … };

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw) { … }

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status) { … }

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status) { … }

static void nvm_clear_auth_status(const struct tb_switch *sw) { … }

static int nvm_validate_and_write(struct tb_switch *sw) { … }

static int nvm_authenticate_host_dma_port(struct tb_switch *sw) { … }

static int nvm_authenticate_device_dma_port(struct tb_switch *sw) { … }

static void nvm_authenticate_start_dma_port(struct tb_switch *sw) { … }

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) { … }

static inline bool nvm_readable(struct tb_switch *sw) { … }

static inline bool nvm_upgradeable(struct tb_switch *sw) { … }

static int nvm_authenticate(struct tb_switch *sw, bool auth_only) { … }

/**
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 *
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 in case of success and negative errno
 * in case of failure.
 */
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size) { … }

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes) { … }

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes) { … }

static int tb_switch_nvm_add(struct tb_switch *sw) { … }

static void tb_switch_nvm_remove(struct tb_switch *sw) { … }

/* port utility functions */

static const char *tb_port_type(const struct tb_regs_port_header *port) { … }

static void tb_dump_port(struct tb *tb, const struct tb_port *port) { … }

/**
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
int tb_port_state(struct tb_port *port) { … }

/**
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait for
 * @wait_if_unplugged: Wait also when port is unplugged
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the
 * link has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged) { … }
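/*
 * Illustrative sketch (not part of the driver): a caller that needs a
 * connected port to come up, for example after resume, might combine the
 * two helpers above like this. The helper name is hypothetical.
 */
static int __maybe_unused tb_example_wait_port_up(struct tb_port *port)
{
        int ret;

        /* 1 means connected and TB_PORT_UP, 0 means not connected/timed out */
        ret = tb_wait_for_port(port, true);
        if (ret < 0)
                return ret;
        if (!ret)
                return -ENODEV;

        /* Port is usable; the raw state can still be read if needed */
        ret = tb_port_state(port);
        if (ret < 0)
                return ret;

        return 0;
}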
/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits) { … }

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter) { … }

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible to the CM.
 */
int tb_port_unlock(struct tb_port *port) { … }

static int __tb_port_enable(struct tb_port *port, bool enable) { … }

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used to enable a lane 0 or lane 1 adapter.
 */
int tb_port_enable(struct tb_port *port) { … }

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used to disable a lane 0 or lane 1 adapter.
 */
int tb_port_disable(struct tb_port *port) { … }

static int tb_port_reset(struct tb_port *port) { … }

/*
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port) { … }

static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid, int max_hopid) { … }

/**
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid) { … }

/**
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 *
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
 * case of error.
 */
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid) { … }

/**
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_in_hopid(struct tb_port *port, int hopid) { … }

/**
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
 */
void tb_port_release_out_hopid(struct tb_port *port, int hopid) { … }

static inline bool tb_switch_is_reachable(const struct tb_switch *parent, const struct tb_switch *sw) { … }

/**
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual link
 * port, the function follows that link and returns the other end of
 * that same link.
 *
 * If the @end port has been reached, return %NULL.
 *
 * Domain tb->lock must be held when this function is called.
 */
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, struct tb_port *prev) { … }
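/*
 * Illustrative sketch (not part of the driver): reserve an input HopID at
 * the tail of a path and walk every port between @src and @dst with
 * tb_next_port_on_path(). The helper name and the HopID range are
 * hypothetical; tb->lock is assumed to be held as required above.
 */
static int __maybe_unused tb_example_walk_path(struct tb_port *src,
                                               struct tb_port *dst)
{
        struct tb_port *p = NULL;
        int hopid;

        /* Reserve any free input HopID in an example range at the tail */
        hopid = tb_port_alloc_in_hopid(dst, 8, 15);
        if (hopid < 0)
                return hopid;

        /* Visit both ends of every hop from @src to @dst */
        while ((p = tb_next_port_on_path(src, dst, p)))
                tb_port_dbg(p, "port is on the path\n");

        tb_port_release_in_hopid(dst, hopid);
        return 0;
}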
/**
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 *
 * Returns link speed in Gb/s or negative errno in case of failure.
 */
int tb_port_get_link_speed(struct tb_port *port) { … }

/**
 * tb_port_get_link_generation() - Returns link generation
 * @port: Lane adapter
 *
 * Returns the link generation as a number or negative errno in case of
 * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
 * links so for those always returns 2.
 */
int tb_port_get_link_generation(struct tb_port *port) { … }

/**
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 *
 * Returns the link width as encoded in &enum tb_link_width or negative
 * errno in case of failure.
 */
int tb_port_get_link_width(struct tb_port *port) { … }

/**
 * tb_port_width_supported() - Is the given link width supported
 * @port: Port to check
 * @width: Widths to check (bitmask)
 *
 * Can be called for any lane adapter. Checks if given @width is
 * supported by the hardware and returns %true if it is.
 */
bool tb_port_width_supported(struct tb_port *port, unsigned int width) { … }

/**
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width
 *
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width) { … }

/**
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 *
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * lane bonding.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding) { … }

/**
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 *
 * Enable bonding by setting the link width of the port and of the other
 * port in case of a dual link port. Does not wait for the link to
 * actually reach the bonded state so the caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in the expected state.
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_port_lane_bonding_enable(struct tb_port *port) { … }

/**
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 *
 * Disable bonding by setting the link width of the port and of the
 * other port in case of a dual link port.
 */
void tb_port_lane_bonding_disable(struct tb_port *port) { … }

/**
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (bitmask)
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the width was not reached
 * within the given timeout, %0 if it was. Can be passed a mask of
 * expected widths and succeeds if any of the widths is reached.
 */
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width, int timeout_msec) { … }
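/*
 * Illustrative sketch (not part of the driver): the usual bonding sequence
 * described above - enable bonding, wait for the link to reach the dual
 * width, then re-read the credits. The helper name is hypothetical and the
 * 100 ms timeout is an arbitrary example value.
 */
static int __maybe_unused tb_example_bond_lanes(struct tb_port *port)
{
        int ret;

        ret = tb_port_lane_bonding_enable(port);
        if (ret)
                return ret;

        /* Wait until both ends report a single bonded (x2) link */
        ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_DUAL, 100);
        if (ret) {
                tb_port_lane_bonding_disable(port);
                return ret;
        }

        /* Total credits may change once the link is bonded */
        return tb_port_update_credits(port);
}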
static int tb_port_do_update_credits(struct tb_port *port) { … }

/**
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 *
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
 */
int tb_port_update_credits(struct tb_port *port) { … }

static int tb_port_start_lane_initialization(struct tb_port *port) { … }

/*
 * Returns true if the port had something (router, XDomain) connected
 * before suspend.
 */
static bool tb_port_resume(struct tb_port *port) { … }

/**
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
 */
bool tb_port_is_enabled(struct tb_port *port) { … }

/**
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
 */
bool tb_usb3_port_is_enabled(struct tb_port *port) { … }

/**
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
 */
int tb_usb3_port_enable(struct tb_port *port, bool enable) { … }

/**
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
 */
bool tb_pci_port_is_enabled(struct tb_port *port) { … }

/**
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
 */
int tb_pci_port_enable(struct tb_port *port, bool enable) { … }

/**
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 *
 * Checks if the DP OUT adapter port has HPD bit already set.
 */
int tb_dp_port_hpd_is_active(struct tb_port *port) { … }

/**
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 *
 * If the DP IN port has HPD set, this function can be used to clear it.
 */
int tb_dp_port_hpd_clear(struct tb_port *port) { … }

/**
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 *
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
 */
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, unsigned int aux_tx, unsigned int aux_rx) { … }

/**
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
 */
bool tb_dp_port_is_enabled(struct tb_port *port) { … }

/**
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 *
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
 */
int tb_dp_port_enable(struct tb_port *port, bool enable) { … }
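/*
 * Illustrative sketch (not part of the driver): the typical order in which
 * the DP adapter helpers above are used when setting up a DP tunnel - clear
 * any stale HPD on a DP IN adapter, program the Hop IDs and then enable the
 * paths. The helper name and the Hop ID values are hypothetical.
 */
static int __maybe_unused tb_example_dp_adapter_setup(struct tb_port *port)
{
        int ret;

        if (tb_port_is_dpin(port)) {
                ret = tb_dp_port_hpd_clear(port);
                if (ret)
                        return ret;
        }

        /* Example Hop IDs for video and the two AUX directions */
        ret = tb_dp_port_set_hops(port, 8, 9, 9);
        if (ret)
                return ret;

        return tb_dp_port_enable(port, true);
}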
/* switch utility functions */

static const char *tb_switch_generation_name(const struct tb_switch *sw) { … }

static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) { … }

static int tb_switch_reset_host(struct tb_switch *sw) { … }

static int tb_switch_reset_device(struct tb_switch *sw) { … }

static bool tb_switch_enumerated(struct tb_switch *sw) { … }

/**
 * tb_switch_reset() - Perform reset to the router
 * @sw: Router to reset
 *
 * Issues reset to the router @sw. Can be used for any router. For host
 * routers, resets all the downstream ports and cleans up the path config
 * spaces accordingly. For device routers, issues a downstream port reset
 * through the parent router, so as a side effect there will be an unplug
 * soon after this is finished.
 *
 * If the router is not enumerated, does nothing.
 *
 * Returns %0 on success or negative errno in case of failure.
 */
int tb_switch_reset(struct tb_switch *sw) { … }

/**
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 *
 * Wait until the specified bits at the specified offset reach the specified
 * value. Returns %0 in case of success, %-ETIMEDOUT if the @value was not
 * reached within the given timeout or a negative errno in case of failure.
 */
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, u32 value, int timeout_msec) { … }
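/*
 * Illustrative sketch (not part of the driver): tb_switch_wait_for_bit()
 * polls until the masked bits read back as @value, so it can wait for a
 * bit either to become set or to clear. The helper below is hypothetical
 * and the 250 ms timeout is an arbitrary example value.
 */
static int __maybe_unused tb_example_wait_bit(struct tb_switch *sw, u32 offset,
                                              u32 bit, bool set)
{
        return tb_switch_wait_for_bit(sw, offset, bit, set ? bit : 0, 250);
}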
/*
 * tb_plug_events_active() - enable/disable plug events on a switch
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_plug_events_active(struct tb_switch *sw, bool active) { … }

static ssize_t authorized_show(struct device *dev, struct device_attribute *attr, char *buf) { … }

static int disapprove_switch(struct device *dev, void *not_used) { … }

static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) { … }

static ssize_t authorized_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { … }
static DEVICE_ATTR_RW(authorized);

static ssize_t boot_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR_RO(boot);

static ssize_t device_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR_RO(device);

static ssize_t device_name_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR_RO(device_name);

static ssize_t generation_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR_RO(generation);

static ssize_t key_show(struct device *dev, struct device_attribute *attr, char *buf) { … }

static ssize_t key_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { … }
static DEVICE_ATTR(key, 0600, key_show, key_store);

static ssize_t speed_show(struct device *dev, struct device_attribute *attr, char *buf) { … }

/*
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
 */
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);

static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);

static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);

static ssize_t nvm_authenticate_show(struct device *dev, struct device_attribute *attr, char *buf) { … }

static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf, bool disconnect) { … }

static ssize_t nvm_authenticate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { … }
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev, struct device_attribute *attr, char *buf) { … }

static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { … }
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);

static ssize_t nvm_version_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR_RO(vendor);

static ssize_t vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR_RO(vendor_name);

static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, char *buf) { … }
static DEVICE_ATTR_RO(unique_id);

static struct attribute *switch_attrs[] = …;

static umode_t switch_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { … }

static const struct attribute_group switch_group = …;

static const struct attribute_group *switch_groups[] = …;

static void tb_switch_release(struct device *dev) { … }

static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env) { … }

/*
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
 */
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) { … }

static int __maybe_unused tb_switch_runtime_resume(struct device *dev) { … }

static const struct dev_pm_ops tb_switch_pm_ops = …;

const struct device_type tb_switch_type = …;

static int tb_switch_get_generation(struct tb_switch *sw) { … }

static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) { … }

/**
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
 * failure.
 */
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, u64 route) { … }

/**
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 *
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port until it is
 * flashed with a valid NVM firmware.
 *
 * The returned switch must be released by calling tb_switch_put().
 *
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure.
 */
struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) { … }
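/*
 * Illustrative sketch (not part of the driver): the usual enumeration flow
 * a connection manager follows - allocate the switch, upload its
 * configuration and add it to the bus - dropping the reference on any
 * failure. The helper name is hypothetical; tb_switch_configure() and
 * tb_switch_add() are documented further below.
 */
static int __maybe_unused tb_example_enumerate(struct tb *tb,
                                               struct device *parent, u64 route)
{
        struct tb_switch *sw;
        int ret;

        sw = tb_switch_alloc(tb, parent, route);
        if (IS_ERR(sw))
                return PTR_ERR(sw);

        ret = tb_switch_configure(sw);
        if (ret) {
                tb_switch_put(sw);
                return ret;
        }

        ret = tb_switch_add(sw);
        if (ret)
                tb_switch_put(sw);

        return ret;
}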
/**
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 *
 * Call this function before the switch is added to the system. It will
 * upload configuration to the switch and make it available for the
 * connection manager to use. Can be called again for the switch after
 * resume from low power states to re-initialize it.
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_configure(struct tb_switch *sw) { … }

/**
 * tb_switch_configuration_valid() - Set the tunneling configuration to be valid
 * @sw: Router to configure
 *
 * Needs to be called before any tunnels can be set up through the
 * router. Can be called for any router.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_switch_configuration_valid(struct tb_switch *sw) { … }

static int tb_switch_set_uuid(struct tb_switch *sw) { … }

static int tb_switch_add_dma_port(struct tb_switch *sw) { … }

static void tb_switch_default_link_ports(struct tb_switch *sw) { … }

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) { … }

static int tb_switch_update_link_attributes(struct tb_switch *sw) { … }

/* Must be called after tb_switch_update_link_attributes() */
static void tb_switch_link_init(struct tb_switch *sw) { … }

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
static int tb_switch_lane_bonding_enable(struct tb_switch *sw) { … }

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and its parent. This can be called
 * even if lanes were not bonded originally.
 */
static int tb_switch_lane_bonding_disable(struct tb_switch *sw) { … }

/* Note updating sw->link_width done in tb_switch_update_link_attributes() */
static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width) { … }

/* Note updating sw->link_width done in tb_switch_update_link_attributes() */
static int tb_switch_asym_disable(struct tb_switch *sw) { … }

/**
 * tb_switch_set_link_width() - Configure router link width
 * @sw: Router to configure
 * @width: The new link width
 *
 * Set device router link width to @width from the router upstream port
 * perspective. Also supports asymmetric links if both sides of the link
 * support it.
 *
 * Does nothing for host router.
 *
 * Returns %0 in case of success, negative errno otherwise.
 */
int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width) { … }

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Returns %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw) { … }

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw) { … }
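/*
 * Illustrative sketch (not part of the driver): after a device router has
 * been added, a connection manager might widen the upstream link with
 * tb_switch_set_link_width() and then mark it configured with
 * tb_switch_configure_link(), roughly in this order. The helper name is
 * hypothetical.
 */
static int __maybe_unused tb_example_widen_and_configure(struct tb_switch *sw)
{
        int ret;

        /* Ask for a full dual-lane link from the upstream port's perspective */
        ret = tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
        if (ret)
                return ret;

        /* Keep the (now bonded) upstream link connected across sleep */
        return tb_switch_configure_link(sw);
}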
static void tb_switch_credits_init(struct tb_switch *sw) { … }

static int tb_switch_port_hotplug_enable(struct tb_switch *sw) { … }

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from DROM and initialize ports so that
 * they can be used to connect other switches. The switch will be
 * exposed to the userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw) { … }

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference count drops to zero. If there are switches connected
 * below this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw) { … }

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw) { … }

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) { … }

static void tb_switch_check_wakes(struct tb_switch *sw) { … }

/**
 * tb_switch_resume() - Resume a switch after sleep
 * @sw: Switch to resume
 * @runtime: Is this resume from runtime suspend or system sleep
 *
 * Resumes and re-enumerates the router (and all its children), if still
 * plugged after suspend. Does not enumerate a device router whose UID was
 * changed during suspend. If this is resume from system sleep, notifies the
 * PM core about the wakes that occurred during suspend. Disables all wakes,
 * except the USB4 wake of the upstream port for USB4 routers that shall
 * always be enabled.
 */
int tb_switch_resume(struct tb_switch *sw, bool runtime) { … }

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends the router and all its children. Enables wakes according to the
 * value of @runtime and then sets the sleep bit for the router. If @sw is
 * the host router the domain is ready to go to sleep once this function
 * returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime) { … }

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if the resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) { … }

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { … }

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { … }
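/*
 * Illustrative sketch (not part of the driver): claiming a DP resource
 * before setting up a DP tunnel, as described above. The helper name is
 * hypothetical; the matching tb_switch_dealloc_dp_resource() call belongs
 * in the tunnel teardown path.
 */
static int __maybe_unused tb_example_claim_dp_resource(struct tb_switch *sw,
                                                       struct tb_port *in)
{
        if (!tb_switch_query_dp_resource(sw, in))
                return -EBUSY;

        return tb_switch_alloc_dp_resource(sw, in);
}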
struct tb_sw_lookup { … };

static int tb_switch_match(struct device *dev, const void *data) { … }

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) { … }

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) { … }

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) { … }

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw, enum tb_port_type type) { … }

/*
 * Can be used for reading/writing a specified PCIe bridge for any
 * Thunderbolt 3 device. For now used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, unsigned int pcie_offset, u32 value) { … }

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For a Titan Ridge switch to enter the CLx state, its PCIe bridges shall
 * enable entry to the PCIe L1 state. Shall be called after the upstream
 * PCIe tunnel has been configured. Due to an Intel platform limitation,
 * shall be called only for the first hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw) { … }

/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router. For Alpine Ridge and Titan Ridge
 * performs special flows that make the xHCI functional for any device
 * connected to the type-C port. Call only after the PCIe tunnel has been
 * established. The function only does the connect if not done already
 * so it can be called several times for the same router.
 */
int tb_switch_xhci_connect(struct tb_switch *sw) { … }

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
 * ports.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw) { … }
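/*
 * Illustrative sketch (not part of the driver): looking up a router by its
 * route string and checking whether it has a DP IN adapter. The reference
 * taken by tb_switch_find_by_route() must be dropped with tb_switch_put().
 * The helper name is hypothetical.
 */
static bool __maybe_unused tb_example_has_dp_in(struct tb *tb, u64 route)
{
        struct tb_switch *sw;
        bool found;

        sw = tb_switch_find_by_route(tb, route);
        if (!sw)
                return false;

        found = tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN) != NULL;
        tb_switch_put(sw);

        return found;
}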