From 736f30b8b14e996320a7444d1203a97a49ba96bb Mon Sep 17 00:00:00 2001 From: "J. Avila" Date: Sat, 1 Aug 2020 19:15:07 +0000 Subject: [PATCH 0001/1304] ANDROID: GKI: kernel: tick-sched: Move wake callback registration code The code to register a wakeup callback exists outside of the #ifdef CONFIG_HIGH_RES_TIMERS which actually uses it, causing compilation errors. Move it inside the block, where it belongs. Bug: 162654685 Fixes: 4c3d3d24ddc4 ("kernel: tick-sched: Add an API for wakeup callbacks") Change-Id: I3876481c71d63f4cc655280bdd183024b70897c8 Signed-off-by: J. Avila --- kernel/time/tick-sched.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f8256bb66bfa..77c359d58817 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1262,6 +1262,15 @@ void tick_irq_enter(void) static void (*wake_callback)(void); +void register_tick_sched_wakeup_callback(void (*cb)(void)) +{ + if (!wake_callback) + wake_callback = cb; + else + pr_warn("tick-sched wake cb already exists; skipping.\n"); +} +EXPORT_SYMBOL_GPL(register_tick_sched_wakeup_callback); + /* * We rearm the timer until we get disabled by the idle code. * Called with interrupts disabled. 
@@ -1404,15 +1413,6 @@ int tick_check_oneshot_change(int allow_nohz) return 0; } -void register_tick_sched_wakeup_callback(void (*cb)(void)) -{ - if (!wake_callback) - wake_callback = cb; - else - pr_warn("tick-sched wake cb already exists; skipping.\n"); -} -EXPORT_SYMBOL_GPL(register_tick_sched_wakeup_callback); - ktime_t *get_next_event_cpu(unsigned int cpu) { return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event); -- GitLab From 167edeeb034f65f57790ff70b1af84a501274d15 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Thu, 19 Sep 2019 11:04:48 -0500 Subject: [PATCH 0002/1304] crypto: ccp - Release all allocated memory if sha type is invalid [ Upstream commit 128c66429247add5128c03dc1e144ca56f05a4e2 ] Release all allocated memory if sha type is invalid: In ccp_run_sha_cmd, if the type of sha is invalid, the allocated hmac_buf should be released. v2: fix the goto. Signed-off-by: Navid Emamdoost Acked-by: Gary R Hook Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- drivers/crypto/ccp/ccp-ops.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 330853a2702f..43b74cf0787e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1783,8 +1783,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) LSB_ITEM_SIZE); break; default: + kfree(hmac_buf); ret = -EINVAL; - goto e_ctx; + goto e_data; } memset(&hmac_cmd, 0, sizeof(hmac_cmd)); -- GitLab From 17271f33fd8811b364ae3c54671566434d46d3eb Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Wed, 25 Sep 2019 12:02:41 -0300 Subject: [PATCH 0003/1304] media: rc: prevent memory leak in cx23888_ir_probe [ Upstream commit a7b2df76b42bdd026e3106cf2ba97db41345a177 ] In cx23888_ir_probe if kfifo_alloc fails the allocated memory for state should be released. 
Signed-off-by: Navid Emamdoost Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/pci/cx23885/cx23888-ir.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c index 00329f668b59..5177479d13d3 100644 --- a/drivers/media/pci/cx23885/cx23888-ir.c +++ b/drivers/media/pci/cx23885/cx23888-ir.c @@ -1178,8 +1178,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev) return -ENOMEM; spin_lock_init(&state->rx_kfifo_lock); - if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL)) + if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, + GFP_KERNEL)) { + kfree(state); return -ENOMEM; + } state->dev = dev; sd = &state->sd; -- GitLab From c57c213538156bff971aa352d9d7749196dfbfd8 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 30 Jul 2020 19:14:12 -0400 Subject: [PATCH 0004/1304] iio: imu: adis16400: fix memory leak [ Upstream commit 9c0530e898f384c5d279bfcebd8bb17af1105873 ] In adis_update_scan_mode_burst, if adis->buffer allocation fails release the adis->xfer. 
Signed-off-by: Navid Emamdoost Reviewed-by: Alexandru Ardelean Signed-off-by: Jonathan Cameron Signed-off-by: Sasha Levin --- drivers/iio/imu/adis16400_buffer.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c index e70a5339acb1..3fc11aec98b9 100644 --- a/drivers/iio/imu/adis16400_buffer.c +++ b/drivers/iio/imu/adis16400_buffer.c @@ -38,8 +38,11 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, return -ENOMEM; adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); - if (!adis->buffer) + if (!adis->buffer) { + kfree(adis->xfer); + adis->xfer = NULL; return -ENOMEM; + } tx = adis->buffer + burst_length; tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); -- GitLab From e15f8a9b0046418295d09db24c1ec306c80d013b Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Tue, 1 Oct 2019 22:46:07 -0500 Subject: [PATCH 0005/1304] drm/amdgpu: fix multiple memory leaks in acp_hw_init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 57be09c6e8747bf48704136d9e3f92bfb93f5725 ] In acp_hw_init there are some allocations that needs to be released in case of failure: 1- adev->acp.acp_genpd should be released if any allocation attemp for adev->acp.acp_cell, adev->acp.acp_res or i2s_pdata fails. 2- all of those allocations should be released if mfd_add_hotplug_devices or pm_genpd_add_device fail. 3- Release is needed in case of time out values expire. 
Reviewed-by: Christian König Signed-off-by: Navid Emamdoost Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 34 ++++++++++++++++--------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 71efcf38f11b..94cd8a261091 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -276,7 +276,7 @@ static int acp_hw_init(void *handle) u32 val = 0; u32 count = 0; struct device *dev; - struct i2s_platform_data *i2s_pdata; + struct i2s_platform_data *i2s_pdata = NULL; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -317,20 +317,21 @@ static int acp_hw_init(void *handle) adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell), GFP_KERNEL); - if (adev->acp.acp_cell == NULL) - return -ENOMEM; + if (adev->acp.acp_cell == NULL) { + r = -ENOMEM; + goto failure; + } adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); if (adev->acp.acp_res == NULL) { - kfree(adev->acp.acp_cell); - return -ENOMEM; + r = -ENOMEM; + goto failure; } i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); if (i2s_pdata == NULL) { - kfree(adev->acp.acp_res); - kfree(adev->acp.acp_cell); - return -ENOMEM; + r = -ENOMEM; + goto failure; } switch (adev->asic_type) { @@ -427,7 +428,7 @@ static int acp_hw_init(void *handle) r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS); if (r) - return r; + goto failure; if (adev->asic_type != CHIP_STONEY) { for (i = 0; i < ACP_DEVS ; i++) { @@ -435,7 +436,7 @@ static int acp_hw_init(void *handle) r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); if (r) { dev_err(dev, "Failed to add dev to genpd\n"); - return r; + goto failure; } } } @@ -454,7 +455,8 @@ static int acp_hw_init(void *handle) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); - return -ETIMEDOUT; + r = 
-ETIMEDOUT; + goto failure; } udelay(100); } @@ -471,7 +473,8 @@ static int acp_hw_init(void *handle) break; if (--count == 0) { dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); - return -ETIMEDOUT; + r = -ETIMEDOUT; + goto failure; } udelay(100); } @@ -480,6 +483,13 @@ static int acp_hw_init(void *handle) val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); return 0; + +failure: + kfree(i2s_pdata); + kfree(adev->acp.acp_res); + kfree(adev->acp.acp_cell); + kfree(adev->acp.acp_genpd); + return r; } /** -- GitLab From 7deb2dcb8963812742ed08420cfa4e23bbeda074 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Fri, 20 Sep 2019 17:57:59 -0500 Subject: [PATCH 0006/1304] tracing: Have error path in predicate_parse() free its allocated memory [ Upstream commit 96c5c6e6a5b6db592acae039fed54b5c8844cd35 ] In predicate_parse, there is an error path that is not going to out_free instead it returns directly which leads to a memory leak. Link: http://lkml.kernel.org/r/20190920225800.3870-1-navid.emamdoost@gmail.com Signed-off-by: Navid Emamdoost Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Sasha Levin --- kernel/trace/trace_events_filter.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index b949c3917c67..9be3d1d1fcb4 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -451,8 +451,10 @@ predicate_parse(const char *str, int nr_parens, int nr_preds, switch (*next) { case '(': /* #2 */ - if (top - op_stack > nr_parens) - return ERR_PTR(-EINVAL); + if (top - op_stack > nr_parens) { + ret = -EINVAL; + goto out_free; + } *(++top) = invert; continue; case '!': /* #3 */ -- GitLab From 5b8464dc9a6e81a16481549c77d0d341041e425e Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Fri, 6 Sep 2019 13:26:03 -0500 Subject: [PATCH 0007/1304] ath9k_htc: release allocated buffer if timed out [ 
Upstream commit 853acf7caf10b828102d92d05b5c101666a6142b ] In htc_config_pipe_credits, htc_setup_complete, and htc_connect_service if time out happens, the allocated buffer needs to be released. Otherwise there will be memory leak. Signed-off-by: Navid Emamdoost Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ath9k/htc_hst.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index d2e062eaf561..f705f0e1cb5b 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -173,6 +173,7 @@ static int htc_config_pipe_credits(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC credit config timeout\n"); + kfree_skb(skb); return -ETIMEDOUT; } @@ -208,6 +209,7 @@ static int htc_setup_complete(struct htc_target *target) time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); if (!time_left) { dev_err(target->dev, "HTC start timeout\n"); + kfree_skb(skb); return -ETIMEDOUT; } @@ -280,6 +282,7 @@ int htc_connect_service(struct htc_target *target, if (!time_left) { dev_err(target->dev, "Service connection timeout for: %d\n", service_connreq->service_id); + kfree_skb(skb); return -ETIMEDOUT; } -- GitLab From e0cf3ebfcf93dbce123b8bef00f549712efe1135 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Fri, 6 Sep 2019 13:59:30 -0500 Subject: [PATCH 0008/1304] ath9k: release allocated buffer if timed out [ Upstream commit 728c1e2a05e4b5fc52fab3421dce772a806612a2 ] In ath9k_wmi_cmd, the allocated network buffer needs to be released if timeout happens. Otherwise memory will be leaked. 
Signed-off-by: Navid Emamdoost Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ath9k/wmi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index e7a3127395be..066677bb83eb 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -339,6 +339,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); + kfree_skb(skb); return -ETIMEDOUT; } -- GitLab From 60e1b411bf0fd9fda2d2de7f45dc3b1d9960b85e Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Tue, 24 Sep 2019 23:23:56 -0500 Subject: [PATCH 0009/1304] drm/amd/display: prevent memory leak [ Upstream commit 104c307147ad379617472dd91a5bcb368d72bd6d ] In dcn*_create_resource_pool the allocated memory should be released if construct pool fails. Reviewed-by: Harry Wentland Signed-off-by: Navid Emamdoost Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 1 + drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 + 5 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 3f76e6019546..5a2f29bd3508 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -1001,6 +1001,7 @@ struct resource_pool *dce100_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index e5e9e92521e9..17d936c260d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -1344,6 +1344,7 @@ struct resource_pool *dce110_create_resource_pool( if (construct(num_virtual_links, dc, pool, asic_id)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 288129343c77..71adab8bf31b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -1287,6 +1287,7 @@ struct resource_pool *dce112_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index d43f37d99c7d..f0f2ce6da827 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -1076,6 +1076,7 @@ struct resource_pool *dce120_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 6b44ed3697a4..e6d556881140 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1361,6 +1361,7 @@ struct resource_pool *dcn10_create_resource_pool( if (construct(num_virtual_links, dc, pool)) return &pool->base; + kfree(pool); BREAK_TO_DEBUGGER(); return NULL; } -- GitLab From 4e986ab36ed11ecf21de9b5aab0e46ac3342df93 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Wed, 13 Mar 2019 13:55:11 +0800 Subject: [PATCH 
0010/1304] btrfs: inode: Verify inode mode to avoid NULL pointer dereference [ Upstream commit 6bf9e4bd6a277840d3fe8c5d5d530a1fbd3db592 ] [BUG] When accessing a file on a crafted image, btrfs can crash in block layer: BUG: unable to handle kernel NULL pointer dereference at 0000000000000008 PGD 136501067 P4D 136501067 PUD 124519067 PMD 0 CPU: 3 PID: 0 Comm: swapper/3 Not tainted 5.0.0-rc8-default #252 RIP: 0010:end_bio_extent_readpage+0x144/0x700 Call Trace: blk_update_request+0x8f/0x350 blk_mq_end_request+0x1a/0x120 blk_done_softirq+0x99/0xc0 __do_softirq+0xc7/0x467 irq_exit+0xd1/0xe0 call_function_single_interrupt+0xf/0x20 RIP: 0010:default_idle+0x1e/0x170 [CAUSE] The crafted image has a tricky corruption, the INODE_ITEM has a different type against its parent dir: item 20 key (268 INODE_ITEM 0) itemoff 2808 itemsize 160 generation 13 transid 13 size 1048576 nbytes 1048576 block group 0 mode 121644 links 1 uid 0 gid 0 rdev 0 sequence 9 flags 0x0(none) This mode number 0120000 means it's a symlink. But the dir item think it's still a regular file: item 8 key (264 DIR_INDEX 5) itemoff 3707 itemsize 32 location key (268 INODE_ITEM 0) type FILE transid 13 data_len 0 name_len 2 name: f4 item 40 key (264 DIR_ITEM 51821248) itemoff 1573 itemsize 32 location key (268 INODE_ITEM 0) type FILE transid 13 data_len 0 name_len 2 name: f4 For symlink, we don't set BTRFS_I(inode)->io_tree.ops and leave it empty, as symlink is only designed to have inlined extent, all handled by tree block read. Thus no need to trigger btrfs_submit_bio_hook() for inline file extent. However end_bio_extent_readpage() expects tree->ops populated, as it's reading regular data extent. This causes NULL pointer dereference. [FIX] This patch fixes the problem in two ways: - Verify inode mode against its dir item when looking up inode So in btrfs_lookup_dentry() if we find inode mode mismatch with dir item, we error out so that corrupted inode will not be accessed. 
- Verify inode mode when getting extent mapping Only regular file should have regular or preallocated extent. If we found regular/preallocated file extent for symlink or the rest, we error out before submitting the read bio. With this fix that crafted image can be rejected gracefully: BTRFS critical (device loop0): inode mode mismatch with dir: inode mode=0121644 btrfs type=7 dir type=1 Reported-by: Yoon Jungyeon Link: https://bugzilla.kernel.org/show_bug.cgi?id=202763 Reviewed-by: Nikolay Borisov Signed-off-by: Qu Wenruo Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/inode.c | 41 +++++++++++++++++++++++++++++------- fs/btrfs/tests/inode-tests.c | 1 + 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8dd2702ce859..7befb7c12bd3 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5553,12 +5553,14 @@ void btrfs_evict_inode(struct inode *inode) } /* - * this returns the key found in the dir entry in the location pointer. + * Return the key found in the dir entry in the location pointer, fill @type + * with BTRFS_FT_*, and return 0. + * * If no dir entries were found, returns -ENOENT. * If found a corrupted location in dir entry, returns -EUCLEAN. 
*/ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, - struct btrfs_key *location) + struct btrfs_key *location, u8 *type) { const char *name = dentry->d_name.name; int namelen = dentry->d_name.len; @@ -5591,6 +5593,8 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, __func__, name, btrfs_ino(BTRFS_I(dir)), location->objectid, location->type, location->offset); } + if (!ret) + *type = btrfs_dir_type(path->nodes[0], di); out: btrfs_free_path(path); return ret; @@ -5826,6 +5830,11 @@ static struct inode *new_simple_dir(struct super_block *s, return inode; } +static inline u8 btrfs_inode_type(struct inode *inode) +{ + return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; +} + struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) { struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); @@ -5833,18 +5842,31 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *sub_root = root; struct btrfs_key location; + u8 di_type = 0; int index; int ret = 0; if (dentry->d_name.len > BTRFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); - ret = btrfs_inode_by_name(dir, dentry, &location); + ret = btrfs_inode_by_name(dir, dentry, &location, &di_type); if (ret < 0) return ERR_PTR(ret); if (location.type == BTRFS_INODE_ITEM_KEY) { inode = btrfs_iget(dir->i_sb, &location, root, NULL); + if (IS_ERR(inode)) + return inode; + + /* Do extra check against inode mode with di_type */ + if (btrfs_inode_type(inode) != di_type) { + btrfs_crit(fs_info, +"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u", + inode->i_mode, btrfs_inode_type(inode), + di_type); + iput(inode); + return ERR_PTR(-EUCLEAN); + } return inode; } @@ -6455,11 +6477,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, return ERR_PTR(ret); } -static inline u8 btrfs_inode_type(struct inode *inode) -{ - return 
btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; -} - /* * utility function to add 'inode' into 'parent_inode' with * a give name and a given sequence number. @@ -6993,6 +7010,14 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, extent_start = found_key.offset; if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { + /* Only regular file could have regular/prealloc extent */ + if (!S_ISREG(inode->vfs_inode.i_mode)) { + ret = -EUCLEAN; + btrfs_crit(fs_info, + "regular/prealloc extent found for non-regular inode %llu", + btrfs_ino(inode)); + goto out; + } extent_end = extent_start + btrfs_file_extent_num_bytes(leaf, item); diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 64043f028820..648633aae968 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -232,6 +232,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) return ret; } + inode->i_mode = S_IFREG; BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; BTRFS_I(inode)->location.offset = 0; -- GitLab From 9a84bb13816fe3b361a75e10ee9821ab68aa36f5 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 15 Apr 2019 17:15:06 +0800 Subject: [PATCH 0011/1304] sctp: implement memory accounting on tx path [ Upstream commit 1033990ac5b2ab6cee93734cb6d301aa3a35bcaa ] Now when sending packets, sk_mem_charge() and sk_mem_uncharge() have been used to set sk_forward_alloc. We just need to call sk_wmem_schedule() to check if the allocated should be raised, and call sk_mem_reclaim() to check if the allocated should be reduced when it's under memory pressure. If sk_wmem_schedule() returns false, which means no memory is allowed to allocate, it will block and wait for memory to become available. 
Note different from tcp, sctp wait_for_buf happens before allocating any skb, so memory accounting check is done with the whole msg_len before it too. Reported-by: Matteo Croce Tested-by: Matteo Croce Acked-by: Neil Horman Acked-by: Marcelo Ricardo Leitner Signed-off-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/sctp/socket.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c93be3ba5df2..df4a7d7c5ec0 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1931,7 +1931,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc, if (sctp_wspace(asoc) < (int)msg_len) sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc)); - if (sctp_wspace(asoc) <= 0) { + if (sk_under_memory_pressure(sk)) + sk_mem_reclaim(sk); + + if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) { timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); if (err) @@ -8515,7 +8518,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, goto do_error; if (signal_pending(current)) goto do_interrupted; - if ((int)msg_len <= sctp_wspace(asoc)) + if (sk_under_memory_pressure(sk)) + sk_mem_reclaim(sk); + if ((int)msg_len <= sctp_wspace(asoc) && + sk_wmem_schedule(sk, msg_len)) break; /* Let another process have a go. Since we are going -- GitLab From cd823ab582225b2ce6eb37b9e22581a8d171a24a Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 18 Sep 2019 13:08:52 +0100 Subject: [PATCH 0012/1304] Btrfs: fix selftests failure due to uninitialized i_mode in test inodes [ Upstream commit 9f7fec0ba89108b9385f1b9fb167861224912a4a ] Some of the self tests create a test inode, setup some extents and then do calls to btrfs_get_extent() to test that the corresponding extent maps exist and are correct. 
However btrfs_get_extent(), since the 5.2 merge window, now errors out when it finds a regular or prealloc extent for an inode that does not correspond to a regular file (its ->i_mode is not S_IFREG). This causes the self tests to fail sometimes, specially when KASAN, slub_debug and page poisoning are enabled: $ modprobe btrfs modprobe: ERROR: could not insert 'btrfs': Invalid argument $ dmesg [ 9414.691648] Btrfs loaded, crc32c=crc32c-intel, debug=on, assert=on, integrity-checker=on, ref-verify=on [ 9414.692655] BTRFS: selftest: sectorsize: 4096 nodesize: 4096 [ 9414.692658] BTRFS: selftest: running btrfs free space cache tests [ 9414.692918] BTRFS: selftest: running extent only tests [ 9414.693061] BTRFS: selftest: running bitmap only tests [ 9414.693366] BTRFS: selftest: running bitmap and extent tests [ 9414.696455] BTRFS: selftest: running space stealing from bitmap to extent tests [ 9414.697131] BTRFS: selftest: running extent buffer operation tests [ 9414.697133] BTRFS: selftest: running btrfs_split_item tests [ 9414.697564] BTRFS: selftest: running extent I/O tests [ 9414.697583] BTRFS: selftest: running find delalloc tests [ 9415.081125] BTRFS: selftest: running find_first_clear_extent_bit test [ 9415.081278] BTRFS: selftest: running extent buffer bitmap tests [ 9415.124192] BTRFS: selftest: running inode tests [ 9415.124195] BTRFS: selftest: running btrfs_get_extent tests [ 9415.127909] BTRFS: selftest: running hole first btrfs_get_extent test [ 9415.128343] BTRFS critical (device (efault)): regular/prealloc extent found for non-regular inode 256 [ 9415.131428] BTRFS: selftest: fs/btrfs/tests/inode-tests.c:904 expected a real extent, got 0 This happens because the test inodes are created without ever initializing the i_mode field of the inode, and neither VFS's new_inode() nor the btrfs callback btrfs_alloc_inode() initialize the i_mode. 
Initialization of the i_mode is done through the various callbacks used by the VFS to create new inodes (regular files, directories, symlinks, tmpfiles, etc), which all call btrfs_new_inode() which in turn calls inode_init_owner(), which sets the inode's i_mode. Since the tests only uses new_inode() to create the test inodes, the i_mode was never initialized. This always happens on a VM I used with kasan, slub_debug and many other debug facilities enabled. It also happened to someone who reported this on bugzilla (on a 5.3-rc). Fix this by setting i_mode to S_IFREG at btrfs_new_test_inode(). Fixes: 6bf9e4bd6a2778 ("btrfs: inode: Verify inode mode to avoid NULL pointer dereference") Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204397 Signed-off-by: Filipe Manana Reviewed-by: Qu Wenruo Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/tests/btrfs-tests.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index 2eec1dd3803a..82d874b10438 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c @@ -38,7 +38,13 @@ static struct file_system_type test_type = { struct inode *btrfs_new_test_inode(void) { - return new_inode(test_mnt->mnt_sb); + struct inode *inode; + + inode = new_inode(test_mnt->mnt_sb); + if (inode) + inode_init_owner(inode, NULL, S_IFREG); + + return inode; } static int btrfs_init_test_fs(void) -- GitLab From 80c1e18c4cdd10192e6dc020e1b7dc11b6b9b89b Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Tue, 21 Jul 2020 20:18:03 -0600 Subject: [PATCH 0013/1304] PCI/ASPM: Disable ASPM on ASMedia ASM1083/1085 PCIe-to-PCI bridge commit b361663c5a40c8bc758b7f7f2239f7a192180e7c upstream. Recently ASPM handling was changed to allow ASPM on PCIe-to-PCI/PCI-X bridges. Unfortunately the ASMedia ASM1083/1085 PCIe to PCI bridge device doesn't seem to function properly with ASPM enabled. 
On an Asus PRIME H270-PRO motherboard, it causes errors like these: pcieport 0000:00:1c.0: AER: PCIe Bus Error: severity=Corrected, type=Data Link Layer, (Transmitter ID) pcieport 0000:00:1c.0: AER: device [8086:a292] error status/mask=00003000/00002000 pcieport 0000:00:1c.0: AER: [12] Timeout pcieport 0000:00:1c.0: AER: Corrected error received: 0000:00:1c.0 pcieport 0000:00:1c.0: AER: can't find device of ID00e0 In addition to flooding the kernel log, this also causes the machine to wake up immediately after suspend is initiated. The device advertises ASPM L0s and L1 support in the Link Capabilities register, but the ASMedia web page for ASM1083 [1] claims "No PCIe ASPM support". Windows 10 (build 2004) enables L0s, but it also logs correctable PCIe errors. Add a quirk to disable ASPM for this device. [1] https://www.asmedia.com.tw/eng/e_show_products.php?cate_index=169&item=114 [bhelgaas: commit log] Fixes: 66ff14e59e8a ("PCI/ASPM: Allow ASPM on links to PCIe-to-PCI/PCI-X Bridges") Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=208667 Link: https://lore.kernel.org/r/20200722021803.17958-1-hancockrwd@gmail.com Signed-off-by: Robert Hancock Signed-off-by: Bjorn Helgaas Signed-off-by: Greg Kroah-Hartman --- drivers/pci/quirks.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 0862cb633849..8f856657dac2 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2307,6 +2307,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); +static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev) +{ + pci_info(dev, "Disabling ASPM L0s/L1\n"); + pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); +} + +/* + * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the + * upstream PCIe root 
port when ASPM is enabled. At least L0s mode is affected; + * disable both L0s and L1 for now to be safe. + */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1); + /* * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain * Link bit cleared after starting the link retrain process to allow this -- GitLab From 7271d03240b02053205ee7fa33664a50c3a9d680 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Fri, 12 Jun 2020 17:08:33 +0800 Subject: [PATCH 0014/1304] 9p/trans_fd: Fix concurrency del of req_list in p9_fd_cancelled/p9_read_work commit 74d6a5d5662975aed7f25952f62efbb6f6dadd29 upstream. p9_read_work and p9_fd_cancelled may be called concurrently. In some cases, req->req_list may be deleted by both p9_read_work and p9_fd_cancelled. We can fix it by ignoring replies associated with a cancelled request and ignoring cancelled request if message has been received before lock. Link: http://lkml.kernel.org/r/20200612090833.36149-1-wanghai38@huawei.com Fixes: 60ff779c4abb ("9p: client: remove unused code and any reference to "cancelled" function") Cc: # v3.12+ Reported-by: syzbot+77a25acfa0382e06ab23@syzkaller.appspotmail.com Signed-off-by: Wang Hai Signed-off-by: Dominique Martinet Signed-off-by: Greg Kroah-Hartman --- net/9p/trans_fd.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index f868cf6fba79..d28c2cc9618f 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -377,6 +377,10 @@ static void p9_read_work(struct work_struct *work) if (m->rreq->status == REQ_STATUS_SENT) { list_del(&m->rreq->req_list); p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD); + } else if (m->rreq->status == REQ_STATUS_FLSHD) { + /* Ignore replies associated with a cancelled request. 
*/ + p9_debug(P9_DEBUG_TRANS, + "Ignore replies associated with a cancelled request\n"); } else { spin_unlock(&m->client->lock); p9_debug(P9_DEBUG_ERROR, @@ -718,11 +722,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) { p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + spin_lock(&client->lock); + /* Ignore cancelled request if message has been received + * before lock. + */ + if (req->status == REQ_STATUS_RCVD) { + spin_unlock(&client->lock); + return 0; + } + /* we haven't received a response for oldreq, * remove it from the list. */ - spin_lock(&client->lock); list_del(&req->req_list); + req->status = REQ_STATUS_FLSHD; spin_unlock(&client->lock); p9_req_put(req); -- GitLab From 8eff5a05f2c8db8c27eee1b67a93eb58bd3c2af5 Mon Sep 17 00:00:00 2001 From: Pi-Hsun Shih Date: Wed, 4 Dec 2019 16:13:07 +0800 Subject: [PATCH 0015/1304] wireless: Use offsetof instead of custom macro. commit 6989310f5d4327e8595664954edd40a7f99ddd0d upstream. Use offsetof to calculate offset of a field to take advantage of compiler built-in version when possible, and avoid UBSAN warning when compiling with Clang: ================================================================== UBSAN: Undefined behaviour in net/wireless/wext-core.c:525:14 member access within null pointer of type 'struct iw_point' CPU: 3 PID: 165 Comm: kworker/u16:3 Tainted: G S W 4.19.23 #43 Workqueue: cfg80211 __cfg80211_scan_done [cfg80211] Call trace: dump_backtrace+0x0/0x194 show_stack+0x20/0x2c __dump_stack+0x20/0x28 dump_stack+0x70/0x94 ubsan_epilogue+0x14/0x44 ubsan_type_mismatch_common+0xf4/0xfc __ubsan_handle_type_mismatch_v1+0x34/0x54 wireless_send_event+0x3cc/0x470 ___cfg80211_scan_done+0x13c/0x220 [cfg80211] __cfg80211_scan_done+0x28/0x34 [cfg80211] process_one_work+0x170/0x35c worker_thread+0x254/0x380 kthread+0x13c/0x158 ret_from_fork+0x10/0x18 =================================================================== Signed-off-by: Pi-Hsun Shih Reviewed-by: Nick 
Desaulniers Link: https://lore.kernel.org/r/20191204081307.138765-1-pihsun@chromium.org Signed-off-by: Johannes Berg Signed-off-by: Nick Desaulniers Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/wireless.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h index 86eca3208b6b..a2c006a364e0 100644 --- a/include/uapi/linux/wireless.h +++ b/include/uapi/linux/wireless.h @@ -74,6 +74,8 @@ #include /* for "struct sockaddr" et al */ #include /* for IFNAMSIZ and co... */ +#include /* for offsetof */ + /***************************** VERSION *****************************/ /* * This constant is used to know the availability of the wireless @@ -1090,8 +1092,7 @@ struct iw_event { /* iw_point events are special. First, the payload (extra data) come at * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second, * we omit the pointer, so start at an offset. */ -#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \ - (char *) NULL) +#define IW_EV_POINT_OFF offsetof(struct iw_point, length) #define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \ IW_EV_POINT_OFF) -- GitLab From 2fdddd5914129fdb9b1bc868776fd9568fb34589 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 18 Jun 2020 11:16:45 +0100 Subject: [PATCH 0016/1304] ARM: 8986/1: hw_breakpoint: Don't invoke overflow handler on uaccess watchpoints commit eec13b42d41b0f3339dcf0c4da43734427c68620 upstream. Unprivileged memory accesses generated by the so-called "translated" instructions (e.g. LDRT) in kernel mode can cause user watchpoints to fire unexpectedly. In such cases, the hw_breakpoint logic will invoke the user overflow handler which will typically raise a SIGTRAP back to the current task. This is futile when returning back to the kernel because (a) the signal won't have been delivered and (b) userspace can't handle the thing anyway. 
Avoid invoking the user overflow handler for watchpoints triggered by kernel uaccess routines, and instead single-step over the faulting instruction as we would if no overflow handler had been installed. Cc: Fixes: f81ef4a920c8 ("ARM: 6356/1: hw-breakpoint: add ARM backend for the hw-breakpoint framework") Reported-by: Luis Machado Tested-by: Luis Machado Signed-off-by: Will Deacon Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/kernel/hw_breakpoint.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 1d5fbf1d1c67..8a8470d36c65 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -688,6 +688,12 @@ static void disable_single_step(struct perf_event *bp) arch_install_hw_breakpoint(bp); } +static int watchpoint_fault_on_uaccess(struct pt_regs *regs, + struct arch_hw_breakpoint *info) +{ + return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER; +} + static void watchpoint_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { @@ -747,16 +753,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, } pr_debug("watchpoint fired: address = 0x%x\n", info->trigger); + + /* + * If we triggered a user watchpoint from a uaccess routine, + * then handle the stepping ourselves since userspace really + * can't help us with this. + */ + if (watchpoint_fault_on_uaccess(regs, info)) + goto step; + perf_bp_event(wp, regs); /* - * If no overflow handler is present, insert a temporary - * mismatch breakpoint so we can single-step over the - * watchpoint trigger. + * Defer stepping to the overflow handler if one is installed. + * Otherwise, insert a temporary mismatch breakpoint so that + * we can single-step over the watchpoint trigger. 
*/ - if (is_default_overflow_handler(wp)) - enable_single_step(wp, instruction_pointer(regs)); + if (!is_default_overflow_handler(wp)) + goto unlock; +step: + enable_single_step(wp, instruction_pointer(regs)); unlock: rcu_read_unlock(); } -- GitLab From 7b88c1ef512b2e4e08096773b35596c16678f038 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 30 Jul 2020 11:02:30 -0400 Subject: [PATCH 0017/1304] Revert "drm/amdgpu: Fix NULL dereference in dpm sysfs handlers" commit 87004abfbc27261edd15716515d89ab42198b405 upstream. This regressed some working configurations so revert it. Will fix this properly for 5.9 and backport then. This reverts commit 38e0c89a19fd13f28d2b4721035160a3e66e270b. Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 7bea8ba89e88..e63a253eb425 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -529,7 +529,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, while (isspace(*++tmp_str)); - while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { + while (tmp_str[0]) { + sub_str = strsep(&tmp_str, delimiter); ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); if (ret) return -EINVAL; @@ -629,7 +630,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) memcpy(buf_cpy, buf, bytes); buf_cpy[bytes] = '\0'; tmp = buf_cpy; - while ((sub_str = strsep(&tmp, delimiter)) != NULL) { + while (tmp[0]) { + sub_str = strsep(&tmp, delimiter); if (strlen(sub_str)) { ret = kstrtol(sub_str, 0, &level); if (ret) @@ -880,7 +882,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, i++; memcpy(buf_cpy, buf, count-i); tmp_str = buf_cpy; - while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { + while (tmp_str[0]) { + 
sub_str = strsep(&tmp_str, delimiter); ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
-EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info = {}; + struct drm_amdgpu_info_device dev_info; uint64_t vm_size; + memset(&dev_info, 0, sizeof(dev_info)); dev_info.device_id = dev->pdev->device; dev_info.chip_rev = adev->rev_id; dev_info.external_rev = adev->external_rev_id; -- GitLab From 80512b95fc8b372bb312a6a36aea1435ebb69761 Mon Sep 17 00:00:00 2001 From: Steve Cohen Date: Mon, 20 Jul 2020 18:30:50 -0400 Subject: [PATCH 0019/1304] drm: hold gem reference until object is no longer accessed commit 8490d6a7e0a0a6fab5c2d82d57a3937306660864 upstream. A use-after-free in drm_gem_open_ioctl can happen if the GEM object handle is closed between the idr lookup and retrieving the size from said object since a local reference is not being held at that point. Hold the local reference while the object can still be accessed to fix this and plug the potential security hole. Signed-off-by: Steve Cohen Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/1595284250-31580-1-git-send-email-cohens@codeaurora.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_gem.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index bf90625df3c5..ac545c88a6f3 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -731,9 +731,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data, * @file_priv: drm file-private structure * * Open an object using the global name, returning a handle and the size. - * - * This handle (of course) holds a reference to the object, so the object - * will not go away until the handle is deleted. */ int drm_gem_open_ioctl(struct drm_device *dev, void *data, @@ -758,14 +755,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, /* drm_gem_handle_create_tail unlocks dev->object_name_lock. 
*/ ret = drm_gem_handle_create_tail(file_priv, obj, &handle); - drm_gem_object_put_unlocked(obj); if (ret) - return ret; + goto err; args->handle = handle; args->size = obj->size; - return 0; +err: + drm_gem_object_put_unlocked(obj); + return ret; } /** -- GitLab From 24578a23b27225dbd913ff3036cd41959370d902 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Thu, 30 Jul 2020 15:20:26 -0400 Subject: [PATCH 0020/1304] rds: Prevent kernel-infoleak in rds_notify_queue_get() commit bbc8a99e952226c585ac17477a85ef1194501762 upstream. rds_notify_queue_get() is potentially copying uninitialized kernel stack memory to userspace since the compiler may leave a 4-byte hole at the end of `cmsg`. In 2016 we tried to fix this issue by doing `= { 0 };` on `cmsg`, which unfortunately does not always initialize that 4-byte hole. Fix it by using memset() instead. Cc: stable@vger.kernel.org Fixes: f037590fff30 ("rds: fix a leak of kernel memory") Fixes: bdbe6fbc6a2f ("RDS: recv.c") Suggested-by: Dan Carpenter Signed-off-by: Peilin Ye Acked-by: Santosh Shilimkar Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/rds/recv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/rds/recv.c b/net/rds/recv.c index c0b945516cdb..3ca278988b52 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -455,12 +455,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) { struct rds_notifier *notifier; - struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ + struct rds_rdma_notify cmsg; unsigned int count = 0, max_messages = ~0U; unsigned long flags; LIST_HEAD(copy); int err = 0; + memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */ /* put_cmsg copies to user space and thus may sleep. 
We can't do this * with rs_lock held, so first grab as many notifications as we can stuff -- GitLab From ab6291837dcd31b595f1a867768cc73661a6da9e Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Thu, 5 Sep 2019 17:32:48 -0700 Subject: [PATCH 0021/1304] xfs: fix missed wakeup on l_flush_wait commit cdea5459ce263fbc963657a7736762ae897a8ae6 upstream. The code in xlog_wait uses the spinlock to make adding the task to the wait queue, and setting the task state to UNINTERRUPTIBLE atomic with respect to the waker. Doing the wakeup after releasing the spinlock opens up the following race condition: Task 1 task 2 add task to wait queue wake up task set task state to UNINTERRUPTIBLE This issue was found through code inspection as a result of kworkers being observed stuck in UNINTERRUPTIBLE state with an empty wait queue. It is rare and largely unreproducable. Simply moving the spin_unlock to after the wake_up_all results in the waker not being able to see a task on the waitqueue before it has set its state to UNINTERRUPTIBLE. This bug dates back to the conversion of this code to generic waitqueue infrastructure from a counting semaphore back in 2008 which didn't place the wakeups consistently w.r.t. to the relevant spin locks. [dchinner: Also fix a similar issue in the shutdown path on xc_commit_wait. Update commit log with more details of the issue.] Fixes: d748c62367eb ("[XFS] Convert l_flushsema to a sv_t") Reported-by: Chris Mason Signed-off-by: Rik van Riel Signed-off-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. 
Wong Cc: stable@vger.kernel.org # 4.9.x-4.19.x [modified for contextual change near xlog_state_do_callback()] Signed-off-by: Samuel Mendoza-Jonas Reviewed-by: Frank van der Linden Reviewed-by: Suraj Jitindar Singh Reviewed-by: Benjamin Herrenschmidt Reviewed-by: Anchal Agarwal Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_log.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 7bba551cbf90..8b1b0862e869 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -2712,7 +2712,6 @@ xlog_state_do_callback( int funcdidcallbacks; /* flag: function did callbacks */ int repeats; /* for issuing console warnings if * looping too many times */ - int wake = 0; spin_lock(&log->l_icloglock); first_iclog = iclog = log->l_iclog; @@ -2914,11 +2913,9 @@ xlog_state_do_callback( #endif if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) - wake = 1; - spin_unlock(&log->l_icloglock); - - if (wake) wake_up_all(&log->l_flush_wait); + + spin_unlock(&log->l_icloglock); } @@ -4026,7 +4023,9 @@ xfs_log_force_umount( * item committed callback functions will do this again under lock to * avoid races. */ + spin_lock(&log->l_cilp->xc_push_lock); wake_up_all(&log->l_cilp->xc_commit_wait); + spin_unlock(&log->l_cilp->xc_push_lock); xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); #ifdef XFSERRORDEBUG -- GitLab From fca9ee21e9777c8e0c59660a6af8e4b10f93b968 Mon Sep 17 00:00:00 2001 From: Xiyu Yang Date: Sat, 25 Apr 2020 21:06:25 +0800 Subject: [PATCH 0022/1304] net/x25: Fix x25_neigh refcnt leak when x25 disconnect commit 4becb7ee5b3d2829ed7b9261a245a77d5b7de902 upstream. x25_connect() invokes x25_get_neigh(), which returns a reference of the specified x25_neigh object to "x25->neighbour" with increased refcnt. When x25 connect success and returns, the reference still be hold by "x25->neighbour", so the refcount should be decreased in x25_disconnect() to keep refcount balanced. 
The reference counting issue happens in x25_disconnect(), which forgets to decrease the refcnt increased by x25_get_neigh() in x25_connect(), causing a refcnt leak. Fix this issue by calling x25_neigh_put() before x25_disconnect() returns. Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/x25/x25_subr.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index 743103786652..d34a874177d5 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c @@ -362,6 +362,10 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause, sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } + read_lock_bh(&x25_list_lock); + x25_neigh_put(x25->neighbour); + x25->neighbour = NULL; + read_unlock_bh(&x25_list_lock); } /* -- GitLab From 9a5e3aba2c1ce1055a04b70b26e86c28d5eada82 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 28 Apr 2020 16:12:08 +0800 Subject: [PATCH 0023/1304] net/x25: Fix null-ptr-deref in x25_disconnect commit 8999dc89497ab1c80d0718828e838c7cd5f6bffe upstream. We should check null before do x25_neigh_put in x25_disconnect, otherwise may cause null-ptr-deref like this: #include #include int main() { int sck_x25; sck_x25 = socket(AF_X25, SOCK_SEQPACKET, 0); close(sck_x25); return 0; } BUG: kernel NULL pointer dereference, address: 00000000000000d8 CPU: 0 PID: 4817 Comm: t2 Not tainted 5.7.0-rc3+ #159 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.9.3- RIP: 0010:x25_disconnect+0x91/0xe0 Call Trace: x25_release+0x18a/0x1b0 __sock_release+0x3d/0xc0 sock_close+0x13/0x20 __fput+0x107/0x270 ____fput+0x9/0x10 task_work_run+0x6d/0xb0 exit_to_usermode_loop+0x102/0x110 do_syscall_64+0x23c/0x260 entry_SYSCALL_64_after_hwframe+0x49/0xb3 Reported-by: syzbot+6db548b615e5aeefdce2@syzkaller.appspotmail.com Fixes: 4becb7ee5b3d ("net/x25: Fix x25_neigh refcnt leak when x25 disconnect") Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/x25/x25_subr.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index d34a874177d5..f3d34582581b 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c @@ -362,10 +362,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause, sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } - read_lock_bh(&x25_list_lock); - x25_neigh_put(x25->neighbour); - x25->neighbour = NULL; - read_unlock_bh(&x25_list_lock); + if (x25->neighbour) { + read_lock_bh(&x25_list_lock); + x25_neigh_put(x25->neighbour); + x25->neighbour = NULL; + read_unlock_bh(&x25_list_lock); + } } /* -- GitLab From 8b2a6581c88f277d99ff6a530d590603c68426b2 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Fri, 17 Jul 2020 10:34:27 +0200 Subject: [PATCH 0024/1304] xfrm: Fix crash when the hold queue is used. [ Upstream commit 101dde4207f1daa1fda57d714814a03835dccc3f ] The commits "xfrm: Move dst->path into struct xfrm_dst" and "net: Create and use new helper xfrm_dst_child()." changed xfrm bundle handling under the assumption that xdst->path and dst->child are not a NULL pointer only if dst->xfrm is not a NULL pointer. That is true with one exception. If the xfrm hold queue is used to wait until a SA is installed by the key manager, we create a dummy bundle without a valid dst->xfrm pointer. The current xfrm bundle handling crashes in that case. Fix this by extending the NULL check of dst->xfrm with a test of the DST_XFRM_QUEUE flag. 
Fixes: 0f6c480f23f4 ("xfrm: Move dst->path into struct xfrm_dst") Fixes: b92cf4aab8e6 ("net: Create and use new helper xfrm_dst_child().") Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- include/net/xfrm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index f087c8d125b8..3a0b5de742e9 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1016,7 +1016,7 @@ struct xfrm_dst { static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst; return xdst->path; @@ -1028,7 +1028,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst) { #ifdef CONFIG_XFRM - if (dst->xfrm) { + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { struct xfrm_dst *xdst = (struct xfrm_dst *) dst; return xdst->child; } -- GitLab From fbd97d5b22086bff517c7fefdc29453e5894c4f6 Mon Sep 17 00:00:00 2001 From: Tanner Love Date: Mon, 27 Jul 2020 12:25:28 -0400 Subject: [PATCH 0025/1304] selftests/net: rxtimestamp: fix clang issues for target arch PowerPC [ Upstream commit 955cbe91bcf782c09afe369c95a20f0a4b6dcc3c ] The signedness of char is implementation-dependent. Some systems (including PowerPC and ARM) use unsigned char. Clang 9 threw: warning: result of comparison of constant -1 with expression of type \ 'char' is always true [-Wtautological-constant-out-of-range-compare] &arg_index)) != -1) { Tested: make -C tools/testing/selftests TARGETS="net" run_tests Fixes: 16e781224198 ("selftests/net: Add a test to validate behavior of rx timestamps") Signed-off-by: Tanner Love Acked-by: Willem de Bruijn Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- tools/testing/selftests/networking/timestamping/rxtimestamp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c index 7a573fb4c1c4..c6428f1ac22f 100644 --- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c @@ -328,8 +328,7 @@ int main(int argc, char **argv) bool all_tests = true; int arg_index = 0; int failures = 0; - int s, t; - char opt; + int s, t, opt; while ((opt = getopt_long(argc, argv, "", long_options, &arg_index)) != -1) { -- GitLab From 7846460c1d7d343e6996c00b9d12f61951cc1a93 Mon Sep 17 00:00:00 2001 From: Tanner Love Date: Mon, 27 Jul 2020 12:25:29 -0400 Subject: [PATCH 0026/1304] selftests/net: psock_fanout: fix clang issues for target arch PowerPC [ Upstream commit 64f9ede2274980076423583683d44480909b7a40 ] Clang 9 threw: warning: format specifies type 'unsigned short' but the argument has \ type 'int' [-Wformat] typeflags, PORT_BASE, PORT_BASE + port_off); Tested: make -C tools/testing/selftests TARGETS="net" run_tests Fixes: 77f65ebdca50 ("packet: packet fanout rollover during socket overload") Signed-off-by: Tanner Love Acked-by: Willem de Bruijn Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- tools/testing/selftests/net/psock_fanout.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index bd9b9632c72b..f496ba3b1cd3 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -364,7 +364,8 @@ static int test_datapath(uint16_t typeflags, int port_off, int fds[2], fds_udp[2][2], ret; fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n", - typeflags, PORT_BASE, PORT_BASE + port_off); + typeflags, (uint16_t)PORT_BASE, + (uint16_t)(PORT_BASE + port_off)); fds[0] = sock_fanout_open(typeflags, 0); fds[1] = sock_fanout_open(typeflags, 0); -- GitLab From 93bfba8143dfee8fa15ddd1064ae4dcd533eed81 Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Thu, 23 Jul 2020 01:13:19 +0200 Subject: [PATCH 0027/1304] sh: Fix validation of system call number [ Upstream commit 04a8a3d0a73f51c7c2da84f494db7ec1df230e69 ] The slow path for traced system call entries accessed a wrong memory location to get the number of the maximum allowed system call number. Renumber the numbered "local" label for the correct location to avoid collisions with actual local labels. Signed-off-by: Michael Karcher Tested-by: John Paul Adrian Glaubitz Fixes: f3a8308864f920d2 ("sh: Add a few missing irqflags tracing markers.") Signed-off-by: Rich Felker Signed-off-by: Sasha Levin --- arch/sh/kernel/entry-common.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 28cc61216b64..ed5b758c650d 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -203,7 +203,7 @@ syscall_trace_entry: mov.l @(OFF_R7,r15), r7 ! arg3 mov.l @(OFF_R3,r15), r3 ! syscall_nr ! - mov.l 2f, r10 ! Number of syscalls + mov.l 6f, r10 ! 
Number of syscalls cmp/hs r10, r3 bf syscall_call mov #-ENOSYS, r0 @@ -357,7 +357,7 @@ ENTRY(system_call) tst r9, r8 bf syscall_trace_entry ! - mov.l 2f, r8 ! Number of syscalls + mov.l 6f, r8 ! Number of syscalls cmp/hs r8, r3 bt syscall_badsys ! @@ -396,7 +396,7 @@ syscall_exit: #if !defined(CONFIG_CPU_SH2) 1: .long TRA #endif -2: .long NR_syscalls +6: .long NR_syscalls 3: .long sys_call_table 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave -- GitLab From ee9599af8355d0e527713b94bf86716049d57976 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Wed, 8 Jul 2020 11:10:01 +0300 Subject: [PATCH 0028/1304] net/mlx5: Verify Hardware supports requested ptp function on a given pin [ Upstream commit 071995c877a8646209d55ff8edddd2b054e7424c ] Fix a bug where driver did not verify Hardware pin capabilities for PTP functions. Fixes: ee7f12205abc ("net/mlx5e: Implement 1PPS support") Signed-off-by: Eran Ben Elisha Reviewed-by: Ariel Levkovich Signed-off-by: Saeed Mahameed Signed-off-by: Sasha Levin --- .../ethernet/mellanox/mlx5/core/lib/clock.c | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 54f1a40a68ed..d359e850dbf0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -366,10 +366,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp, return 0; } +enum { + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0), + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1), +}; + static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan) { - return (func == PTP_PF_PHYSYNC) ? 
-EOPNOTSUPP : 0; + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); + + switch (func) { + case PTP_PF_NONE: + return 0; + case PTP_PF_EXTTS: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN); + case PTP_PF_PEROUT: + return !(clock->pps_info.pin_caps[pin] & + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; } static const struct ptp_clock_info mlx5_ptp_clock_info = { -- GitLab From 3c1add359a3d30527f821729f4cfff591756c5d9 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 28 Jul 2020 14:10:29 +0200 Subject: [PATCH 0029/1304] net: lan78xx: add missing endpoint sanity check [ Upstream commit 8d8e95fd6d69d774013f51e5f2ee10c6e6d1fc14 ] Add the missing endpoint sanity check to prevent a NULL-pointer dereference should a malicious device lack the expected endpoints. Note that the driver has a broken endpoint-lookup helper, lan78xx_get_endpoints(), which can end up accepting interfaces in an altsetting without endpoints as long as *some* altsetting has a bulk-in and a bulk-out endpoint. Fixes: 55d7de9de6c3 ("Microchip's LAN7800 family USB 2/3 to 10/100/1000 Ethernet device driver") Cc: Woojung.Huh@microchip.com Signed-off-by: Johan Hovold Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/usb/lan78xx.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 92548887df2f..2dff233814ea 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3786,6 +3786,11 @@ static int lan78xx_probe(struct usb_interface *intf, netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); + if (intf->cur_altsetting->desc.bNumEndpoints < 3) { + ret = -ENODEV; + goto out3; + } + dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; -- GitLab From ff94414fb6bd10d123b519bfd29d7e03cea977e3 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 28 Jul 2020 14:10:30 +0200 Subject: [PATCH 0030/1304] net: lan78xx: fix transfer-buffer memory leak [ Upstream commit 63634aa679ba8b5e306ad0727120309ae6ba8a8e ] The interrupt URB transfer-buffer was never freed on disconnect or after probe errors. Fixes: 55d7de9de6c3 ("Microchip's LAN7800 family USB 2/3 to 10/100/1000 Ethernet device driver") Cc: Woojung.Huh@microchip.com Signed-off-by: Johan Hovold Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/usb/lan78xx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 2dff233814ea..d198f36785a4 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3815,6 +3815,7 @@ static int lan78xx_probe(struct usb_interface *intf, usb_fill_int_urb(dev->urb_intr, dev->udev, dev->pipe_intr, buf, maxp, intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; } } -- GitLab From 111462ba8ef5b5dc489d3446605571c2abadae7d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 24 Jul 2020 16:15:43 -0700 Subject: [PATCH 0031/1304] mlx4: disable device on shutdown [ Upstream commit 3cab8c65525920f00d8f4997b3e9bb73aecb3a8e ] It appears that not disabling a PCI device on .shutdown may lead to a Hardware Error with particular (perhaps buggy) BIOS versions: mlx4_en: eth0: Close port called mlx4_en 0000:04:00.0: removed PHC reboot: Restarting system {1}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 1 {1}[Hardware Error]: event severity: fatal {1}[Hardware Error]: Error 0, type: fatal {1}[Hardware Error]: section_type: PCIe error {1}[Hardware Error]: port_type: 4, root port {1}[Hardware Error]: version: 1.16 {1}[Hardware Error]: command: 0x4010, status: 0x0143 {1}[Hardware Error]: device_id: 0000:00:02.2 {1}[Hardware Error]: slot: 0 {1}[Hardware Error]: secondary_bus: 0x04 {1}[Hardware Error]: vendor_id: 0x8086, device_id: 0x2f06 {1}[Hardware Error]: class_code: 000604 {1}[Hardware Error]: bridge: secondary_status: 0x2000, control: 0x0003 {1}[Hardware Error]: aer_uncor_status: 0x00100000, aer_uncor_mask: 0x00000000 {1}[Hardware Error]: aer_uncor_severity: 0x00062030 {1}[Hardware Error]: TLP Header: 40000018 040000ff 791f4080 00000000 [hw error repeats] Kernel panic - not syncing: Fatal hardware error! 
CPU: 0 PID: 2189 Comm: reboot Kdump: loaded Not tainted 5.6.x-blabla #1 Hardware name: HP ProLiant DL380 Gen9/ProLiant DL380 Gen9, BIOS P89 05/05/2017 Fix the mlx4 driver. This is a very similar problem to what had been fixed in: commit 0d98ba8d70b0 ("scsi: hpsa: disable device during shutdown") to address https://bugzilla.kernel.org/show_bug.cgi?id=199779. Fixes: 2ba5fbd62b25 ("net/mlx4_core: Handle AER flow properly") Reported-by: Jake Lawrence Signed-off-by: Jakub Kicinski Reviewed-by: Saeed Mahameed Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx4/main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index f7825c7b92fe..8d7bb9a88967 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4311,12 +4311,14 @@ static void mlx4_pci_resume(struct pci_dev *pdev) static void mlx4_shutdown(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); mutex_unlock(&persist->interface_state_mutex); + mlx4_pci_disable_device(dev); } static const struct pci_error_handlers mlx4_err_handler = { -- GitLab From b7935969d226b3601241a3b6112e9139724b3a96 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 29 Jul 2020 12:26:45 +0300 Subject: [PATCH 0032/1304] mlxsw: core: Increase scope of RCU read-side critical section [ Upstream commit 7d8e8f3433dc8d1dc87c1aabe73a154978fb4c4d ] The lifetime of the Rx listener item ('rxl_item') is managed using RCU, but is dereferenced outside of RCU read-side critical section, which can lead to a use-after-free. Fix this by increasing the scope of the RCU read-side critical section. 
Fixes: 93c1edb27f9e ("mlxsw: Introduce Mellanox switch driver core") Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlxsw/core.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e180ec4f1a24..3cebea6f3e6a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1605,11 +1605,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, break; } } - rcu_read_unlock(); - if (!found) + if (!found) { + rcu_read_unlock(); goto drop; + } rxl->func(skb, local_port, rxl_item->priv); + rcu_read_unlock(); return; drop: -- GitLab From 685d55c516361cc1002fe870510ae067fecfb63c Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 29 Jul 2020 12:26:46 +0300 Subject: [PATCH 0033/1304] mlxsw: core: Free EMAD transactions using kfree_rcu() [ Upstream commit 3c8ce24b037648a5a15b85888b259a74b05ff97d ] The lifetime of EMAD transactions (i.e., 'struct mlxsw_reg_trans') is managed using RCU. They are freed using kfree_rcu() once the transaction ends. However, in case the transaction failed it is freed immediately after being removed from the active transactions list. This is problematic because it is still possible for a different CPU to dereference the transaction from an RCU read-side critical section while traversing the active transaction list in mlxsw_emad_rx_listener_func(). In which case, a use-after-free is triggered [1]. Fix this by freeing the transaction after a grace period by calling kfree_rcu(). 
[1] BUG: KASAN: use-after-free in mlxsw_emad_rx_listener_func+0x969/0xac0 drivers/net/ethernet/mellanox/mlxsw/core.c:671 Read of size 8 at addr ffff88800b7964e8 by task syz-executor.2/2881 CPU: 0 PID: 2881 Comm: syz-executor.2 Not tainted 5.8.0-rc4+ #44 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0xf6/0x16e lib/dump_stack.c:118 print_address_description.constprop.0+0x1c/0x250 mm/kasan/report.c:383 __kasan_report mm/kasan/report.c:513 [inline] kasan_report.cold+0x1f/0x37 mm/kasan/report.c:530 mlxsw_emad_rx_listener_func+0x969/0xac0 drivers/net/ethernet/mellanox/mlxsw/core.c:671 mlxsw_core_skb_receive+0x571/0x700 drivers/net/ethernet/mellanox/mlxsw/core.c:2061 mlxsw_pci_cqe_rdq_handle drivers/net/ethernet/mellanox/mlxsw/pci.c:595 [inline] mlxsw_pci_cq_tasklet+0x12a6/0x2520 drivers/net/ethernet/mellanox/mlxsw/pci.c:651 tasklet_action_common.isra.0+0x13f/0x3e0 kernel/softirq.c:550 __do_softirq+0x223/0x964 kernel/softirq.c:292 asm_call_on_stack+0x12/0x20 arch/x86/entry/entry_64.S:711 __run_on_irqstack arch/x86/include/asm/irq_stack.h:22 [inline] run_on_irqstack_cond arch/x86/include/asm/irq_stack.h:48 [inline] do_softirq_own_stack+0x109/0x140 arch/x86/kernel/irq_64.c:77 invoke_softirq kernel/softirq.c:387 [inline] __irq_exit_rcu kernel/softirq.c:417 [inline] irq_exit_rcu+0x16f/0x1a0 kernel/softirq.c:429 sysvec_apic_timer_interrupt+0x4e/0xd0 arch/x86/kernel/apic/apic.c:1091 asm_sysvec_apic_timer_interrupt+0x12/0x20 arch/x86/include/asm/idtentry.h:587 RIP: 0010:arch_local_irq_restore arch/x86/include/asm/irqflags.h:85 [inline] RIP: 0010:__raw_spin_unlock_irqrestore include/linux/spinlock_api_smp.h:160 [inline] RIP: 0010:_raw_spin_unlock_irqrestore+0x3b/0x40 kernel/locking/spinlock.c:191 Code: e8 2a c3 f4 fc 48 89 ef e8 12 96 f5 fc f6 c7 02 75 11 53 9d e8 d6 db 11 fd 65 ff 0d 1f 21 b3 56 5b 5d c3 e8 a7 d7 11 fd 53 9d ed 0f 1f 00 55 48 89 
fd 65 ff 05 05 21 b3 56 ff 74 24 08 48 8d RSP: 0018:ffff8880446ffd80 EFLAGS: 00000286 RAX: 0000000000000006 RBX: 0000000000000286 RCX: 0000000000000006 RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffffa94ecea9 RBP: ffff888012934408 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000001 R11: fffffbfff57be301 R12: 1ffff110088dffc1 R13: ffff888037b817c0 R14: ffff88802442415a R15: ffff888024424000 __do_sys_perf_event_open+0x1b5d/0x2bd0 kernel/events/core.c:11874 do_syscall_64+0x56/0xa0 arch/x86/entry/common.c:384 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x473dbd Code: Bad RIP value. RSP: 002b:00007f21e5e9cc28 EFLAGS: 00000246 ORIG_RAX: 000000000000012a RAX: ffffffffffffffda RBX: 000000000057bf00 RCX: 0000000000473dbd RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000020000040 RBP: 000000000057bf00 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000003 R11: 0000000000000246 R12: 000000000057bf0c R13: 00007ffd0493503f R14: 00000000004d0f46 R15: 00007f21e5e9cd80 Allocated by task 871: save_stack+0x1b/0x40 mm/kasan/common.c:48 set_track mm/kasan/common.c:56 [inline] __kasan_kmalloc mm/kasan/common.c:494 [inline] __kasan_kmalloc.constprop.0+0xc2/0xd0 mm/kasan/common.c:467 kmalloc include/linux/slab.h:555 [inline] kzalloc include/linux/slab.h:669 [inline] mlxsw_core_reg_access_emad+0x70/0x1410 drivers/net/ethernet/mellanox/mlxsw/core.c:1812 mlxsw_core_reg_access+0xeb/0x540 drivers/net/ethernet/mellanox/mlxsw/core.c:1991 mlxsw_sp_port_get_hw_xstats+0x335/0x7e0 drivers/net/ethernet/mellanox/mlxsw/spectrum.c:1130 update_stats_cache+0xf4/0x140 drivers/net/ethernet/mellanox/mlxsw/spectrum.c:1173 process_one_work+0xa3e/0x17a0 kernel/workqueue.c:2269 worker_thread+0x9e/0x1050 kernel/workqueue.c:2415 kthread+0x355/0x470 kernel/kthread.c:291 ret_from_fork+0x22/0x30 arch/x86/entry/entry_64.S:293 Freed by task 871: save_stack+0x1b/0x40 mm/kasan/common.c:48 set_track mm/kasan/common.c:56 [inline] kasan_set_free_info mm/kasan/common.c:316 
[inline] __kasan_slab_free+0x12c/0x170 mm/kasan/common.c:455 slab_free_hook mm/slub.c:1474 [inline] slab_free_freelist_hook mm/slub.c:1507 [inline] slab_free mm/slub.c:3072 [inline] kfree+0xe6/0x320 mm/slub.c:4052 mlxsw_core_reg_access_emad+0xd45/0x1410 drivers/net/ethernet/mellanox/mlxsw/core.c:1819 mlxsw_core_reg_access+0xeb/0x540 drivers/net/ethernet/mellanox/mlxsw/core.c:1991 mlxsw_sp_port_get_hw_xstats+0x335/0x7e0 drivers/net/ethernet/mellanox/mlxsw/spectrum.c:1130 update_stats_cache+0xf4/0x140 drivers/net/ethernet/mellanox/mlxsw/spectrum.c:1173 process_one_work+0xa3e/0x17a0 kernel/workqueue.c:2269 worker_thread+0x9e/0x1050 kernel/workqueue.c:2415 kthread+0x355/0x470 kernel/kthread.c:291 ret_from_fork+0x22/0x30 arch/x86/entry/entry_64.S:293 The buggy address belongs to the object at ffff88800b796400 which belongs to the cache kmalloc-512 of size 512 The buggy address is located 232 bytes inside of 512-byte region [ffff88800b796400, ffff88800b796600) The buggy address belongs to the page: page:ffffea00002de500 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 head:ffffea00002de500 order:2 compound_mapcount:0 compound_pincount:0 flags: 0x100000000010200(slab|head) raw: 0100000000010200 dead000000000100 dead000000000122 ffff88806c402500 raw: 0000000000000000 0000000000100010 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88800b796380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffff88800b796400: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb >ffff88800b796480: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff88800b796500: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff88800b796580: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb Fixes: caf7297e7ab5 ("mlxsw: core: Introduce support for asynchronous EMAD register access") Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlxsw/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 3cebea6f3e6a..d8e7ca48753f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1384,7 +1384,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, bulk_list, cb, cb_priv, tid); if (err) { - kfree(trans); + kfree_rcu(trans, rcu); return err; } return 0; -- GitLab From 5858ad8d6af5bdb01c3766febf3d360aa21bdf25 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Wed, 29 Jul 2020 16:36:32 -0500 Subject: [PATCH 0034/1304] ibmvnic: Fix IRQ mapping disposal in error path [ Upstream commit 27a2145d6f826d1fad9de06ac541b1016ced3427 ] RX queue IRQ mappings are disposed in both the TX IRQ and RX IRQ error paths. Fix this and dispose of TX IRQ mappings correctly in case of an error. Fixes: ea22d51a7831 ("ibmvnic: simplify and improve driver probe function") Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/ibm/ibmvnic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5e9e45befc87..d8115a9333e0 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2926,7 +2926,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) req_tx_irq_failed: for (j = 0; j < i; j++) { free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); - irq_dispose_mapping(adapter->rx_scrq[j]->irq); + irq_dispose_mapping(adapter->tx_scrq[j]->irq); } release_sub_crqs(adapter, 1); return rc; -- GitLab From 634d42cadc4771fbe3b70e0fa8b82b334fd41959 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 28 Jul 2020 21:09:12 -0700 Subject: [PATCH 0035/1304] bpf: Fix map leak in HASH_OF_MAPS map [ Upstream commit 1d4e1eab456e1ee92a94987499b211db05f900ea ] Fix HASH_OF_MAPS bug of not putting inner map pointer on bpf_map_elem_update() operation. This is due to per-cpu extra_elems optimization, which bypassed free_htab_elem() logic doing proper clean ups. Make sure that inner map is put properly in optimized case as well. 
Fixes: 8c290e60fa2a ("bpf: fix hashmap extra_elems logic") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200729040913.2815687-1-andriin@fb.com Signed-off-by: Sasha Levin --- kernel/bpf/hashtab.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 6fe72792312d..1b28fb006763 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -678,15 +678,20 @@ static void htab_elem_free_rcu(struct rcu_head *head) preempt_enable(); } -static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) { struct bpf_map *map = &htab->map; + void *ptr; if (map->ops->map_fd_put_ptr) { - void *ptr = fd_htab_map_get_ptr(map, l); - + ptr = fd_htab_map_get_ptr(map, l); map->ops->map_fd_put_ptr(ptr); } +} + +static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) +{ + htab_put_fd_value(htab, l); if (htab_is_prealloc(htab)) { __pcpu_freelist_push(&htab->freelist, &l->fnode); @@ -747,6 +752,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, */ pl_new = this_cpu_ptr(htab->extra_elems); l_new = *pl_new; + htab_put_fd_value(htab, old_elem); *pl_new = old_elem; } else { struct pcpu_freelist_node *l; -- GitLab From 37bccfa89559a70c044b5ccde3c916a91388e14a Mon Sep 17 00:00:00 2001 From: Remi Pommarel Date: Sat, 4 Jul 2020 15:50:07 +0200 Subject: [PATCH 0036/1304] mac80211: mesh: Free ie data when leaving mesh [ Upstream commit 6a01afcf8468d3ca2bd8bbb27503f60dcf643b20 ] At ieee80211_join_mesh() some ie data could have been allocated (see copy_mesh_setup()) and need to be cleaned up when leaving the mesh. 
This fixes the following kmemleak report: unreferenced object 0xffff0000116bc600 (size 128): comm "wpa_supplicant", pid 608, jiffies 4294898983 (age 293.484s) hex dump (first 32 bytes): 30 14 01 00 00 0f ac 04 01 00 00 0f ac 04 01 00 0............... 00 0f ac 08 00 00 00 00 c4 65 40 00 00 00 00 00 .........e@..... backtrace: [<00000000bebe439d>] __kmalloc_track_caller+0x1c0/0x330 [<00000000a349dbe1>] kmemdup+0x28/0x50 [<0000000075d69baa>] ieee80211_join_mesh+0x6c/0x3b8 [mac80211] [<00000000683bb98b>] __cfg80211_join_mesh+0x1e8/0x4f0 [cfg80211] [<0000000072cb507f>] nl80211_join_mesh+0x520/0x6b8 [cfg80211] [<0000000077e9bcf9>] genl_family_rcv_msg+0x374/0x680 [<00000000b1bd936d>] genl_rcv_msg+0x78/0x108 [<0000000022c53788>] netlink_rcv_skb+0xb0/0x1c0 [<0000000011af8ec9>] genl_rcv+0x34/0x48 [<0000000069e41f53>] netlink_unicast+0x268/0x2e8 [<00000000a7517316>] netlink_sendmsg+0x320/0x4c0 [<0000000069cba205>] ____sys_sendmsg+0x354/0x3a0 [<00000000e06bab0f>] ___sys_sendmsg+0xd8/0x120 [<0000000037340728>] __sys_sendmsg+0xa4/0xf8 [<000000004fed9776>] __arm64_sys_sendmsg+0x44/0x58 [<000000001c1e5647>] el0_svc_handler+0xd0/0x1a0 Fixes: c80d545da3f7 (mac80211: Let userspace enable and configure vendor specific path selection.) 
Signed-off-by: Remi Pommarel Link: https://lore.kernel.org/r/20200704135007.27292-1-repk@triplefau.lt Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin --- net/mac80211/cfg.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index cb7076d9a769..b6670e74aeb7 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2011,6 +2011,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) ieee80211_stop_mesh(sdata); mutex_lock(&sdata->local->mtx); ieee80211_vif_release_channel(sdata); + kfree(sdata->u.mesh.ie); mutex_unlock(&sdata->local->mtx); return 0; -- GitLab From 0535c43d369cf63e07a116a4c7e92d3b7b290806 Mon Sep 17 00:00:00 2001 From: Remi Pommarel Date: Sat, 4 Jul 2020 15:54:19 +0200 Subject: [PATCH 0037/1304] mac80211: mesh: Free pending skb when destroying a mpath [ Upstream commit 5e43540c2af0a0c0a18e39579b1ad49541f87506 ] A mpath object can hold reference on a list of skb that are waiting for mpath resolution to be sent. When destroying a mpath this skb list should be cleaned up in order to not leak memory. Fixing that kind of leak: unreferenced object 0xffff0000181c9300 (size 1088): comm "openvpn", pid 1782, jiffies 4295071698 (age 80.416s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 f9 80 36 00 00 00 00 00 ..........6..... 02 00 07 40 00 00 00 00 00 00 00 00 00 00 00 00 ...@............ 
backtrace: [<000000004bc6a443>] kmem_cache_alloc+0x1a4/0x2f0 [<000000002caaef13>] sk_prot_alloc.isra.39+0x34/0x178 [<00000000ceeaa916>] sk_alloc+0x34/0x228 [<00000000ca1f1d04>] inet_create+0x198/0x518 [<0000000035626b1c>] __sock_create+0x134/0x328 [<00000000a12b3a87>] __sys_socket+0xb0/0x158 [<00000000ff859f23>] __arm64_sys_socket+0x40/0x58 [<00000000263486ec>] el0_svc_handler+0xd0/0x1a0 [<0000000005b5157d>] el0_svc+0x8/0xc unreferenced object 0xffff000012973a40 (size 216): comm "openvpn", pid 1782, jiffies 4295082137 (age 38.660s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ 00 c0 06 16 00 00 ff ff 00 93 1c 18 00 00 ff ff ................ backtrace: [<000000004bc6a443>] kmem_cache_alloc+0x1a4/0x2f0 [<0000000023c8c8f9>] __alloc_skb+0xc0/0x2b8 [<000000007ad950bb>] alloc_skb_with_frags+0x60/0x320 [<00000000ef90023a>] sock_alloc_send_pskb+0x388/0x3c0 [<00000000104fb1a3>] sock_alloc_send_skb+0x1c/0x28 [<000000006919d2dd>] __ip_append_data+0xba4/0x11f0 [<0000000083477587>] ip_make_skb+0x14c/0x1a8 [<0000000024f3d592>] udp_sendmsg+0xaf0/0xcf0 [<000000005aabe255>] inet_sendmsg+0x5c/0x80 [<000000008651ea08>] __sys_sendto+0x15c/0x218 [<000000003505c99b>] __arm64_sys_sendto+0x74/0x90 [<00000000263486ec>] el0_svc_handler+0xd0/0x1a0 [<0000000005b5157d>] el0_svc+0x8/0xc Fixes: 2bdaf386f99c (mac80211: mesh: move path tables into if_mesh) Signed-off-by: Remi Pommarel Link: https://lore.kernel.org/r/20200704135419.27703-1-repk@triplefau.lt Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin --- net/mac80211/mesh_pathtbl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index ac1f5db52994..4fc720c77e37 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -532,6 +532,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, del_timer_sync(&mpath->timer); atomic_dec(&sdata->u.mesh.mpaths); atomic_dec(&tbl->entries); + mesh_path_flush_pending(mpath); 
kfree_rcu(mpath, rcu); } -- GitLab From 53f941777b9df64fa70fad775efabfcfb5db92c4 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Thu, 30 Jul 2020 08:37:01 -0700 Subject: [PATCH 0038/1304] arm64/alternatives: move length validation inside the subsection [ Upstream commit 966a0acce2fca776391823381dba95c40e03c339 ] Commit f7b93d42945c ("arm64/alternatives: use subsections for replacement sequences") breaks LLVM's integrated assembler, because due to its one-pass design, it cannot compute instruction sequence lengths before the layout for the subsection has been finalized. This change fixes the build by moving the .org directives inside the subsection, so they are processed after the subsection layout is known. Fixes: f7b93d42945c ("arm64/alternatives: use subsections for replacement sequences") Signed-off-by: Sami Tolvanen Link: https://github.com/ClangBuiltLinux/linux/issues/1078 Link: https://lore.kernel.org/r/20200730153701.3892953-1-samitolvanen@google.com Signed-off-by: Will Deacon Signed-off-by: Sasha Levin --- arch/arm64/include/asm/alternative.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 849d891c60a8..844f05b23115 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { } "663:\n\t" \ newinstr "\n" \ "664:\n\t" \ - ".previous\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n" \ + ".org . 
- (662b-661b) + (664b-663b)\n\t" \ + ".previous\n" \ ".endif\n" #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \ -- GitLab From 0be9b57b5bb5a1e643624ea68decc4a8a14ffda6 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 30 Jul 2020 10:56:49 +0100 Subject: [PATCH 0039/1304] arm64: csum: Fix handling of bad packets [ Upstream commit 05fb3dbda187bbd9cc1cd0e97e5d6595af570ac6 ] Although iph is expected to point to at least 20 bytes of valid memory, ihl may be bogus, for example on reception of a corrupt packet. If it happens to be less than 5, we really don't want to run away and dereference 16GB worth of memory until it wraps back to exactly zero... Fixes: 0e455d8e80aa ("arm64: Implement optimised IP checksum helpers") Reported-by: guodeqing Signed-off-by: Robin Murphy Signed-off-by: Will Deacon Signed-off-by: Sasha Levin --- arch/arm64/include/asm/checksum.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h index 0b6f5a7d4027..fd11e0d70e44 100644 --- a/arch/arm64/include/asm/checksum.h +++ b/arch/arm64/include/asm/checksum.h @@ -30,16 +30,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { __uint128_t tmp; u64 sum; + int n = ihl; /* we want it signed */ tmp = *(const __uint128_t *)iph; iph += 16; - ihl -= 4; + n -= 4; tmp += ((tmp >> 64) | (tmp << 64)); sum = tmp >> 64; do { sum += *(const u32 *)iph; iph += 4; - } while (--ihl); + } while (--n > 0); sum += ((sum >> 32) | (sum << 32)); return csum_fold((__force u32)(sum >> 32)); -- GitLab From 5df9e5613d1c51e16b1501a4c75e139fbbe0fb6c Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Mon, 27 Jul 2020 20:48:55 +0000 Subject: [PATCH 0040/1304] Bluetooth: fix kernel oops in store_pending_adv_report [ Upstream commit a2ec905d1e160a33b2e210e45ad30445ef26ce0e ] Fix kernel oops observed when an ext adv data is larger than 31 bytes. 
This can be reproduced by setting up an advertiser with advertisement larger than 31 bytes. The issue is not sensitive to the advertisement content. In particular, this was reproduced with an advertisement of 229 bytes filled with 'A'. See stack trace below. This is fixed by not catching ext_adv as legacy adv are only cached to be able to concatenate a scanable adv with its scan response before sending it up through mgmt. With ext_adv, this is no longer necessary. general protection fault: 0000 [#1] SMP PTI CPU: 6 PID: 205 Comm: kworker/u17:0 Not tainted 5.4.0-37-generic #41-Ubuntu Hardware name: Dell Inc. XPS 15 7590/0CF6RR, BIOS 1.7.0 05/11/2020 Workqueue: hci0 hci_rx_work [bluetooth] RIP: 0010:hci_bdaddr_list_lookup+0x1e/0x40 [bluetooth] Code: ff ff e9 26 ff ff ff 0f 1f 44 00 00 0f 1f 44 00 00 55 48 8b 07 48 89 e5 48 39 c7 75 0a eb 24 48 8b 00 48 39 f8 74 1c 44 8b 06 <44> 39 40 10 75 ef 44 0f b7 4e 04 66 44 39 48 14 75 e3 38 50 16 75 RSP: 0018:ffffbc6a40493c70 EFLAGS: 00010286 RAX: 4141414141414141 RBX: 000000000000001b RCX: 0000000000000000 RDX: 0000000000000000 RSI: ffff9903e76c100f RDI: ffff9904289d4b28 RBP: ffffbc6a40493c70 R08: 0000000093570362 R09: 0000000000000000 R10: 0000000000000000 R11: ffff9904344eae38 R12: ffff9904289d4000 R13: 0000000000000000 R14: 00000000ffffffa3 R15: ffff9903e76c100f FS: 0000000000000000(0000) GS:ffff990434580000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007feed125a000 CR3: 00000001b860a003 CR4: 00000000003606e0 Call Trace: process_adv_report+0x12e/0x560 [bluetooth] hci_le_meta_evt+0x7b2/0xba0 [bluetooth] hci_event_packet+0x1c29/0x2a90 [bluetooth] hci_rx_work+0x19b/0x360 [bluetooth] process_one_work+0x1eb/0x3b0 worker_thread+0x4d/0x400 kthread+0x104/0x140 Fixes: c215e9397b00 ("Bluetooth: Process extended ADV report event") Reported-by: Andy Nguyen Reported-by: Linus Torvalds Reported-by: Balakrishna Godavarthi Signed-off-by: Alain Michaud Tested-by: Sonny Sasaka Acked-by: Marcel 
Holtmann Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- net/bluetooth/hci_event.c | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index a044e6bb12b8..cdb92b129906 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1229,6 +1229,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, { struct discovery_state *d = &hdev->discovery; + if (len > HCI_MAX_AD_LENGTH) + return; + bacpy(&d->last_adv_addr, bdaddr); d->last_adv_addr_type = bdaddr_type; d->last_adv_rssi = rssi; @@ -5116,7 +5119,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *direct_addr, - u8 direct_addr_type, s8 rssi, u8 *data, u8 len) + u8 direct_addr_type, s8 rssi, u8 *data, u8 len, + bool ext_adv) { struct discovery_state *d = &hdev->discovery; struct smp_irk *irk; @@ -5138,6 +5142,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, return; } + if (!ext_adv && len > HCI_MAX_AD_LENGTH) { + bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); + return; + } + /* Find the end of the data in case the report contains padded zero * bytes at the end causing an invalid length value. * @@ -5197,7 +5206,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, */ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type, direct_addr); - if (conn && type == LE_ADV_IND) { + if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { /* Store report for later inclusion by * mgmt_device_connected */ @@ -5251,7 +5260,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * event or send an immediate device found event if the data * should not be stored for later. 
*/ - if (!has_pending_adv_report(hdev)) { + if (!ext_adv && !has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. */ @@ -5286,7 +5295,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* If the new report will trigger a SCAN_REQ store it for * later merging. */ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; @@ -5326,7 +5336,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, - ev->data, ev->length); + ev->data, ev->length, false); } else { bt_dev_err(hdev, "Dropping invalid advertising data"); } @@ -5400,7 +5410,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) if (legacy_evt_type != LE_ADV_INVALID) { process_adv_report(hdev, legacy_evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, ev->rssi, - ev->data, ev->length); + ev->data, ev->length, + !(evt_type & LE_EXT_ADV_LEGACY_PDU)); } ptr += sizeof(*ev) + ev->length + 1; @@ -5598,7 +5609,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, &ev->direct_addr, - ev->direct_addr_type, ev->rssi, NULL, 0); + ev->direct_addr_type, ev->rssi, NULL, 0, + false); ptr += sizeof(*ev); } -- GitLab From 0a48de95229a6de22913fd7670fbff67c4fd8183 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Thu, 30 Jul 2020 15:30:00 +0800 Subject: [PATCH 0041/1304] net: gemini: Fix missing clk_disable_unprepare() in error path of gemini_ethernet_port_probe() [ Upstream commit 85496a29224188051b6135eb38da8afd4c584765 ] Fix the missing clk_disable_unprepare() before return from gemini_ethernet_port_probe() in the error handling case. 
Fixes: 4d5ae32f5e1e ("net: ethernet: Add a driver for Gemini gigabit ethernet") Reported-by: Hulk Robot Signed-off-by: Wang Hai Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/cortina/gemini.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 01a212097836..f402af39da42 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2451,6 +2451,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); + clk_disable_unprepare(port->pclk); return PTR_ERR(port->reset); } reset_control_reset(port->reset); @@ -2506,8 +2507,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) IRQF_SHARED, port_names[port->id], port); - if (ret) + if (ret) { + clk_disable_unprepare(port->pclk); return ret; + } ret = register_netdev(netdev); if (!ret) { -- GitLab From eb3a903d100ed3bbd338be38cc4f3170935e3404 Mon Sep 17 00:00:00 2001 From: Xin Xiong Date: Thu, 30 Jul 2020 18:29:41 +0800 Subject: [PATCH 0042/1304] net/mlx5e: fix bpf_prog reference count leaks in mlx5e_alloc_rq [ Upstream commit e692139e6af339a1495ef401b2d95f7f9d1c7a44 ] The function invokes bpf_prog_inc(), which increases the reference count of a bpf_prog object "rq->xdp_prog" if the object isn't NULL. The refcount leak issues take place in two error handling paths. When either mlx5_wq_ll_create() or mlx5_wq_cyc_create() fails, the function simply returns the error code and forgets to drop the reference count increased earlier, causing a reference count leak of "rq->xdp_prog". Fix this issue by jumping to the error handling path err_rq_wq_destroy while either function fails. 
Fixes: 422d4c401edd ("net/mlx5e: RX, Split WQ objects for different RQ types") Signed-off-by: Xin Xiong Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Signed-off-by: Saeed Mahameed Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7e6706333fa8..51edc507b7b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -519,7 +519,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; @@ -564,7 +564,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); if (err) - return err; + goto err_rq_wq_destroy; rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; -- GitLab From e2ccd43b7a6f46f60c8ab2d6dc6db3acf2b837e6 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 13 Jul 2020 13:05:13 +0200 Subject: [PATCH 0043/1304] usb: hso: Fix debug compile warning on sparc32 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e0484010ec05191a8edf980413fc92f28050c1cc ] On sparc32, tcflag_t is "unsigned long", unlike on all other architectures, where it is "unsigned int": drivers/net/usb/hso.c: In function ‘hso_serial_set_termios’: include/linux/kern_levels.h:5:18: warning: format ‘%d’ expects argument of type ‘unsigned int’, but argument 4 has type ‘tcflag_t {aka long unsigned int}’ [-Wformat=] drivers/net/usb/hso.c:1393:3: note: in expansion of macro ‘hso_dbg’ hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", ^~~~~~~ include/linux/kern_levels.h:5:18: warning: format ‘%d’ expects argument of type 
‘unsigned int’, but argument 5 has type ‘tcflag_t {aka long unsigned int}’ [-Wformat=] drivers/net/usb/hso.c:1393:3: note: in expansion of macro ‘hso_dbg’ hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", ^~~~~~~ As "unsigned long" is 32-bit on sparc32, fix this by casting all tcflag_t parameters to "unsigned int". While at it, use "%u" to format unsigned numbers. Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/usb/hso.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 5251c5f6f96e..61b9d3368148 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1403,8 +1403,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old) unsigned long flags; if (old) - hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n", - tty->termios.c_cflag, old->c_cflag); + hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n", + (unsigned int)tty->termios.c_cflag, + (unsigned int)old->c_cflag); /* the actual setup */ spin_lock_irqsave(&serial->serial_lock, flags); -- GitLab From d042d9abc1f61ea5d8c6a527efb6f701a20bb0b5 Mon Sep 17 00:00:00 2001 From: Laurence Oberman Date: Tue, 14 Jul 2020 18:08:05 -0400 Subject: [PATCH 0044/1304] qed: Disable "MFW indication via attention" SPAM every 5 minutes [ Upstream commit 1d61e21852d3161f234b9656797669fe185c251b ] This is likely firmware causing this but its starting to annoy customers. Change the message level to verbose to prevent the spam. Note that this seems to only show up with ISCSI enabled on the HBA via the qedi driver. Signed-off-by: Laurence Oberman Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/qlogic/qed/qed_int.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index f9e475075d3e..61d5d7654568 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1015,7 +1015,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) index, attn_bits, attn_acks, asserted_bits, deasserted_bits, p_sb_attn_sw->known_attn); } else if (asserted_bits == 0x100) { - DP_INFO(p_hwfn, "MFW indication via attention\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication via attention\n"); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "MFW indication [deassertion]\n"); -- GitLab From 5fa5e4def87289197d7aa04d0c45f0a9fea1a14e Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Sat, 18 Jul 2020 00:31:49 -0500 Subject: [PATCH 0045/1304] nfc: s3fwrn5: add missing release on skb in s3fwrn5_recv_frame [ Upstream commit 1e8fd3a97f2d83a7197876ceb4f37b4c2b00a0f3 ] The implementation of s3fwrn5_recv_frame() is supposed to consume skb on all execution paths. Release skb before returning -ENODEV. Signed-off-by: Navid Emamdoost Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/nfc/s3fwrn5/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index 9d9c8d57a042..64b58455e620 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -209,6 +209,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb, case S3FWRN5_MODE_FW: return s3fwrn5_fw_recv_frame(ndev, skb); default: + kfree_skb(skb); return -ENODEV; } } -- GitLab From ddce1f2d9dc2973ccea342b8154a33a6b22fc557 Mon Sep 17 00:00:00 2001 From: Liam Beguin Date: Sat, 18 Jul 2020 16:10:21 -0400 Subject: [PATCH 0046/1304] parisc: add support for cmpxchg on u8 pointers [ Upstream commit b344d6a83d01c52fddbefa6b3b4764da5b1022a0 ] The kernel test bot reported[1] that using set_mask_bits on a u8 causes the following issue on parisc: hppa-linux-ld: drivers/phy/ti/phy-tusb1210.o: in function `tusb1210_probe': >> (.text+0x2f4): undefined reference to `__cmpxchg_called_with_bad_pointer' >> hppa-linux-ld: (.text+0x324): undefined reference to `__cmpxchg_called_with_bad_pointer' hppa-linux-ld: (.text+0x354): undefined reference to `__cmpxchg_called_with_bad_pointer' Add support for cmpxchg on u8 pointers. 
[1] https://lore.kernel.org/patchwork/patch/1272617/#1468946 Reported-by: kernel test robot Signed-off-by: Liam Beguin Tested-by: Dave Anglin Signed-off-by: Helge Deller Signed-off-by: Sasha Levin --- arch/parisc/include/asm/cmpxchg.h | 2 ++ arch/parisc/lib/bitops.c | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h index ab5c215cf46c..068958575871 100644 --- a/arch/parisc/include/asm/cmpxchg.h +++ b/arch/parisc/include/asm/cmpxchg.h @@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void); extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_); extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_); +extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_); /* don't worry...optimizer will get rid of most of this */ static inline unsigned long @@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) #endif case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int)old, (unsigned int)new_); + case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_); } __cmpxchg_called_with_bad_pointer(); return old; diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index 70ffbcf889b8..2e4d1f05a926 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; } + +u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new) +{ + unsigned long flags; + u8 prev; + + _atomic_spin_lock_irqsave(ptr, flags); + if ((prev = *ptr) == old) + *ptr = new; + _atomic_spin_unlock_irqrestore(ptr, flags); + return prev; +} -- GitLab From 4418d72258a9796cce607945a6d94aa315a584c5 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 21 Jul 2020 15:23:12 +0900 Subject: [PATCH 0047/1304] net: ethernet: ravb: exit if 
re-initialization fails in tx timeout [ Upstream commit 015c5d5e6aa3523c758a70eb87b291cece2dbbb4 ] According to the report of [1], this driver is possible to cause the following error in ravb_tx_timeout_work(). ravb e6800000.ethernet ethernet: failed to switch device to config mode This error means that the hardware could not change the state from "Operation" to "Configuration" while some tx and/or rx queue are operating. After that, ravb_config() in ravb_dmac_init() will fail, and then any descriptors will be not allocaled anymore so that NULL pointer dereference happens after that on ravb_start_xmit(). To fix the issue, the ravb_tx_timeout_work() should check the return values of ravb_stop_dma() and ravb_dmac_init(). If ravb_stop_dma() fails, ravb_tx_timeout_work() re-enables TX and RX and just exits. If ravb_dmac_init() fails, just exits. [1] https://lore.kernel.org/linux-renesas-soc/20200518045452.2390-1-dirk.behme@de.bosch.com/ Reported-by: Dirk Behme Signed-off-by: Yoshihiro Shimoda Reviewed-by: Sergei Shtylyov Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/renesas/ravb_main.c | 26 ++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index faaf74073a12..569e698b5c80 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1445,6 +1445,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) struct ravb_private *priv = container_of(work, struct ravb_private, work); struct net_device *ndev = priv->ndev; + int error; netif_tx_stop_all_queues(ndev); @@ -1453,15 +1454,36 @@ static void ravb_tx_timeout_work(struct work_struct *work) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ - ravb_stop_dma(ndev); + if (ravb_stop_dma(ndev)) { + /* If ravb_stop_dma() fails, the hardware is still operating + * for TX and/or RX. 
So, this should not call the following + * functions because ravb_dmac_init() is possible to fail too. + * Also, this should not retry ravb_stop_dma() again and again + * here because it's possible to wait forever. So, this just + * re-enables the TX and RX and skip the following + * re-initialization procedure. + */ + ravb_rcv_snd_enable(ndev); + goto out; + } ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); /* Device init */ - ravb_dmac_init(ndev); + error = ravb_dmac_init(ndev); + if (error) { + /* If ravb_dmac_init() fails, descriptors are freed. So, this + * should return here to avoid re-enabling the TX and RX in + * ravb_emac_init(). + */ + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", + __func__, error); + return; + } ravb_emac_init(ndev); +out: /* Initialise PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_init(ndev, priv->pdev); -- GitLab From 2bf308bb77723df0faea14094871f26b5318f074 Mon Sep 17 00:00:00 2001 From: Raviteja Narayanam Date: Fri, 3 Jul 2020 19:25:49 +0530 Subject: [PATCH 0048/1304] Revert "i2c: cadence: Fix the hold bit setting" [ Upstream commit 0db9254d6b896b587759e2c844c277fb1a6da5b9 ] This reverts commit d358def706880defa4c9e87381c5bf086a97d5f9. There are two issues with "i2c: cadence: Fix the hold bit setting" commit. 1. In case of combined message request from user space, when the HOLD bit is cleared in cdns_i2c_mrecv function, a STOP condition is sent on the bus even before the last message is started. This is because when the HOLD bit is cleared, the FIFOS are empty and there is no pending transfer. The STOP condition should occur only after the last message is completed. 2. The code added by the commit is redundant. Driver is handling the setting/clearing of HOLD bit in right way before the commit. The setting of HOLD bit based on 'bus_hold_flag' is taken care in cdns_i2c_master_xfer function even before cdns_i2c_msend/cdns_i2c_recv functions. 
The clearing of HOLD bit is taken care at the end of cdns_i2c_msend and cdns_i2c_recv functions based on bus_hold_flag and byte count. Since clearing of HOLD bit is done after the slave address is written to the register (writing to address register triggers the message transfer), it is ensured that STOP condition occurs at the right time after completion of the pending transfer (last message). Signed-off-by: Raviteja Narayanam Acked-by: Michal Simek Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-cadence.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index d917cefc5a19..b13605718291 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -382,10 +382,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->recv_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); @@ -442,11 +440,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id) * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. */ - if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) + if (id->send_count > CDNS_I2C_FIFO_DEPTH) ctrl_reg |= CDNS_I2C_CR_HOLD; - else - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; - cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); /* Clear the interrupts in interrupt status register. 
*/ -- GitLab From a9e49596a6fcf26e5b592f9ad3a0c37d4f22d24f Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 17 Jul 2020 09:04:25 -0500 Subject: [PATCH 0049/1304] x86/unwind/orc: Fix ORC for newly forked tasks [ Upstream commit 372a8eaa05998cd45b3417d0e0ffd3a70978211a ] The ORC unwinder fails to unwind newly forked tasks which haven't yet run on the CPU. It correctly reads the 'ret_from_fork' instruction pointer from the stack, but it incorrectly interprets that value as a call stack address rather than a "signal" one, so the address gets incorrectly decremented in the call to orc_find(), resulting in bad ORC data. Fix it by forcing 'ret_from_fork' frames to be signal frames. Reported-by: Wang ShaoBo Signed-off-by: Josh Poimboeuf Signed-off-by: Thomas Gleixner Tested-by: Wang ShaoBo Link: https://lkml.kernel.org/r/f91a8778dde8aae7f71884b5df2b16d552040441.1594994374.git.jpoimboe@redhat.com Signed-off-by: Sasha Levin --- arch/x86/kernel/unwind_orc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 2701b370e58f..1d264ba1e56d 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -420,8 +420,11 @@ bool unwind_next_frame(struct unwind_state *state) /* * Find the orc_entry associated with the text address. * - * Decrement call return addresses by one so they work for sibling - * calls and calls to noreturn functions. + * For a call frame (as opposed to a signal frame), state->ip points to + * the instruction after the call. That instruction's stack layout + * could be different from the call instruction's layout, for example + * if the call was to a noreturn function. So get the ORC data for the + * call instruction itself. */ orc = orc_find(state->signal ? 
state->ip : state->ip - 1); if (!orc) @@ -634,6 +637,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, state->sp = task->thread.sp; state->bp = READ_ONCE_NOCHECK(frame->bp); state->ip = READ_ONCE_NOCHECK(frame->ret_addr); + state->signal = (void *)state->ip == ret_from_fork; } if (get_stack_info((unsigned long *)state->sp, state->task, -- GitLab From 214ac24e07e471e51946827d4d90408eba43059e Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Wed, 22 Jul 2020 21:58:39 -0500 Subject: [PATCH 0050/1304] cxgb4: add missing release on skb in uld_send() [ Upstream commit e6827d1abdc9b061a57d7b7d3019c4e99fabea2f ] In the implementation of uld_send(), the skb is consumed on all execution paths except one. Release skb when returning NET_XMIT_DROP. Signed-off-by: Navid Emamdoost Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 3d4a765e9e61..7801f2aeeb30 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2367,6 +2367,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb, txq_info = adap->sge.uld_txq_info[tx_uld_type]; if (unlikely(!txq_info)) { WARN_ON(true); + kfree_skb(skb); return NET_XMIT_DROP; } -- GitLab From 4b635fc2b3491bdaa69fd80b376b216fae2d1461 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Fri, 24 Jul 2020 10:59:10 +0200 Subject: [PATCH 0051/1304] xen-netfront: fix potential deadlock in xennet_remove() [ Upstream commit c2c633106453611be07821f53dff9e93a9d1c3f0 ] There's a potential race in xennet_remove(); this is what the driver is doing upon unregistering a network device: 1. state = read bus state 2. if state is not "Closed": 3. request to set state to "Closing" 4. wait for state to be set to "Closing" 5. request to set state to "Closed" 6. 
wait for state to be set to "Closed" If the state changes to "Closed" immediately after step 1 we are stuck forever in step 4, because the state will never go back from "Closed" to "Closing". Make sure to check also for state == "Closed" in step 4 to prevent the deadlock. Also add a 5 sec timeout any time we wait for the bus state to change, to avoid getting stuck forever in wait_event(). Signed-off-by: Andrea Righi Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/xen-netfront.c | 64 +++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 6b4675a9494b..c8e84276e639 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +#define XENNET_TIMEOUT (5 * HZ) + static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { @@ -1337,12 +1339,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); - xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + do { + xenbus_switch_state(dev, XenbusStateInitialising); + err = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown, XENNET_TIMEOUT); + } while (!err); + return netdev; exit: @@ -2142,28 +2147,43 @@ static const struct attribute_group xennet_dev_group = { }; #endif /* CONFIG_SYSFS */ -static int xennet_remove(struct xenbus_device *dev) +static void xennet_bus_close(struct xenbus_device *dev) { - struct netfront_info *info = dev_get_drvdata(&dev->dev); - - dev_dbg(&dev->dev, "%s\n", 
dev->nodename); + int ret; - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosing || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosing || + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); + + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) + return; + do { xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_wq, - xenbus_read_driver_state(dev->otherend) == - XenbusStateClosed || - xenbus_read_driver_state(dev->otherend) == - XenbusStateUnknown); - } + ret = wait_event_timeout(module_wq, + xenbus_read_driver_state(dev->otherend) == + XenbusStateClosed || + xenbus_read_driver_state(dev->otherend) == + XenbusStateUnknown, + XENNET_TIMEOUT); + } while (!ret); +} + +static int xennet_remove(struct xenbus_device *dev) +{ + struct netfront_info *info = dev_get_drvdata(&dev->dev); + xennet_bus_close(dev); xennet_disconnect_backend(info); if (info->netdev->reg_state == NETREG_REGISTERED) -- GitLab From 8c6c93ccb6bee8adc1a2bdcb1a75410bddf9e443 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Fri, 31 Jul 2020 11:12:19 +0800 Subject: [PATCH 0052/1304] KVM: LAPIC: Prevent setting the tscdeadline timer if the lapic is hw disabled commit d2286ba7d574ba3103a421a2f9ec17cb5b0d87a1 upstream. Prevent setting the tscdeadline timer if the lapic is hw disabled. 
Fixes: bce87cce88 (KVM: x86: consolidate different ways to test for in-kernel LAPIC) Cc: Signed-off-by: Wanpeng Li Message-Id: <1596165141-28874-1-git-send-email-wanpengli@tencent.com> Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/lapic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 8c6392534d14..bba2f76c356d 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2034,7 +2034,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) { struct kvm_lapic *apic = vcpu->arch.apic; - if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) || + if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) || apic_lvtt_period(apic)) return; -- GitLab From dc3d380f6eb90dede26afbce0073919c74a82a17 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 29 Jul 2020 10:53:28 +0200 Subject: [PATCH 0053/1304] x86/i8259: Use printk_deferred() to prevent deadlock commit bdd65589593edd79b6a12ce86b3b7a7c6dae5208 upstream. 0day reported a possible circular locking dependency: Chain exists of: &irq_desc_lock_class --> console_owner --> &port_lock_key Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&port_lock_key); lock(console_owner); lock(&port_lock_key); lock(&irq_desc_lock_class); The reason for this is a printk() in the i8259 interrupt chip driver which is invoked with the irq descriptor lock held, which reverses the lock operations vs. printk() from arbitrary contexts. Switch the printk() to printk_deferred() to avoid that. 
Reported-by: kernel test robot Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/87365abt2v.fsf@nanos.tec.linutronix.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/i8259.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 519649ddf100..fe522691ac71 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -207,7 +207,7 @@ static void mask_and_ack_8259A(struct irq_data *data) * lets ACK and report it. [once per IRQ] */ if (!(spurious_irq_mask & irqmask)) { - printk(KERN_DEBUG + printk_deferred(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); spurious_irq_mask |= irqmask; } -- GitLab From c076c79e03c6094e578df5d210fde808b3ad32e6 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 5 Aug 2020 10:06:06 +0200 Subject: [PATCH 0054/1304] Linux 4.19.137 Tested-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a76c159bb605..edf1799c08d2 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 136 +SUBLEVEL = 137 EXTRAVERSION = NAME = "People's Front" -- GitLab From d96fd5077670098f017f856c07ec5593b96b9cf8 Mon Sep 17 00:00:00 2001 From: Matthias Maennich Date: Tue, 5 May 2020 15:53:55 +0200 Subject: [PATCH 0055/1304] ANDROID: Enforce KMI stability This declares ABI/KMI stability for the gki aarch64 config. 
Bug: 134674532 Signed-off-by: Matthias Maennich Change-Id: I2e35c1281ee66ef22d4fce0fcce1269e827f8594 Signed-off-by: Will McVicker --- build.config.gki.aarch64 | 1 + 1 file changed, 1 insertion(+) diff --git a/build.config.gki.aarch64 b/build.config.gki.aarch64 index 6ebaf34cb036..7353591fade5 100644 --- a/build.config.gki.aarch64 +++ b/build.config.gki.aarch64 @@ -10,3 +10,4 @@ android/abi_gki_aarch64_qcom " TRIM_NONLISTED_KMI=1 KMI_SYMBOL_LIST_STRICT_MODE=1 +KMI_ENFORCED=1 -- GitLab From e8f603ac1ab29f8f5b1c486dad6a502db568155e Mon Sep 17 00:00:00 2001 From: Puma Hsu Date: Thu, 30 Jul 2020 16:08:52 +0800 Subject: [PATCH 0056/1304] ANDROID: GKI: power: Add property to enable/disable cc toggle Add POWER_SUPPLY_PROP_CC_TOGGLE_ENABLE to force control the typec cc toggling. Bug: 163013210 Bug: 162697021 Signed-off-by: Puma Hsu Change-Id: I188a8afceedada4fb84e349eb56263d7da726851 (cherry picked from commit 489cd26d041ca5e663783bfb62dff61b87ab00a3) Signed-off-by: Will McVicker --- drivers/power/supply/power_supply_sysfs.c | 1 + include/linux/power_supply.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index d85b5649a5b6..8f49aa53d4f9 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -507,6 +507,7 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(parallel_output_mode), POWER_SUPPLY_ATTR(alignment), POWER_SUPPLY_ATTR(moisture_detection_enabled), + POWER_SUPPLY_ATTR(cc_toggle_enable), POWER_SUPPLY_ATTR(fg_type), POWER_SUPPLY_ATTR(charger_status), /* Local extensions of type int64_t */ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 931c206660df..a19b2fc74f2f 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -390,6 +390,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_PARALLEL_OUTPUT_MODE, POWER_SUPPLY_PROP_ALIGNMENT, 
POWER_SUPPLY_PROP_MOISTURE_DETECTION_ENABLE, + POWER_SUPPLY_PROP_CC_TOGGLE_ENABLE, POWER_SUPPLY_PROP_FG_TYPE, POWER_SUPPLY_PROP_CHARGER_STATUS, /* Local extensions of type int64_t */ -- GitLab From 59619eb9de2fd8bdb93a2de23f587de655acff32 Mon Sep 17 00:00:00 2001 From: Will McVicker Date: Thu, 6 Aug 2020 12:14:25 -0700 Subject: [PATCH 0057/1304] ANDROID: GKI: update the ABI xml Leaf changes summary: 8 artifacts changed Changed leaf types summary: 1 leaf type changed Removed/Changed/Added functions summary: 0 Removed, 7 Changed, 0 Added function Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 0 Added variable 7 functions with some sub-type change: 'enum power_supply_property at power_supply.h:171:1' changed: type size hasn't changed 1 enumerator insertion: 'power_supply_property::POWER_SUPPLY_PROP_CC_TOGGLE_ENABLE' value '216' 10 enumerator changes: ... 7 impacted interfaces: ... Signed-off-by: Will McVicker Bug: 163013210 Bug: 162697021 Change-Id: Ic06e2fbab95ac11a23089770ab69ed6bfd03ae6b --- android/abi_gki_aarch64.xml | 1445 +++++++++++++++++------------------ 1 file changed, 688 insertions(+), 757 deletions(-) diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml index 90d96578660a..60c9d2856391 100644 --- a/android/abi_gki_aarch64.xml +++ b/android/abi_gki_aarch64.xml @@ -598,7 +598,7 @@ - + @@ -1702,13 +1702,13 @@ - - - - - + + + + + - + @@ -7126,14 +7126,6 @@ - - - - - - - - @@ -8037,7 +8029,7 @@ - + @@ -9338,7 +9330,7 @@ - + @@ -10333,7 +10325,7 @@ - + @@ -13989,21 +13981,21 @@ - + - + - + - + - + - + @@ -15453,7 +15445,7 @@ - + @@ -15539,7 +15531,7 @@ - + @@ -16470,7 +16462,7 @@ - + @@ -16525,7 +16517,7 @@ - + @@ -17447,7 +17439,7 @@ - + @@ -18215,7 +18207,7 @@ - + @@ -21797,13 +21789,13 @@ - + - + @@ -28126,6 +28118,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -28133,6 +28160,22 @@ + + + + + + + + + + + + + + + + @@ -28203,6 +28246,17 @@ + + + + + + + + + + + @@ -28273,31 +28327,31 @@ - - + 
+ - - + + - - + + - - + + - - - - + + + + - - - + + + @@ -29477,10 +29531,10 @@ - + - + @@ -29602,7 +29656,7 @@ - + @@ -29625,7 +29679,7 @@ - + @@ -29854,8 +29908,6 @@ - - @@ -29898,7 +29950,7 @@ - + @@ -30707,131 +30759,131 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -30857,7 +30909,7 @@ - + @@ -30870,15 +30922,15 @@ - + - + - + - + @@ -31099,33 +31151,34 @@ - - - - - - - - - - + + + + + + + + + + + - + - + - + - + - + - + @@ -31162,13 +31215,13 @@ - + - + @@ -31201,7 +31254,7 @@ - + @@ -32307,7 +32360,7 @@ - + @@ -32326,7 +32379,7 @@ - + @@ -33160,7 +33213,7 @@ - + @@ -33180,12 +33233,12 @@ - + - + @@ -33205,7 +33258,7 @@ - + @@ -33511,7 +33564,7 @@ - + @@ -33527,13 +33580,13 @@ - + - + @@ -35383,164 +35436,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -35709,7 +35604,7 @@ - + @@ -35846,7 +35741,7 @@ - + @@ -35954,7 +35849,7 @@ - + @@ -35969,7 +35864,7 @@ - + @@ -36013,7 +35908,7 @@ - + @@ -36110,7 +36005,7 @@ - + @@ -36175,57 +36070,57 @@ - - - - - + + + + + - - + + - - + + - - - + + + - - - - + + + + - - - - - - - - + + + + + + + + - - - - - - - + + + + + + + - - - - - - + + + + + + @@ -37537,7 +37432,7 @@ - + @@ -37550,7 +37445,7 @@ - + @@ -37558,13 +37453,13 @@ - + - + - + @@ -37572,7 +37467,7 @@ - + @@ -38062,10 +37957,10 @@ - + - + @@ -38120,7 +38015,7 @@ - + @@ -38129,7 +38024,7 @@ - + @@ -38842,7 +38737,7 @@ - + @@ -38851,7 +38746,7 @@ - + @@ -39955,9 +39850,9 @@ - + - + @@ -40263,7 +40158,7 @@ - + @@ -40275,7 +40170,7 @@ - + @@ -40356,7 +40251,7 @@ - + @@ -40364,7 +40259,7 @@ - + @@ -40593,7 +40488,7 @@ - + @@ -40965,7 +40860,7 @@ - + @@ -40978,7 
+40873,7 @@ - + @@ -41676,7 +41571,7 @@ - + @@ -41684,10 +41579,10 @@ - + - + @@ -41770,13 +41665,13 @@ - + - + @@ -42699,7 +42594,7 @@ - + @@ -42775,7 +42670,7 @@ - + @@ -42984,7 +42879,7 @@ - + @@ -45150,21 +45045,27 @@ - + + + + + + + - + - + - + - + @@ -47712,12 +47613,12 @@ - + - + @@ -47777,7 +47678,7 @@ - + @@ -48021,7 +47922,7 @@ - + @@ -48120,7 +48021,7 @@ - + @@ -51220,7 +51121,7 @@ - + @@ -52056,7 +51957,7 @@ - + @@ -52172,7 +52073,7 @@ - + @@ -52588,7 +52489,7 @@ - + @@ -53160,7 +53061,7 @@ - + @@ -53524,7 +53425,7 @@ - + @@ -53547,10 +53448,10 @@ - + - + @@ -53559,7 +53460,7 @@ - + @@ -53567,7 +53468,7 @@ - + @@ -53597,7 +53498,7 @@ - + @@ -53605,7 +53506,7 @@ - + @@ -54523,7 +54424,7 @@ - + @@ -64830,10 +64731,10 @@ - + - + @@ -64846,7 +64747,7 @@ - + @@ -64985,7 +64886,7 @@ - + @@ -65021,6 +64922,23 @@ + + + + + + + + + + + + + + + + + @@ -65058,7 +64976,7 @@ - + @@ -65066,7 +64984,7 @@ - + @@ -65200,10 +65118,10 @@ - + - + @@ -65224,7 +65142,7 @@ - + @@ -65248,13 +65166,13 @@ - + - + @@ -65693,6 +65611,17 @@ + + + + + + + + + + + @@ -65701,18 +65630,18 @@ - + - + - + - + @@ -65774,7 +65703,7 @@ - + @@ -65932,7 +65861,7 @@ - + @@ -65987,7 +65916,7 @@ - + @@ -65995,23 +65924,6 @@ - - - - - - - - - - - - - - - - - @@ -66199,7 +66111,7 @@ - + @@ -66211,7 +66123,7 @@ - + @@ -66401,7 +66313,7 @@ - + @@ -66802,6 +66714,23 @@ + + + + + + + + + + + + + + + + + @@ -67070,13 +66999,13 @@ - + - + @@ -67094,10 +67023,10 @@ - + - + @@ -67124,7 +67053,7 @@ - + @@ -67132,7 +67061,7 @@ - + @@ -67398,24 +67327,7 @@ - - - - - - - - - - - - - - - - - - + @@ -67873,10 +67785,10 @@ - + - + @@ -68328,58 +68240,58 @@ - - - - + + + + - - - + + + - - + + - - - + + + - - - + + + - - + + - - - + + + - - - - - - - - - - - - + + + + + + - + - + + + + + + + @@ -68502,18 +68414,18 @@ - + - + - + @@ -68771,7 +68683,7 @@ - + @@ -68859,7 +68771,7 @@ - + @@ -68921,10 +68833,10 @@ - + - + @@ -68935,14 +68847,6 @@ - - - - - - - - @@ -68969,6 +68873,14 @@ + + + + + + + + @@ -69166,12 
+69078,12 @@ - + - + - + @@ -69181,8 +69093,8 @@ - - + + @@ -69542,10 +69454,10 @@ - + - + @@ -69968,7 +69880,7 @@ - + @@ -70032,7 +69944,7 @@ - + @@ -70167,20 +70079,20 @@ - - - - + + + + - - - + + + - - - + + + @@ -70663,7 +70575,7 @@ - + @@ -70808,13 +70720,13 @@ - + - + @@ -72475,7 +72387,7 @@ - + @@ -72502,10 +72414,10 @@ - + - + @@ -72697,10 +72609,10 @@ - + - + @@ -81720,7 +81632,7 @@ - + @@ -81728,7 +81640,7 @@ - + @@ -81760,7 +81672,7 @@ - + @@ -81801,7 +81713,7 @@ - + @@ -82203,7 +82115,7 @@ - + @@ -82217,7 +82129,7 @@ - + @@ -82357,8 +82269,8 @@ - - + + @@ -82439,12 +82351,12 @@ - - + + - - + + @@ -82623,7 +82535,7 @@ - + @@ -82635,7 +82547,7 @@ - + @@ -82944,7 +82856,7 @@ - + @@ -82986,7 +82898,7 @@ - + @@ -83758,7 +83670,7 @@ - + @@ -83858,7 +83770,7 @@ - + @@ -84099,7 +84011,7 @@ - + @@ -84112,7 +84024,7 @@ - + @@ -84128,7 +84040,7 @@ - + @@ -84163,6 +84075,14 @@ + + + + + + + + @@ -84173,10 +84093,10 @@ - + - + @@ -84192,15 +84112,15 @@ - + - + - + @@ -84222,7 +84142,7 @@ - + @@ -84254,7 +84174,7 @@ - + @@ -84268,6 +84188,17 @@ + + + + + + + + + + + @@ -84300,11 +84231,11 @@ - + - + @@ -84343,12 +84274,12 @@ - + - + @@ -84786,107 +84717,107 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -84910,60 +84841,60 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -84974,7 +84905,7 @@ - + @@ -86645,7 +86576,7 @@ - + @@ -86704,7 +86635,7 @@ - + @@ -86712,7 +86643,7 @@ - + @@ -86720,7 +86651,7 @@ - + @@ -87168,13 +87099,83 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -87233,7 +87234,7 @@ - + @@ -87402,76 +87403,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -87506,7 +87437,7 @@ - + @@ -87555,7 +87486,7 @@ - + @@ -87567,7 
+87498,7 @@ - + @@ -87994,7 +87925,7 @@ - + @@ -88003,7 +87934,7 @@ - + @@ -88011,7 +87942,7 @@ - + @@ -88100,7 +88031,7 @@ - + @@ -88199,7 +88130,7 @@ - + @@ -88282,7 +88213,7 @@ - + @@ -88300,7 +88231,7 @@ - + @@ -91701,7 +91632,7 @@ - + @@ -91713,7 +91644,7 @@ - + @@ -93007,7 +92938,7 @@ - + @@ -93018,7 +92949,7 @@ - + @@ -93026,7 +92957,7 @@ - + @@ -93034,7 +92965,7 @@ - + @@ -93042,7 +92973,7 @@ - + @@ -93050,7 +92981,7 @@ - + @@ -93058,7 +92989,7 @@ - + @@ -93670,7 +93601,7 @@ - + @@ -93757,12 +93688,12 @@ - + - + - + @@ -93795,7 +93726,7 @@ - + @@ -94400,7 +94331,7 @@ - + @@ -94435,7 +94366,7 @@ - + @@ -94446,7 +94377,7 @@ - + @@ -95175,9 +95106,9 @@ - - - + + + @@ -95323,7 +95254,7 @@ - + @@ -95332,10 +95263,10 @@ - + - + @@ -95344,7 +95275,7 @@ - + @@ -95355,7 +95286,7 @@ - + @@ -95383,7 +95314,7 @@ - + @@ -95400,7 +95331,7 @@ - + @@ -95409,24 +95340,24 @@ - + - + - + - + - + - + @@ -95434,7 +95365,7 @@ - + @@ -95442,7 +95373,7 @@ - + @@ -95450,7 +95381,7 @@ - + @@ -95551,13 +95482,13 @@ - + - + @@ -96296,7 +96227,7 @@ - + @@ -96309,7 +96240,7 @@ - + @@ -96344,10 +96275,10 @@ - + - + @@ -96458,7 +96389,7 @@ - + @@ -96635,7 +96566,7 @@ - + @@ -97202,7 +97133,7 @@ - + @@ -97402,10 +97333,10 @@ - + - + @@ -97424,10 +97355,10 @@ - + - + @@ -98282,7 +98213,7 @@ - + @@ -99203,7 +99134,7 @@ - + @@ -99232,7 +99163,7 @@ - + @@ -99240,7 +99171,7 @@ - + @@ -99248,7 +99179,7 @@ - + @@ -99256,7 +99187,7 @@ - + @@ -99264,7 +99195,7 @@ - + @@ -99539,7 +99470,7 @@ - + @@ -99919,7 +99850,7 @@ - + @@ -100019,6 +99950,6 @@ -- GitLab From 29204c846894d73108f87e78aea4757a8ec52c74 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Fri, 10 Jul 2020 15:23:19 +0200 Subject: [PATCH 0058/1304] random32: update the net random state on interrupt and activity commit f227e3ec3b5cad859ad15666874405e8c1bbc1d4 upstream. 
This modifies the first 32 bits out of the 128 bits of a random CPU's net_rand_state on interrupt or CPU activity to complicate remote observations that could lead to guessing the network RNG's internal state. Note that depending on some network devices' interrupt rate moderation or binding, this re-seeding might happen on every packet or even almost never. In addition, with NOHZ some CPUs might not even get timer interrupts, leaving their local state rarely updated, while they are running networked processes making use of the random state. For this reason, we also perform this update in update_process_times() in order to at least update the state when there is user or system activity, since it's the only case we care about. Reported-by: Amit Klein Suggested-by: Linus Torvalds Cc: Eric Dumazet Cc: "Jason A. Donenfeld" Cc: Andy Lutomirski Cc: Kees Cook Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Signed-off-by: Willy Tarreau Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/char/random.c | 1 + include/linux/random.h | 3 +++ kernel/time/timer.c | 8 ++++++++ lib/random32.c | 2 +- 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/char/random.c b/drivers/char/random.c index d5f970d039bb..6a5d4dfafc47 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1257,6 +1257,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); + this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]); if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && diff --git a/include/linux/random.h b/include/linux/random.h index 445a0ea4ff49..d729f7614215 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -9,6 +9,7 @@ #include #include +#include #include @@ -115,6 +116,8 @@ struct rnd_state { __u32 s1, s2, s3, s4; }; +DECLARE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; + u32 prandom_u32_state(struct rnd_state *state); void 
prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 6c54cf481fde..61e41ea3a96e 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -1654,6 +1655,13 @@ void update_process_times(int user_tick) scheduler_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(p); + + /* The current CPU might make use of net randoms without receiving IRQs + * to renew them often enough. Let's update the net_rand_state from a + * non-constant value that's not affine to the number of calls to make + * sure it's updated when there's some activity (we don't care in idle). + */ + this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick); } /** diff --git a/lib/random32.c b/lib/random32.c index 4aaa76404d56..7abd634a718e 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) } #endif -static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; /** * prandom_u32_state - seeded pseudo-random number generator. -- GitLab From 546271c2c8d3a4f2d5fd07d43faf49d0b4423dde Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Thu, 30 Jul 2020 22:05:01 +0300 Subject: [PATCH 0059/1304] ARM: percpu.h: fix build error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit aa54ea903abb02303bf55855fb51e3fcee135d70 upstream. 
Fix build error for the case: defined(CONFIG_SMP) && !defined(CONFIG_CPU_V6) config: keystone_defconfig CC arch/arm/kernel/signal.o In file included from ../include/linux/random.h:14, from ../arch/arm/kernel/signal.c:8: ../arch/arm/include/asm/percpu.h: In function ‘__my_cpu_offset’: ../arch/arm/include/asm/percpu.h:29:34: error: ‘current_stack_pointer’ undeclared (first use in this function); did you mean ‘user_stack_pointer’? : "Q" (*(const unsigned long *)current_stack_pointer)); ^~~~~~~~~~~~~~~~~~~~~ user_stack_pointer Fixes: f227e3ec3b5c ("random32: update the net random state on interrupt and activity") Signed-off-by: Grygorii Strashko Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/percpu.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h index a89b4076cde4..72821b4721ad 100644 --- a/arch/arm/include/asm/percpu.h +++ b/arch/arm/include/asm/percpu.h @@ -16,6 +16,8 @@ #ifndef _ASM_ARM_PERCPU_H_ #define _ASM_ARM_PERCPU_H_ +#include + /* * Same as asm-generic/percpu.h, except that we store the per cpu offset * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7 -- GitLab From 6f697da3eb85610aae888623ca2885b347e86864 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 30 Jul 2020 07:59:24 +0200 Subject: [PATCH 0060/1304] random: fix circular include dependency on arm64 after addition of percpu.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 1c9df907da83812e4f33b59d3d142c864d9da57f upstream. Daniel Díaz and Kees Cook independently reported that commit f227e3ec3b5c ("random32: update the net random state on interrupt and activity") broke arm64 due to a circular dependency on include files since the addition of percpu.h in random.h. The correct fix would definitely be to move all the prandom32 stuff out of random.h but for backporting, a smaller solution is preferred. 
This one replaces linux/percpu.h with asm/percpu.h, and this fixes the problem on x86_64, arm64, arm, and mips. Note that moving percpu.h around didn't change anything and that removing it entirely broke differently. When backporting, such options might still be considered if this patch fails to help. [ It turns out that an alternate fix seems to be to just remove the troublesome remove from the arm64 that causes the circular dependency. But we might as well do the whole belt-and-suspenders thing, and minimize inclusion in too. Either will fix the problem, and both are good changes. - Linus ] Reported-by: Daniel Díaz Reported-by: Kees Cook Tested-by: Marc Zyngier Fixes: f227e3ec3b5c Cc: Stephen Rothwell Signed-off-by: Willy Tarreau Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/random.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/random.h b/include/linux/random.h index d729f7614215..79bba0dfbc32 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -9,7 +9,7 @@ #include #include -#include +#include #include -- GitLab From e6b7c5f7a420578b1004dad4842b1210c580724e Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 29 Jul 2020 19:11:00 -0700 Subject: [PATCH 0061/1304] random32: remove net_rand_state from the latent entropy gcc plugin commit 83bdc7275e6206f560d247be856bceba3e1ed8f2 upstream. It turns out that the plugin right now ends up being really unhappy about the change from 'static' to 'extern' storage that happened in commit f227e3ec3b5c ("random32: update the net random state on interrupt and activity"). This is probably a trivial fix for the latent_entropy plugin, but for now, just remove net_rand_state from the list of things the plugin worries about. 
Reported-by: Stephen Rothwell Cc: Emese Revfy Cc: Kees Cook Cc: Willy Tarreau Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/random.h | 2 +- lib/random32.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/random.h b/include/linux/random.h index 79bba0dfbc32..085ebb912949 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -116,7 +116,7 @@ struct rnd_state { __u32 s1, s2, s3, s4; }; -DECLARE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +DECLARE_PER_CPU(struct rnd_state, net_rand_state); u32 prandom_u32_state(struct rnd_state *state); void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); diff --git a/lib/random32.c b/lib/random32.c index 7abd634a718e..036de0c93e22 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) } #endif -DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; +DEFINE_PER_CPU(struct rnd_state, net_rand_state); /** * prandom_u32_state - seeded pseudo-random number generator. -- GitLab From df9a9ac7a4614afa59fae8f7ad56b93fbab45d46 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 31 Jul 2020 07:51:14 +0200 Subject: [PATCH 0062/1304] random32: move the pseudo-random 32-bit definitions to prandom.h commit c0842fbc1b18c7a044e6ff3e8fa78bfa822c7d1a upstream. The addition of percpu.h to the list of includes in random.h revealed some circular dependencies on arm64 and possibly other platforms. This include was added solely for the pseudo-random definitions, which have nothing to do with the rest of the definitions in this file but are still there for legacy reasons. This patch moves the pseudo-random parts to linux/prandom.h and the percpu.h include with it, which is now guarded by _LINUX_PRANDOM_H and protected against recursive inclusion. 
A further cleanup step would be to remove this from entirely, and make people who use the prandom infrastructure include just the new header file. That's a bit of a churn patch, but grepping for "prandom_" and "next_pseudo_random32" "struct rnd_state" should catch most users. But it turns out that that nice cleanup step is fairly painful, because a _lot_ of code currently seems to depend on the implicit include of , which can currently come in a lot of ways, including such fairly core headfers as . So the "nice cleanup" part may or may never happen. Fixes: 1c9df907da83 ("random: fix circular include dependency on arm64 after addition of percpu.h") Tested-by: Guenter Roeck Acked-by: Willy Tarreau Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/prandom.h | 78 +++++++++++++++++++++++++++++++++++++++++ include/linux/random.h | 66 +++------------------------------- 2 files changed, 82 insertions(+), 62 deletions(-) create mode 100644 include/linux/prandom.h diff --git a/include/linux/prandom.h b/include/linux/prandom.h new file mode 100644 index 000000000000..aa16e6468f91 --- /dev/null +++ b/include/linux/prandom.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/prandom.h + * + * Include file for the fast pseudo-random 32-bit + * generation. 
+ */ +#ifndef _LINUX_PRANDOM_H +#define _LINUX_PRANDOM_H + +#include +#include + +u32 prandom_u32(void); +void prandom_bytes(void *buf, size_t nbytes); +void prandom_seed(u32 seed); +void prandom_reseed_late(void); + +struct rnd_state { + __u32 s1, s2, s3, s4; +}; + +DECLARE_PER_CPU(struct rnd_state, net_rand_state); + +u32 prandom_u32_state(struct rnd_state *state); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) + +/** + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) + * @ep_ro: right open interval endpoint + * + * Returns a pseudo-random number that is in interval [0, ep_ro). Note + * that the result depends on PRNG being well distributed in [0, ~0U] + * u32 space. Here we use maximally equidistributed combined Tausworthe + * generator, that is, prandom_u32(). This is useful when requesting a + * random index of an array containing ep_ro elements, for example. + * + * Returns: pseudo-random number in interval [0, ep_ro) + */ +static inline u32 prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +} + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom_seed_state - set seed for prandom_u32_state(). + * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. + */ +static inline void prandom_seed_state(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 2U); + state->s2 = __seed(i, 8U); + state->s3 = __seed(i, 16U); + state->s4 = __seed(i, 128U); +} + +/* Pseudo random number generator from numerical recipes. 
*/ +static inline u32 next_pseudo_random32(u32 seed) +{ + return seed * 1664525 + 1013904223; +} + +#endif diff --git a/include/linux/random.h b/include/linux/random.h index 085ebb912949..37209b3b22ae 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -9,7 +9,6 @@ #include #include -#include #include @@ -107,63 +106,12 @@ declare_get_random_var_wait(long) unsigned long randomize_page(unsigned long start, unsigned long range); -u32 prandom_u32(void); -void prandom_bytes(void *buf, size_t nbytes); -void prandom_seed(u32 seed); -void prandom_reseed_late(void); - -struct rnd_state { - __u32 s1, s2, s3, s4; -}; - -DECLARE_PER_CPU(struct rnd_state, net_rand_state); - -u32 prandom_u32_state(struct rnd_state *state); -void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); -void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); - -#define prandom_init_once(pcpu_state) \ - DO_ONCE(prandom_seed_full_state, (pcpu_state)) - -/** - * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) - * @ep_ro: right open interval endpoint - * - * Returns a pseudo-random number that is in interval [0, ep_ro). Note - * that the result depends on PRNG being well distributed in [0, ~0U] - * u32 space. Here we use maximally equidistributed combined Tausworthe - * generator, that is, prandom_u32(). This is useful when requesting a - * random index of an array containing ep_ro elements, for example. - * - * Returns: pseudo-random number in interval [0, ep_ro) - */ -static inline u32 prandom_u32_max(u32 ep_ro) -{ - return (u32)(((u64) prandom_u32() * ep_ro) >> 32); -} - /* - * Handle minimum values for seeds - */ -static inline u32 __seed(u32 x, u32 m) -{ - return (x < m) ? x + m : x; -} - -/** - * prandom_seed_state - set seed for prandom_u32_state(). - * @state: pointer to state structure to receive the seed. - * @seed: arbitrary 64-bit value to use as a seed. 
+ * This is designed to be standalone for just prandom + * users, but for now we include it from + * for legacy reasons. */ -static inline void prandom_seed_state(struct rnd_state *state, u64 seed) -{ - u32 i = (seed >> 32) ^ (seed << 10) ^ seed; - - state->s1 = __seed(i, 2U); - state->s2 = __seed(i, 8U); - state->s3 = __seed(i, 16U); - state->s4 = __seed(i, 128U); -} +#include #ifdef CONFIG_ARCH_RANDOM # include @@ -194,10 +142,4 @@ static inline bool arch_has_random_seed(void) } #endif -/* Pseudo random number generator from numerical recipes. */ -static inline u32 next_pseudo_random32(u32 seed) -{ - return seed * 1664525 + 1013904223; -} - #endif /* _LINUX_RANDOM_H */ -- GitLab From aa0962310814356b10a973c578cfe595d89a3e39 Mon Sep 17 00:00:00 2001 From: Jiang Ying Date: Wed, 5 Aug 2020 15:57:21 +0800 Subject: [PATCH 0063/1304] ext4: fix direct I/O read error This patch is used to fix ext4 direct I/O read error when the read size is not aligned with block size. Then, I will use a test to explain the error. (1) Make a file that is not aligned with block size: $dd if=/dev/zero of=./test.jar bs=1000 count=3 (2) I wrote a source file named "direct_io_read_file.c" as following: #include #include #include #include #include #include #include #define BUF_SIZE 1024 int main() { int fd; int ret; unsigned char *buf; ret = posix_memalign((void **)&buf, 512, BUF_SIZE); if (ret) { perror("posix_memalign failed"); exit(1); } fd = open("./test.jar", O_RDONLY | O_DIRECT, 0755); if (fd < 0){ perror("open ./test.jar failed"); exit(1); } do { ret = read(fd, buf, BUF_SIZE); printf("ret=%d\n",ret); if (ret < 0) { perror("write test.jar failed"); } } while (ret > 0); free(buf); close(fd); } (3) Compile the source file: $gcc direct_io_read_file.c -D_GNU_SOURCE (4) Run the test program: $./a.out The result is as following: ret=1024 ret=1024 ret=952 ret=-1 write test.jar failed: Invalid argument. 
I have tested this program on XFS filesystem, XFS does not have this problem, because XFS use iomap_dio_rw() to do direct I/O read. And the comparing between read offset and file size is done in iomap_dio_rw(), the code is as following: if (pos < size) { retval = filemap_write_and_wait_range(mapping, pos, pos + iov_length(iov, nr_segs) - 1); if (!retval) { retval = mapping->a_ops->direct_IO(READ, iocb, iov, pos, nr_segs); } ... } ...only when "pos < size", direct I/O can be done, or 0 will be return. I have tested the fix patch on Ext4, it is up to the mustard of EINVAL in man2(read) as following: #include ssize_t read(int fd, void *buf, size_t count); EINVAL fd is attached to an object which is unsuitable for reading; or the file was opened with the O_DIRECT flag, and either the address specified in buf, the value specified in count, or the current file offset is not suitably aligned. So I think this patch can be applied to fix ext4 direct I/O error. However Ext4 introduces direct I/O read using iomap infrastructure on kernel 5.5, the patch is commit ("ext4: introduce direct I/O read using iomap infrastructure"), then Ext4 will be the same as XFS, they all use iomap_dio_rw() to do direct I/O read. So this problem does not exist on kernel 5.5 for Ext4. >From above description, we can see this problem exists on all the kernel versions between kernel 3.14 and kernel 5.4. It will cause the Applications to fail to read. For example, when the search service downloads a new full index file, the search engine is loading the previous index file and is processing the search request, it can not use buffer io that may squeeze the previous index file in use from pagecache, so the serch service must use direct I/O read. Please apply this patch on these kernel versions, or please use the method on kernel 5.5 to fix this problem. 
Fixes: 9fe55eea7e4b ("Fix race when checking i_size on direct i/o read") Reviewed-by: Jan Kara Co-developed-by: Wang Long Signed-off-by: Wang Long Signed-off-by: Jiang Ying Signed-off-by: Greg Kroah-Hartman --- fs/ext4/inode.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3b1a7597af15..cd833f4e64ef 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3848,6 +3848,11 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter) struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; + loff_t offset = iocb->ki_pos; + loff_t size = i_size_read(inode); + + if (offset >= size) + return 0; /* * Shared inode_lock is enough for us - it protects against concurrent -- GitLab From 961f830af0658ef5ef8a7708786d634a6115f16b Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 7 Aug 2020 09:36:21 +0200 Subject: [PATCH 0064/1304] Linux 4.19.138 Tested-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index edf1799c08d2..daaa8ab2f550 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 137 +SUBLEVEL = 138 EXTRAVERSION = NAME = "People's Front" -- GitLab From b2fac90b6b029e0bc67b1829434d8abc65cf04f6 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:35 +0200 Subject: [PATCH 0065/1304] BACKPORT: loop: Call loop_config_discard() only after new config is applied loop_set_status() calls loop_config_discard() to configure discard for the loop device; however, the discard configuration depends on whether the loop device uses encryption, and when we call it the encryption configuration has not been updated yet. Move the call down so we apply the correct discard configuration based on the new configuration. 
Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Reviewed-by: Bob Liu Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe (cherry picked from commit 7c5014b0987a30e4989c90633c198aced454c0ec) Bug: 148607611 Change-Id: I77cb81b7c8038ebdb2d4cd6161c034a9e94cf516 --- drivers/block/loop.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f7b9b23d4c76..592e358d6a02 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1295,8 +1295,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) } } - loop_config_discard(lo); - memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); lo->lo_file_name[LO_NAME_SIZE-1] = 0; @@ -1320,6 +1318,8 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) lo->lo_key_owner = uid; } + loop_config_discard(lo); + /* update dio if lo_offset or transfer is changed */ __loop_update_dio(lo, lo->use_dio); -- GitLab From 4c23d31faf7d4363b46aff4117d7a693ed793c75 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:36 +0200 Subject: [PATCH 0066/1304] BACKPORT: loop: Remove sector_t truncation checks sector_t is now always u64, so we don't need to check for truncation. 
Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe (cherry picked from commit 083a6a50783ef54256eec3499e6575237e0e3d53) Bug: 148607611 Change-Id: I1db87e391aca6226d542c5b74e569184ec059073 --- drivers/block/loop.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 592e358d6a02..7ec3aa39265a 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -227,24 +227,20 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) blk_mq_unfreeze_queue(lo->lo_queue); } -static int +static void figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) { loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); - sector_t x = (sector_t)size; struct block_device *bdev = lo->lo_device; - if (unlikely((loff_t)x != size)) - return -EFBIG; if (lo->lo_offset != offset) lo->lo_offset = offset; if (lo->lo_sizelimit != sizelimit) lo->lo_sizelimit = sizelimit; - set_capacity(lo->lo_disk, x); + set_capacity(lo->lo_disk, size); bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); /* let user-space know about the new size */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); - return 0; } static inline int @@ -971,10 +967,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, !file->f_op->write_iter) lo_flags |= LO_FLAGS_READ_ONLY; - error = -EFBIG; size = get_loop_size(lo, file); - if ((loff_t)(sector_t)size != size) - goto out_unlock; + error = loop_prepare_queue(lo); if (error) goto out_unlock; @@ -1289,10 +1283,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) lo->lo_device->bd_inode->i_mapping->nrpages); goto out_unfreeze; } - if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { - err = -EFBIG; - goto out_unfreeze; - } + figure_loop_size(lo, info->lo_offset, info->lo_sizelimit); } memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); @@ -1495,7 +1486,9 
@@ static int loop_set_capacity(struct loop_device *lo) if (unlikely(lo->lo_state != Lo_bound)) return -ENXIO; - return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); + figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); + + return 0; } static int loop_set_dio(struct loop_device *lo, unsigned long arg) -- GitLab From 2260595c54add5829c9615828f5e6d98364441ee Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:37 +0200 Subject: [PATCH 0067/1304] BACKPORT: loop: Factor out setting loop device size This code is used repeatedly. Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe (cherry picked from commit 5795b6f5607f7e4db62ddea144727780cb351a9b) Bug: 148607611 Change-Id: I332d05033e13b6c9f75f9d959a4682135bd6c53c --- drivers/block/loop.c | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 7ec3aa39265a..81e8d0f9007d 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -227,20 +227,35 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) blk_mq_unfreeze_queue(lo->lo_queue); } +/** + * loop_set_size() - sets device size and notifies userspace + * @lo: struct loop_device to set the size for + * @size: new size of the loop device + * + * Callers must validate that the size passed into this function fits into + * a sector_t, eg using loop_validate_size() + */ +static void loop_set_size(struct loop_device *lo, loff_t size) +{ + struct block_device *bdev = lo->lo_device; + + set_capacity(lo->lo_disk, size); + bd_set_size(bdev, size << SECTOR_SHIFT); + /* let user-space know about the new size */ + kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); +} + static void figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) { loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); - struct block_device *bdev = lo->lo_device; if (lo->lo_offset != offset) 
lo->lo_offset = offset; if (lo->lo_sizelimit != sizelimit) lo->lo_sizelimit = sizelimit; - set_capacity(lo->lo_disk, size); - bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9); - /* let user-space know about the new size */ - kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + + loop_set_size(lo, size); } static inline int @@ -1001,11 +1016,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, } loop_update_dio(lo); - set_capacity(lo->lo_disk, size); - bd_set_size(bdev, size << 9); loop_sysfs_init(lo); - /* let user-space know about the new size */ - kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); + loop_set_size(lo, size); set_blocksize(bdev, S_ISBLK(inode->i_mode) ? block_size(inode->i_bdev) : PAGE_SIZE); -- GitLab From 556233f6455c676a606078ce016817055392cbab Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:39 +0200 Subject: [PATCH 0068/1304] BACKPORT: loop: Refactor loop_set_status() size calculation figure_loop_size() calculates the loop size based on the passed in parameters, but at the same time it updates the offset and sizelimit parameters in the loop device configuration. That is a somewhat unexpected side effect of a function with this name, and it is only only needed by one of the two callers of this function - loop_set_status(). Move the lo_offset and lo_sizelimit assignment back into loop_set_status(), and use the newly factored out functions to validate and apply the newly calculated size. This allows us to get rid of figure_loop_size() in a follow-up commit. 
Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe (cherry picked from commit b0bd158dd630bd47640e0e418c062cda1e0da5ad) Bug: 148607611 Change-Id: Ib8e10eb734c3f95f3c433c13012b378373336d10 --- drivers/block/loop.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 81e8d0f9007d..41c7e716eeba 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -250,11 +250,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) { loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); - if (lo->lo_offset != offset) - lo->lo_offset = offset; - if (lo->lo_sizelimit != sizelimit) - lo->lo_sizelimit = sizelimit; - loop_set_size(lo, size); } @@ -1234,6 +1229,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) kuid_t uid = current_uid(); struct block_device *bdev; bool partscan = false; + bool size_changed = false; err = mutex_lock_killable(&loop_ctl_mutex); if (err) @@ -1255,6 +1251,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { + size_changed = true; sync_blockdev(lo->lo_device); invalidate_bdev(lo->lo_device); } @@ -1262,6 +1259,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) /* I/O need to be drained during transfer transition */ blk_mq_freeze_queue(lo->lo_queue); + if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) { + /* If any pages were dirtied after kill_bdev(), try again */ + err = -EAGAIN; + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", + __func__, lo->lo_number, lo->lo_file_name, + lo->lo_device->bd_inode->i_mapping->nrpages); + goto out_unfreeze; + } + err = loop_release_xfer(lo); if (err) goto out_unfreeze; @@ -1285,19 +1291,8 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) 
if (err) goto out_unfreeze; - if (lo->lo_offset != info->lo_offset || - lo->lo_sizelimit != info->lo_sizelimit) { - /* kill_bdev should have truncated all the pages */ - if (lo->lo_device->bd_inode->i_mapping->nrpages) { - err = -EAGAIN; - pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", - __func__, lo->lo_number, lo->lo_file_name, - lo->lo_device->bd_inode->i_mapping->nrpages); - goto out_unfreeze; - } - figure_loop_size(lo, info->lo_offset, info->lo_sizelimit); - } - + lo->lo_offset = info->lo_offset; + lo->lo_sizelimit = info->lo_sizelimit; memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); lo->lo_file_name[LO_NAME_SIZE-1] = 0; @@ -1321,6 +1316,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) lo->lo_key_owner = uid; } + if (size_changed) { + loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, + lo->lo_backing_file); + loop_set_size(lo, new_size); + } + loop_config_discard(lo); /* update dio if lo_offset or transfer is changed */ -- GitLab From 0651770ca24fdc5caddc9aa3c4bac21968355c0d Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:40 +0200 Subject: [PATCH 0069/1304] BACKPORT: loop: Remove figure_loop_size() This function was now only used by loop_set_capacity(). Just open code the remaining code in the caller instead. 
Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe (cherry picked from commit 0a6ed1b5ff6757f11ad2d57906ceb40488a5ee52) Bug: 148607611 Change-Id: Icc5ef56a5f0264b9bfa15fbbe8062a74a59d88cb --- drivers/block/loop.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 41c7e716eeba..ac05b0c40c7a 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -245,14 +245,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size) kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); } -static void -figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit) -{ - loff_t size = get_size(offset, sizelimit, lo->lo_backing_file); - - loop_set_size(lo, size); -} - static inline int lo_do_transfer(struct loop_device *lo, int cmd, struct page *rpage, unsigned roffs, @@ -1496,10 +1488,13 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { static int loop_set_capacity(struct loop_device *lo) { + loff_t size; + if (unlikely(lo->lo_state != Lo_bound)) return -ENXIO; - figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit); + size = get_loop_size(lo, lo->lo_backing_file); + loop_set_size(lo, size); return 0; } -- GitLab From 5e69a89011b7befc2cb1d440006ae6a397cb9668 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:41 +0200 Subject: [PATCH 0070/1304] BACKPORT: loop: Factor out configuring loop from status Factor out this code into a separate function, so it can be reused by other code more easily. 
Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe (cherry picked from commit 0c3796c244598122a5d59d56f30d19390096817f) Bug: 148607611 Change-Id: I6bb8054a3963ba67bb44b07010d00ee53dbfef81 --- drivers/block/loop.c | 117 +++++++++++++++++++++++++------------------ 1 file changed, 67 insertions(+), 50 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ac05b0c40c7a..84773b6266fa 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1213,75 +1213,43 @@ static int loop_clr_fd(struct loop_device *lo) return __loop_clr_fd(lo, false); } +/** + * loop_set_status_from_info - configure device from loop_info + * @lo: struct loop_device to configure + * @info: struct loop_info64 to configure the device with + * + * Configures the loop device parameters according to the passed + * in loop_info64 configuration. + */ static int -loop_set_status(struct loop_device *lo, const struct loop_info64 *info) +loop_set_status_from_info(struct loop_device *lo, + const struct loop_info64 *info) { int err; struct loop_func_table *xfer; kuid_t uid = current_uid(); - struct block_device *bdev; - bool partscan = false; - bool size_changed = false; - - err = mutex_lock_killable(&loop_ctl_mutex); - if (err) - return err; - if (lo->lo_encrypt_key_size && - !uid_eq(lo->lo_key_owner, uid) && - !capable(CAP_SYS_ADMIN)) { - err = -EPERM; - goto out_unlock; - } - if (lo->lo_state != Lo_bound) { - err = -ENXIO; - goto out_unlock; - } - if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) { - err = -EINVAL; - goto out_unlock; - } - - if (lo->lo_offset != info->lo_offset || - lo->lo_sizelimit != info->lo_sizelimit) { - size_changed = true; - sync_blockdev(lo->lo_device); - invalidate_bdev(lo->lo_device); - } - /* I/O need to be drained during transfer transition */ - blk_mq_freeze_queue(lo->lo_queue); - - if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) { - /* If any pages were dirtied after kill_bdev(), try again 
*/ - err = -EAGAIN; - pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", - __func__, lo->lo_number, lo->lo_file_name, - lo->lo_device->bd_inode->i_mapping->nrpages); - goto out_unfreeze; - } + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) + return -EINVAL; err = loop_release_xfer(lo); if (err) - goto out_unfreeze; + return err; if (info->lo_encrypt_type) { unsigned int type = info->lo_encrypt_type; - if (type >= MAX_LO_CRYPT) { - err = -EINVAL; - goto out_unfreeze; - } + if (type >= MAX_LO_CRYPT) + return -EINVAL; xfer = xfer_funcs[type]; - if (xfer == NULL) { - err = -EINVAL; - goto out_unfreeze; - } + if (xfer == NULL) + return -EINVAL; } else xfer = NULL; err = loop_init_xfer(lo, xfer, info); if (err) - goto out_unfreeze; + return err; lo->lo_offset = info->lo_offset; lo->lo_sizelimit = info->lo_sizelimit; @@ -1308,6 +1276,55 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) lo->lo_key_owner = uid; } + return 0; +} + +static int +loop_set_status(struct loop_device *lo, const struct loop_info64 *info) +{ + int err; + struct block_device *bdev; + kuid_t uid = current_uid(); + bool partscan = false; + bool size_changed = false; + + err = mutex_lock_killable(&loop_ctl_mutex); + if (err) + return err; + if (lo->lo_encrypt_key_size && + !uid_eq(lo->lo_key_owner, uid) && + !capable(CAP_SYS_ADMIN)) { + err = -EPERM; + goto out_unlock; + } + if (lo->lo_state != Lo_bound) { + err = -ENXIO; + goto out_unlock; + } + + if (lo->lo_offset != info->lo_offset || + lo->lo_sizelimit != info->lo_sizelimit) { + size_changed = true; + sync_blockdev(lo->lo_device); + invalidate_bdev(lo->lo_device); + } + + /* I/O need to be drained during transfer transition */ + blk_mq_freeze_queue(lo->lo_queue); + + if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) { + /* If any pages were dirtied after kill_bdev(), try again */ + err = -EAGAIN; + pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n", + __func__, 
lo->lo_number, lo->lo_file_name, + lo->lo_device->bd_inode->i_mapping->nrpages); + goto out_unfreeze; + } + + err = loop_set_status_from_info(lo, info); + if (err) + goto out_unfreeze; + if (size_changed) { loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, lo->lo_backing_file); -- GitLab From 052e9ce4bb8b9e6a82e7e1fa762a50e8fcd8edb2 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:42 +0200 Subject: [PATCH 0071/1304] BACKPORT: loop: Move loop_set_status_from_info() and friends up So we can use it without forward declaration. This is a separate commit to make it easier to verify that this is just a move, without functional modifications. Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe (cherry picked from commit 62ab466ca881fe200c21aa74b65f8bd83ec482dc) Bug: 148607611 Change-Id: I8c68ebbe65cbec476f9f2d4c1d60eb343e88bb56 --- drivers/block/loop.c | 206 +++++++++++++++++++++---------------------- 1 file changed, 103 insertions(+), 103 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 84773b6266fa..c909a1ae3559 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -931,6 +931,109 @@ static int loop_prepare_queue(struct loop_device *lo) return 0; } +static int +loop_release_xfer(struct loop_device *lo) +{ + int err = 0; + struct loop_func_table *xfer = lo->lo_encryption; + + if (xfer) { + if (xfer->release) + err = xfer->release(lo); + lo->transfer = NULL; + lo->lo_encryption = NULL; + module_put(xfer->owner); + } + return err; +} + +static int +loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, + const struct loop_info64 *i) +{ + int err = 0; + + if (xfer) { + struct module *owner = xfer->owner; + + if (!try_module_get(owner)) + return -EINVAL; + if (xfer->init) + err = xfer->init(lo, i); + if (err) + module_put(owner); + else + lo->lo_encryption = xfer; + } + return err; +} + +/** + * loop_set_status_from_info - configure device from loop_info 
+ * @lo: struct loop_device to configure + * @info: struct loop_info64 to configure the device with + * + * Configures the loop device parameters according to the passed + * in loop_info64 configuration. + */ +static int +loop_set_status_from_info(struct loop_device *lo, + const struct loop_info64 *info) +{ + int err; + struct loop_func_table *xfer; + kuid_t uid = current_uid(); + + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) + return -EINVAL; + + err = loop_release_xfer(lo); + if (err) + return err; + + if (info->lo_encrypt_type) { + unsigned int type = info->lo_encrypt_type; + + if (type >= MAX_LO_CRYPT) + return -EINVAL; + xfer = xfer_funcs[type]; + if (xfer == NULL) + return -EINVAL; + } else + xfer = NULL; + + err = loop_init_xfer(lo, xfer, info); + if (err) + return err; + + lo->lo_offset = info->lo_offset; + lo->lo_sizelimit = info->lo_sizelimit; + memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); + memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); + lo->lo_file_name[LO_NAME_SIZE-1] = 0; + lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; + + if (!xfer) + xfer = &none_funcs; + lo->transfer = xfer->transfer; + lo->ioctl = xfer->ioctl; + + if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != + (info->lo_flags & LO_FLAGS_AUTOCLEAR)) + lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; + + lo->lo_encrypt_key_size = info->lo_encrypt_key_size; + lo->lo_init[0] = info->lo_init[0]; + lo->lo_init[1] = info->lo_init[1]; + if (info->lo_encrypt_key_size) { + memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, + info->lo_encrypt_key_size); + lo->lo_key_owner = uid; + } + + return 0; +} + static int loop_set_fd(struct loop_device *lo, fmode_t mode, struct block_device *bdev, unsigned int arg) { @@ -1033,43 +1136,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, return error; } -static int -loop_release_xfer(struct loop_device *lo) -{ - int err = 0; - struct loop_func_table *xfer = lo->lo_encryption; - - if (xfer) { - if (xfer->release) - err = 
xfer->release(lo); - lo->transfer = NULL; - lo->lo_encryption = NULL; - module_put(xfer->owner); - } - return err; -} - -static int -loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, - const struct loop_info64 *i) -{ - int err = 0; - - if (xfer) { - struct module *owner = xfer->owner; - - if (!try_module_get(owner)) - return -EINVAL; - if (xfer->init) - err = xfer->init(lo, i); - if (err) - module_put(owner); - else - lo->lo_encryption = xfer; - } - return err; -} - static int __loop_clr_fd(struct loop_device *lo, bool release) { struct file *filp = NULL; @@ -1213,72 +1279,6 @@ static int loop_clr_fd(struct loop_device *lo) return __loop_clr_fd(lo, false); } -/** - * loop_set_status_from_info - configure device from loop_info - * @lo: struct loop_device to configure - * @info: struct loop_info64 to configure the device with - * - * Configures the loop device parameters according to the passed - * in loop_info64 configuration. - */ -static int -loop_set_status_from_info(struct loop_device *lo, - const struct loop_info64 *info) -{ - int err; - struct loop_func_table *xfer; - kuid_t uid = current_uid(); - - if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) - return -EINVAL; - - err = loop_release_xfer(lo); - if (err) - return err; - - if (info->lo_encrypt_type) { - unsigned int type = info->lo_encrypt_type; - - if (type >= MAX_LO_CRYPT) - return -EINVAL; - xfer = xfer_funcs[type]; - if (xfer == NULL) - return -EINVAL; - } else - xfer = NULL; - - err = loop_init_xfer(lo, xfer, info); - if (err) - return err; - - lo->lo_offset = info->lo_offset; - lo->lo_sizelimit = info->lo_sizelimit; - memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); - memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); - lo->lo_file_name[LO_NAME_SIZE-1] = 0; - lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; - - if (!xfer) - xfer = &none_funcs; - lo->transfer = xfer->transfer; - lo->ioctl = xfer->ioctl; - - if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != - 
(info->lo_flags & LO_FLAGS_AUTOCLEAR)) - lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; - - lo->lo_encrypt_key_size = info->lo_encrypt_key_size; - lo->lo_init[0] = info->lo_init[0]; - lo->lo_init[1] = info->lo_init[1]; - if (info->lo_encrypt_key_size) { - memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, - info->lo_encrypt_key_size); - lo->lo_key_owner = uid; - } - - return 0; -} - static int loop_set_status(struct loop_device *lo, const struct loop_info64 *info) { -- GitLab From 3393e59963194de900bb0a09b10565c1f62358ee Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:43 +0200 Subject: [PATCH 0072/1304] BACKPORT: loop: Rework lo_ioctl() __user argument casting In preparation for a new ioctl that needs to copy_from_user(); makes the code easier to read as well. Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe (cherry picked from commit 571fae6e290d64a3e8132c455e7786c99c467ed1) Bug: 148607611 Change-Id: Ie37d0ed6bfc91d2a55c070446e5c54c8343a7170 --- drivers/block/loop.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index c909a1ae3559..9e4a761a6299 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1596,6 +1596,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; + void __user *argp = (void __user *) arg; int err; switch (cmd) { @@ -1608,21 +1609,19 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_SET_STATUS: err = -EPERM; if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) { - err = loop_set_status_old(lo, - (struct loop_info __user *)arg); + err = loop_set_status_old(lo, argp); } break; case LOOP_GET_STATUS: - return loop_get_status_old(lo, (struct loop_info __user *) arg); + return loop_get_status_old(lo, argp); case LOOP_SET_STATUS64: err = -EPERM; if ((mode & FMODE_WRITE) || 
capable(CAP_SYS_ADMIN)) { - err = loop_set_status64(lo, - (struct loop_info64 __user *) arg); + err = loop_set_status64(lo, argp); } break; case LOOP_GET_STATUS64: - return loop_get_status64(lo, (struct loop_info64 __user *) arg); + return loop_get_status64(lo, argp); case LOOP_SET_CAPACITY: case LOOP_SET_DIRECT_IO: case LOOP_SET_BLOCK_SIZE: -- GitLab From a7bbde7dfba643fa4c2dd9f5763dc281f9fb3315 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:44 +0200 Subject: [PATCH 0073/1304] BACKPORT: loop: Clean up LOOP_SET_STATUS lo_flags handling LOOP_SET_STATUS(64) will actually allow some lo_flags to be modified; in particular, LO_FLAGS_AUTOCLEAR can be set and cleared, whereas LO_FLAGS_PARTSCAN can be set to request a partition scan. Make this explicit by updating the UAPI to include the flags that can be set/cleared using this ioctl. The implementation can then blindly take over the passed in flags, and use the previous flags for those flags that can't be set / cleared using LOOP_SET_STATUS. 
Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe (cherry picked from commit faf1d25440d6ad06d509dada4b6fe62fea844370) Bug: 148607611 Change-Id: If80c0cce4ef7b1af4753246021981795bbc54ca0 --- drivers/block/loop.c | 19 +++++++++++++------ include/uapi/linux/loop.h | 10 ++++++++-- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 9e4a761a6299..69bde7182847 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1018,9 +1018,7 @@ loop_set_status_from_info(struct loop_device *lo, lo->transfer = xfer->transfer; lo->ioctl = xfer->ioctl; - if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != - (info->lo_flags & LO_FLAGS_AUTOCLEAR)) - lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; + lo->lo_flags = info->lo_flags; lo->lo_encrypt_key_size = info->lo_encrypt_key_size; lo->lo_init[0] = info->lo_init[0]; @@ -1285,6 +1283,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) int err; struct block_device *bdev; kuid_t uid = current_uid(); + int prev_lo_flags; bool partscan = false; bool size_changed = false; @@ -1321,10 +1320,19 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) goto out_unfreeze; } + prev_lo_flags = lo->lo_flags; + err = loop_set_status_from_info(lo, info); if (err) goto out_unfreeze; + /* Mask out flags that can't be set using LOOP_SET_STATUS. 
*/ + lo->lo_flags &= ~LOOP_SET_STATUS_SETTABLE_FLAGS; + /* For those flags, use the previous values instead */ + lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS; + /* For flags that can't be cleared, use previous values too */ + lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS; + if (size_changed) { loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, lo->lo_backing_file); @@ -1339,9 +1347,8 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) out_unfreeze: blk_mq_unfreeze_queue(lo->lo_queue); - if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) && - !(lo->lo_flags & LO_FLAGS_PARTSCAN)) { - lo->lo_flags |= LO_FLAGS_PARTSCAN; + if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) && + !(prev_lo_flags & LO_FLAGS_PARTSCAN)) { lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN; bdev = lo->lo_device; partscan = true; diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index 080a8df134ef..6b32fee80ce0 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -25,6 +25,12 @@ enum { LO_FLAGS_DIRECT_IO = 16, }; +/* LO_FLAGS that can be set using LOOP_SET_STATUS(64) */ +#define LOOP_SET_STATUS_SETTABLE_FLAGS (LO_FLAGS_AUTOCLEAR | LO_FLAGS_PARTSCAN) + +/* LO_FLAGS that can be cleared using LOOP_SET_STATUS(64) */ +#define LOOP_SET_STATUS_CLEARABLE_FLAGS (LO_FLAGS_AUTOCLEAR) + #include /* for __kernel_old_dev_t */ #include /* for __u64 */ @@ -37,7 +43,7 @@ struct loop_info { int lo_offset; int lo_encrypt_type; int lo_encrypt_key_size; /* ioctl w/o */ - int lo_flags; /* ioctl r/o */ + int lo_flags; char lo_name[LO_NAME_SIZE]; unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ unsigned long lo_init[2]; @@ -53,7 +59,7 @@ struct loop_info64 { __u32 lo_number; /* ioctl r/o */ __u32 lo_encrypt_type; __u32 lo_encrypt_key_size; /* ioctl w/o */ - __u32 lo_flags; /* ioctl r/o */ + __u32 lo_flags; __u8 lo_file_name[LO_NAME_SIZE]; __u8 lo_crypt_name[LO_NAME_SIZE]; __u8 lo_encrypt_key[LO_KEY_SIZE]; /* 
ioctl w/o */ -- GitLab From 9a7c579b773c9305abdaf20b394bdaf3cdb77086 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Wed, 13 May 2020 15:38:45 +0200 Subject: [PATCH 0074/1304] BACKPORT: loop: Add LOOP_CONFIGURE ioctl This allows userspace to completely setup a loop device with a single ioctl, removing the in-between state where the device can be partially configured - eg the loop device has a backing file associated with it, but is reading from the wrong offset. Besides removing the intermediate state, another big benefit of this ioctl is that LOOP_SET_STATUS can be slow; the main reason for this slowness is that LOOP_SET_STATUS(64) calls blk_mq_freeze_queue() to freeze the associated queue; this requires waiting for RCU synchronization, which I've measured can take about 15-20ms on this device on average. In addition to doing what LOOP_SET_STATUS can do, LOOP_CONFIGURE can also be used to: - Set the correct block size immediately by setting loop_config.block_size (avoids LOOP_SET_BLOCK_SIZE) - Explicitly request direct I/O mode by setting LO_FLAGS_DIRECT_IO in loop_config.info.lo_flags (avoids LOOP_SET_DIRECT_IO) - Explicitly request read-only mode by setting LO_FLAGS_READ_ONLY in loop_config.info.lo_flags Here's setting up ~70 regular loop devices with an offset on an x86 Android device, using LOOP_SET_FD and LOOP_SET_STATUS: vsoc_x86:/system/apex # time for i in `seq 30 100`; do losetup -r -o 4096 /dev/block/loop$i com.android.adbd.apex; done 0m03.40s real 0m00.02s user 0m00.03s system Here's configuring ~70 devices in the same way, but using a modified losetup that uses the new LOOP_CONFIGURE ioctl: vsoc_x86:/system/apex # time for i in `seq 30 100`; do losetup -r -o 4096 /dev/block/loop$i com.android.adbd.apex; done 0m01.94s real 0m00.01s user 0m00.01s system Signed-off-by: Martijn Coenen Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe (cherry picked from commit 3448914e8cc550ba792d4ccc74471d1ca4293aae) Bug: 148607611 Change-Id: 
I839ee9c9df8dbd9f4cc0fa5c8119c01abfcac1d9 --- drivers/block/loop.c | 98 +++++++++++++++++++++++++++++---------- include/uapi/linux/loop.h | 21 +++++++++ 2 files changed, 94 insertions(+), 25 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 69bde7182847..e6b5c5cde802 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -227,6 +227,19 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) blk_mq_unfreeze_queue(lo->lo_queue); } +/** + * loop_validate_block_size() - validates the passed in block size + * @bsize: size to validate + */ +static int +loop_validate_block_size(unsigned short bsize) +{ + if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize)) + return -EINVAL; + + return 0; +} + /** * loop_set_size() - sets device size and notifies userspace * @lo: struct loop_device to set the size for @@ -1032,22 +1045,23 @@ loop_set_status_from_info(struct loop_device *lo, return 0; } -static int loop_set_fd(struct loop_device *lo, fmode_t mode, - struct block_device *bdev, unsigned int arg) +static int loop_configure(struct loop_device *lo, fmode_t mode, + struct block_device *bdev, + const struct loop_config *config) { struct file *file; struct inode *inode; struct address_space *mapping; - int lo_flags = 0; int error; loff_t size; bool partscan; + unsigned short bsize; /* This is safe, since we have a reference from open(). 
*/ __module_get(THIS_MODULE); error = -EBADF; - file = fget(arg); + file = fget(config->fd); if (!file) goto out; @@ -1066,11 +1080,26 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, mapping = file->f_mapping; inode = mapping->host; + size = get_loop_size(lo, file); + + if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) { + error = -EINVAL; + goto out_unlock; + } + + if (config->block_size) { + error = loop_validate_block_size(config->block_size); + if (error) + goto out_unlock; + } + + error = loop_set_status_from_info(lo, &config->info); + if (error) + goto out_unlock; + if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || !file->f_op->write_iter) - lo_flags |= LO_FLAGS_READ_ONLY; - - size = get_loop_size(lo, file); + lo->lo_flags |= LO_FLAGS_READ_ONLY; error = loop_prepare_queue(lo); if (error) @@ -1078,30 +1107,28 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, error = 0; - set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); + set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); - lo->use_dio = false; + lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO; lo->lo_device = bdev; - lo->lo_flags = lo_flags; lo->lo_backing_file = file; - lo->transfer = NULL; - lo->ioctl = NULL; - lo->lo_sizelimit = 0; lo->old_gfp_mask = mapping_gfp_mask(mapping); mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); - if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) + if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) blk_queue_write_cache(lo->lo_queue, true, false); - if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) { + if (config->block_size) + bsize = config->block_size; + else if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) /* In case of direct I/O, match underlying block size */ - unsigned short bsize = bdev_logical_block_size( - inode->i_sb->s_bdev); + bsize = bdev_logical_block_size(inode->i_sb->s_bdev); + else + bsize = 512; - 
blk_queue_logical_block_size(lo->lo_queue, bsize); - blk_queue_physical_block_size(lo->lo_queue, bsize); - blk_queue_io_min(lo->lo_queue, bsize); - } + blk_queue_logical_block_size(lo->lo_queue, bsize); + blk_queue_physical_block_size(lo->lo_queue, bsize); + blk_queue_io_min(lo->lo_queue, bsize); loop_update_dio(lo); loop_sysfs_init(lo); @@ -1544,8 +1571,9 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg) if (lo->lo_state != Lo_bound) return -ENXIO; - if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) - return -EINVAL; + err = loop_validate_block_size(arg); + if (err) + return err; if (lo->lo_queue->limits.logical_block_size == arg) return 0; @@ -1607,8 +1635,27 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode, int err; switch (cmd) { - case LOOP_SET_FD: - return loop_set_fd(lo, mode, bdev, arg); + case LOOP_SET_FD: { + /* + * Legacy case - pass in a zeroed out struct loop_config with + * only the file descriptor set , which corresponds with the + * default parameters we'd have used otherwise. 
+ */ + struct loop_config config; + + memset(&config, 0, sizeof(config)); + config.fd = arg; + + return loop_configure(lo, mode, bdev, &config); + } + case LOOP_CONFIGURE: { + struct loop_config config; + + if (copy_from_user(&config, argp, sizeof(config))) + return -EFAULT; + + return loop_configure(lo, mode, bdev, &config); + } case LOOP_CHANGE_FD: return loop_change_fd(lo, bdev, arg); case LOOP_CLR_FD: @@ -1780,6 +1827,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_CLR_FD: case LOOP_GET_STATUS64: case LOOP_SET_STATUS64: + case LOOP_CONFIGURE: arg = (unsigned long) compat_ptr(arg); /* fall through */ case LOOP_SET_FD: diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index 6b32fee80ce0..24a1c45bd1ae 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -31,6 +31,10 @@ enum { /* LO_FLAGS that can be cleared using LOOP_SET_STATUS(64) */ #define LOOP_SET_STATUS_CLEARABLE_FLAGS (LO_FLAGS_AUTOCLEAR) +/* LO_FLAGS that can be set using LOOP_CONFIGURE */ +#define LOOP_CONFIGURE_SETTABLE_FLAGS (LO_FLAGS_READ_ONLY | LO_FLAGS_AUTOCLEAR \ + | LO_FLAGS_PARTSCAN | LO_FLAGS_DIRECT_IO) + #include /* for __kernel_old_dev_t */ #include /* for __u64 */ @@ -66,6 +70,22 @@ struct loop_info64 { __u64 lo_init[2]; }; +/** + * struct loop_config - Complete configuration for a loop device. + * @fd: fd of the file to be used as a backing file for the loop device. + * @block_size: block size to use; ignored if 0. + * @info: struct loop_info64 to configure the loop device with. + * + * This structure is used with the LOOP_CONFIGURE ioctl, and can be used to + * atomically setup and configure all loop device parameters at once. 
+ */ +struct loop_config { + __u32 fd; + __u32 block_size; + struct loop_info64 info; + __u64 __reserved[8]; +}; + /* * Loop filter types */ @@ -96,6 +116,7 @@ struct loop_info64 { #define LOOP_SET_CAPACITY 0x4C07 #define LOOP_SET_DIRECT_IO 0x4C08 #define LOOP_SET_BLOCK_SIZE 0x4C09 +#define LOOP_CONFIGURE 0x4C0A /* /dev/loop-control interface */ #define LOOP_CTL_ADD 0x4C80 -- GitLab From 1805e569cfd48350250149600a1d3155a3964739 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Thu, 4 Jun 2020 22:25:20 +0200 Subject: [PATCH 0075/1304] BACKPORT: loop: Fix wrong masking of status flags In faf1d25440d6, loop_set_status() now assigns lo_status directly from the passed in lo_flags, but then fixes it up by masking out flags that can't be set by LOOP_SET_STATUS; unfortunately the mask was negated. Re-ran all ltp ioctl_loop tests, and they all passed. Pass run of the previously failing one: tst_test.c:1247: INFO: Timeout per run is 0h 05m 00s tst_device.c:88: INFO: Found free device 0 '/dev/loop0' ioctl_loop01.c:49: PASS: /sys/block/loop0/loop/partscan = 0 ioctl_loop01.c:50: PASS: /sys/block/loop0/loop/autoclear = 0 ioctl_loop01.c:51: PASS: /sys/block/loop0/loop/backing_file = '/tmp/ZRJ6H4/test.img' ioctl_loop01.c:65: PASS: get expected lo_flag 12 ioctl_loop01.c:67: PASS: /sys/block/loop0/loop/partscan = 1 ioctl_loop01.c:68: PASS: /sys/block/loop0/loop/autoclear = 1 ioctl_loop01.c:77: PASS: access /dev/loop0p1 succeeds ioctl_loop01.c:83: PASS: access /sys/block/loop0/loop0p1 succeeds Summary: passed 8 failed 0 skipped 0 warnings 0 Fixes: faf1d25440d6 ("loop: Clean up LOOP_SET_STATUS lo_flags handling") Reported-by: Naresh Kamboju Signed-off-by: Martijn Coenen Tested-by: Naresh Kamboju Signed-off-by: Jens Axboe (cherry picked from commit 6ac92fb5cdff6e5708199f1d5d9d58011ccc76a0) Bug: 148607611 Change-Id: I86096f7d77854d1e63c92578997bbbd8beebff81 --- drivers/block/loop.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/loop.c 
b/drivers/block/loop.c index e6b5c5cde802..10824bc7c198 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1354,7 +1354,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) goto out_unfreeze; /* Mask out flags that can't be set using LOOP_SET_STATUS. */ - lo->lo_flags &= ~LOOP_SET_STATUS_SETTABLE_FLAGS; + lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS; /* For those flags, use the previous values instead */ lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS; /* For flags that can't be cleared, use previous values too */ -- GitLab From 9c4f7a8c8d4d65df054540340806cb7a7bac6e0b Mon Sep 17 00:00:00 2001 From: Erik Ekman Date: Fri, 17 Jul 2020 20:51:18 +0200 Subject: [PATCH 0076/1304] USB: serial: qcserial: add EM7305 QDL product ID commit d2a4309c1ab6df424b2239fe2920d6f26f808d17 upstream. When running qmi-firmware-update on the Sierra Wireless EM7305 in a Toshiba laptop, it changed product ID to 0x9062 when entering QDL mode: usb 2-4: new high-speed USB device number 78 using xhci_hcd usb 2-4: New USB device found, idVendor=1199, idProduct=9062, bcdDevice= 0.00 usb 2-4: New USB device strings: Mfr=1, Product=2, SerialNumber=0 usb 2-4: Product: EM7305 usb 2-4: Manufacturer: Sierra Wireless, Incorporated The upgrade could complete after running # echo 1199 9062 > /sys/bus/usb-serial/drivers/qcserial/new_id qcserial 2-4:1.0: Qualcomm USB modem converter detected usb 2-4: Qualcomm USB modem converter now attached to ttyUSB0 Signed-off-by: Erik Ekman Link: https://lore.kernel.org/r/20200717185118.3640219-1-erik@kryo.se Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/qcserial.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index d147feae83e6..0f60363c1bbc 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = 
{ {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9062)}, /* Sierra Wireless EM7305 QDL */ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ -- GitLab From 39dbda7fbd5fb10b0ee07e8fb8f8af7429f0ea47 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sun, 26 Jul 2020 11:49:39 +0200 Subject: [PATCH 0077/1304] USB: iowarrior: fix up report size handling for some devices commit 17a82716587e9d7c3b246a789add490b2b5dcab6 upstream. In previous patches that added support for new iowarrior devices, the handling of the report size was not done correct. Fix that up and update the copyright date for the driver Reworked from an original patch written by Christoph Jung. Fixes: bab5417f5f01 ("USB: misc: iowarrior: add support for the 100 device") Fixes: 5f6f8da2d7b5 ("USB: misc: iowarrior: add support for the 28 and 28L devices") Fixes: 461d8deb26a7 ("USB: misc: iowarrior: add support for 2 OEMed devices") Cc: stable Reported-by: Christoph Jung Link: https://lore.kernel.org/r/20200726094939.1268978-1-gregkh@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/iowarrior.c | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 92875a264b14..9c1ca20d4139 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -2,8 +2,9 @@ /* * Native support for the I/O-Warrior USB devices * - * Copyright (c) 2003-2005 Code Mercenaries GmbH - * written by Christian Lucht + * Copyright (c) 2003-2005, 2020 Code Mercenaries GmbH + * written by Christian Lucht and + * Christoph Jung * * based on @@ -817,14 +818,28 @@ static int iowarrior_probe(struct usb_interface 
*interface, /* we have to check the report_size often, so remember it in the endianness suitable for our machine */ dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint); - if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) && - ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) || - (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100))) - /* IOWarrior56 has wMaxPacketSize different from report size */ - dev->report_size = 7; + + /* + * Some devices need the report size to be different than the + * endpoint size. + */ + if (dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) { + switch (dev->product_id) { + case USB_DEVICE_ID_CODEMERCS_IOW56: + case USB_DEVICE_ID_CODEMERCS_IOW56AM: + dev->report_size = 7; + break; + + case USB_DEVICE_ID_CODEMERCS_IOW28: + case USB_DEVICE_ID_CODEMERCS_IOW28L: + dev->report_size = 4; + break; + + case USB_DEVICE_ID_CODEMERCS_IOW100: + dev->report_size = 13; + break; + } + } /* create the urb and buffer for reading */ dev->int_in_urb = usb_alloc_urb(0, GFP_KERNEL); -- GitLab From c2ea6fcfec3e05fbc5384737fb3eb623427bb30c Mon Sep 17 00:00:00 2001 From: Forest Crossman Date: Mon, 27 Jul 2020 23:24:07 -0500 Subject: [PATCH 0078/1304] usb: xhci: define IDs for various ASMedia host controllers commit 1841cb255da41e87bed9573915891d056f80e2e7 upstream. Not all ASMedia host controllers have a device ID that matches its part number. #define some of these IDs to make it clearer at a glance which chips require what quirks. 
Acked-by: Mathias Nyman Signed-off-by: Forest Crossman Link: https://lore.kernel.org/r/20200728042408.180529-2-cyrozap@gmail.com Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-pci.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 9b02e3e3f998..296e614919da 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -47,7 +47,9 @@ #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc +#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 +#define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 static const char hcd_name[] = "xhci_hcd"; @@ -226,13 +228,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x1042) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) xhci->quirks |= XHCI_BROKEN_STREAMS; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x1142) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == 0x2142) + pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI) xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && -- GitLab From 8efb2159c956a28b892fd4c169729b2959c25483 Mon Sep 17 00:00:00 2001 From: Forest Crossman Date: Mon, 27 Jul 2020 23:24:08 -0500 Subject: [PATCH 0079/1304] usb: xhci: Fix ASMedia ASM1142 DMA addressing commit ec37198acca7b4c17b96247697406e47aafe0605 upstream. I've confirmed that the ASMedia ASM1142 has the same problem as the ASM2142/ASM3142, in that it too reports that it supports 64-bit DMA addresses when in fact it does not. 
As with the ASM2142/ASM3142, this can cause problems on systems where the upper bits matter, and adding the XHCI_NO_64BIT_SUPPORT quirk completely fixes the issue. Acked-by: Mathias Nyman Signed-off-by: Forest Crossman Cc: stable Link: https://lore.kernel.org/r/20200728042408.180529-3-cyrozap@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-pci.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 296e614919da..1a6a23e57201 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -49,6 +49,7 @@ #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 +#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 #define PCI_DEVICE_ID_ASMEDIA_2142_XHCI 0x2142 static const char hcd_name[] = "xhci_hcd"; @@ -234,7 +235,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI) + (pdev->device == PCI_DEVICE_ID_ASMEDIA_1142_XHCI || + pdev->device == PCI_DEVICE_ID_ASMEDIA_2142_XHCI)) xhci->quirks |= XHCI_NO_64BIT_SUPPORT; if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && -- GitLab From 21e7fc3f69daa0fd2974edcaa02590c1df81889f Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Mon, 3 Aug 2020 14:46:38 +0800 Subject: [PATCH 0080/1304] Revert "ALSA: hda: call runtime_allow() for all hda controllers" commit 07c9983b567d0ef33aefc063299de95a987e12a8 upstream. This reverts commit 9a6418487b56 ("ALSA: hda: call runtime_allow() for all hda controllers"). The reverted patch already introduced some regressions on some machines: - on gemini-lake machines, the error of "azx_get_response timeout" happens in the hda driver. - on the machines with alc662 codec, the audio jack detection doesn't work anymore. 
Fixes: 9a6418487b56 ("ALSA: hda: call runtime_allow() for all hda controllers") BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=208511 Cc: Signed-off-by: Hui Wang Link: https://lore.kernel.org/r/20200803064638.6139-1-hui.wang@canonical.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/hda_intel.c | 1 - 1 file changed, 1 deletion(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 8e1eb5f243a2..d43245937db7 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2478,7 +2478,6 @@ static int azx_probe_continue(struct azx *chip) if (azx_has_pm_runtime(chip)) { pm_runtime_use_autosuspend(&pci->dev); - pm_runtime_allow(&pci->dev); pm_runtime_put_autosuspend(&pci->dev); } -- GitLab From 34f41d924fc8d5c482a95214581f0b5ede308ce9 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 4 Aug 2020 20:58:15 +0200 Subject: [PATCH 0081/1304] ALSA: seq: oss: Serialize ioctls commit 80982c7e834e5d4e325b6ce33757012ecafdf0bb upstream. Some ioctls via OSS sequencer API may race and lead to UAF when the port create and delete are performed concurrently, as spotted by a couple of syzkaller cases. This patch is an attempt to address it by serializing the ioctls with the existing register_mutex. Basically OSS sequencer API is an obsoleted interface and was designed without much consideration of the concurrency. There are very few applications with it, and the concurrent performance isn't asked, hence this "big hammer" approach should be good enough. 
Reported-by: syzbot+1a54a94bd32716796edd@syzkaller.appspotmail.com Reported-by: syzbot+9d2abfef257f3e2d4713@syzkaller.appspotmail.com Suggested-by: Hillf Danton Cc: Link: https://lore.kernel.org/r/20200804185815.2453-1-tiwai@suse.de Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/seq/oss/seq_oss.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c index e1f44fc86885..ed5bca0db3e7 100644 --- a/sound/core/seq/oss/seq_oss.c +++ b/sound/core/seq/oss/seq_oss.c @@ -181,10 +181,16 @@ static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct seq_oss_devinfo *dp; + long rc; + dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; - return snd_seq_oss_ioctl(dp, cmd, arg); + + mutex_lock(®ister_mutex); + rc = snd_seq_oss_ioctl(dp, cmd, arg); + mutex_unlock(®ister_mutex); + return rc; } #ifdef CONFIG_COMPAT -- GitLab From fbe7e878fea059fb536ac55a8ec7fe72433a95dd Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 30 Jul 2020 12:26:32 -0700 Subject: [PATCH 0082/1304] staging: android: ashmem: Fix lockdep warning for write operation commit 3e338d3c95c735dc3265a86016bb4c022ec7cadc upstream. syzbot report [1] describes a deadlock when write operation against an ashmem fd executed at the time when ashmem is shrinking its cache results in the following lock sequence: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(fs_reclaim); lock(&sb->s_type->i_mutex_key#13); lock(fs_reclaim); lock(&sb->s_type->i_mutex_key#13); kswapd takes fs_reclaim and then inode_lock while generic_perform_write takes inode_lock and then fs_reclaim. However ashmem does not support writing into backing shmem with a write syscall. The only way to change its content is to mmap it and operate on mapped memory. Therefore the race that lockdep is warning about is not valid. 
Resolve this by introducing a separate lockdep class for the backing shmem inodes. [1]: https://lkml.kernel.org/lkml/0000000000000b5f9d059aa2037f@google.com/ Reported-by: syzbot+7a0d9d0b26efefe61780@syzkaller.appspotmail.com Signed-off-by: Suren Baghdasaryan Cc: stable Reviewed-by: Joel Fernandes (Google) Link: https://lore.kernel.org/r/20200730192632.3088194-1-surenb@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/android/ashmem.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index e3df4bf521b5..a97bbd89fae2 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -95,6 +95,15 @@ static DEFINE_MUTEX(ashmem_mutex); static struct kmem_cache *ashmem_area_cachep __read_mostly; static struct kmem_cache *ashmem_range_cachep __read_mostly; +/* + * A separate lockdep class for the backing shmem inodes to resolve the lockdep + * warning about the race between kswapd taking fs_reclaim before inode_lock + * and write syscall taking inode_lock and then fs_reclaim. + * Note that such race is impossible because ashmem does not support write + * syscalls operating on the backing shmem. 
+ */ +static struct lock_class_key backing_shmem_inode_class; + static inline unsigned long range_size(struct ashmem_range *range) { return range->pgend - range->pgstart + 1; @@ -395,6 +404,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) if (!asma->file) { char *name = ASHMEM_NAME_DEF; struct file *vmfile; + struct inode *inode; if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') name = asma->name; @@ -406,6 +416,8 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) goto out; } vmfile->f_mode |= FMODE_LSEEK; + inode = file_inode(vmfile); + lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class); asma->file = vmfile; /* * override mmap operation of the vmfile so that it can't be -- GitLab From 8c4a649c20fec015ebb326f36b47d4e39d9ff5b7 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Fri, 10 Jul 2020 12:09:15 -0400 Subject: [PATCH 0083/1304] Bluetooth: Fix slab-out-of-bounds read in hci_extended_inquiry_result_evt() commit 51c19bf3d5cfaa66571e4b88ba2a6f6295311101 upstream. Check upon `num_rsp` is insufficient. A malformed event packet with a large `num_rsp` number makes hci_extended_inquiry_result_evt() go out of bounds. Fix it. 
This patch fixes the following syzbot bug: https://syzkaller.appspot.com/bug?id=4bf11aa05c4ca51ce0df86e500fce486552dc8d2 Reported-by: syzbot+d8489a79b781849b9c46@syzkaller.appspotmail.com Cc: stable@vger.kernel.org Signed-off-by: Peilin Ye Acked-by: Greg Kroah-Hartman Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- net/bluetooth/hci_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index cdb92b129906..59899dbf742d 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4151,7 +4151,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) -- GitLab From f2d6adb023fc32816d7962c29fd06d8cd71418ee Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Fri, 10 Jul 2020 17:39:18 -0400 Subject: [PATCH 0084/1304] Bluetooth: Prevent out-of-bounds read in hci_inquiry_result_evt() commit 75bbd2ea50ba1c5d9da878a17e92eac02fe0fd3a upstream. Check `num_rsp` before using it as for-loop counter. 
Cc: stable@vger.kernel.org Signed-off-by: Peilin Ye Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- net/bluetooth/hci_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 59899dbf742d..d67ddc92f82e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2360,7 +2360,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) -- GitLab From 48f70ecd6a22f5cf2a6d2670fbc3523fe64bcae8 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Fri, 10 Jul 2020 17:45:26 -0400 Subject: [PATCH 0085/1304] Bluetooth: Prevent out-of-bounds read in hci_inquiry_result_with_rssi_evt() commit 629b49c848ee71244203934347bd7730b0ddee8d upstream. Check `num_rsp` before using it as for-loop counter. Add `unlock` label. 
Cc: stable@vger.kernel.org Signed-off-by: Peilin Ye Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- net/bluetooth/hci_event.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index d67ddc92f82e..2b4a7cf03041 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3948,6 +3948,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct inquiry_info_with_rssi_and_pscan_mode *info; info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -3969,6 +3972,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } else { struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -3989,6 +3995,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } } +unlock: hci_dev_unlock(hdev); } -- GitLab From 546e19dda0645e4ae3d56120e6dc586d4e9acdb0 Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Tue, 30 Jun 2020 13:26:36 -0500 Subject: [PATCH 0086/1304] omapfb: dss: Fix max fclk divider for omap36xx commit 254503a2b186caa668a188dbbd7ab0d25149c0a5 upstream. The drm/omap driver was fixed to correct an issue where using a divider of 32 breaks the DSS despite the TRM stating 32 is a valid number. Through experimentation, it appears that 31 works, and it is consistent with the value used by the drm/omap driver. This patch fixes the divider for fbdev driver instead of the drm. 
Fixes: f76ee892a99e ("omapfb: copy omapdss & displays for omapfb") Cc: #4.5+ Signed-off-by: Adam Ford Reviewed-by: Tomi Valkeinen Cc: Dave Airlie Cc: Rob Clark [b.zolnierkie: mark patch as applicable to stable 4.5+ (was 4.9+)] Signed-off-by: Bartlomiej Zolnierkiewicz Link: https://patchwork.freedesktop.org/patch/msgid/20200630182636.439015-1-aford173@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/omap2/omapfb/dss/dss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c index f0cac9e0eb94..b6c6c24979dd 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c @@ -844,7 +844,7 @@ static const struct dss_features omap34xx_dss_feats = { }; static const struct dss_features omap3630_dss_feats = { - .fck_div_max = 32, + .fck_div_max = 31, .dss_fck_multiplier = 1, .parent_clk_name = "dpll4_ck", .dpi_select_source = &dss_dpi_select_source_omap2_omap3, -- GitLab From 74e42c22f2125bb07ffd9b0cccef120815e68725 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Mon, 27 Jul 2020 14:04:24 +0200 Subject: [PATCH 0087/1304] binder: Prevent context manager from incrementing ref 0 commit 4b836a1426cb0f1ef2a6e211d7e553221594f8fc upstream. Binder is designed such that a binder_proc never has references to itself. If this rule is violated, memory corruption can occur when a process sends a transaction to itself; see e.g. . 
There is a remaining edgecase through which such a transaction-to-self can still occur from the context of a task with BINDER_SET_CONTEXT_MGR access: - task A opens /dev/binder twice, creating binder_proc instances P1 and P2 - P1 becomes context manager - P2 calls ACQUIRE on the magic handle 0, allocating index 0 in its handle table - P1 dies (by closing the /dev/binder fd and waiting a bit) - P2 becomes context manager - P2 calls ACQUIRE on the magic handle 0, allocating index 1 in its handle table [this triggers a warning: "binder: 1974:1974 tried to acquire reference to desc 0, got 1 instead"] - task B opens /dev/binder once, creating binder_proc instance P3 - P3 calls P2 (via magic handle 0) with (void*)1 as argument (two-way transaction) - P2 receives the handle and uses it to call P3 (two-way transaction) - P3 calls P2 (via magic handle 0) (two-way transaction) - P2 calls P2 (via handle 1) (two-way transaction) And then, if P2 does *NOT* accept the incoming transaction work, but instead closes the binder fd, we get a crash. Solve it by preventing the context manager from using ACQUIRE on ref 0. There shouldn't be any legitimate reason for the context manager to do that. Additionally, print a warning if someone manages to find another way to trigger a transaction-to-self bug in the future. 
Cc: stable@vger.kernel.org Fixes: 457b9a6f09f0 ("Staging: android: add binder driver") Acked-by: Todd Kjos Signed-off-by: Jann Horn Reviewed-by: Martijn Coenen Link: https://lore.kernel.org/r/20200727120424.1627555-1-jannh@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index cf4367135a00..112b5b50ad3c 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2862,6 +2862,12 @@ static void binder_transaction(struct binder_proc *proc, goto err_dead_binder; } e->to_node = target_node->debug_id; + if (WARN_ON(proc == target_proc)) { + return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; + goto err_invalid_target_handle; + } if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; @@ -3366,10 +3372,17 @@ static int binder_thread_write(struct binder_proc *proc, struct binder_node *ctx_mgr_node; mutex_lock(&context->context_mgr_node_lock); ctx_mgr_node = context->binder_context_mgr_node; - if (ctx_mgr_node) + if (ctx_mgr_node) { + if (ctx_mgr_node->proc == proc) { + binder_user_error("%d:%d context manager tried to acquire desc 0\n", + proc->pid, thread->pid); + mutex_unlock(&context->context_mgr_node_lock); + return -EINVAL; + } ret = binder_inc_ref_for_node( proc, ctx_mgr_node, strong, NULL, &rdata); + } mutex_unlock(&context->context_mgr_node_lock); } if (ret) -- GitLab From 61219546f3036d2b4a1898be7a38da22e97a3b62 Mon Sep 17 00:00:00 2001 From: Yunhai Zhang Date: Tue, 28 Jul 2020 09:58:03 +0800 Subject: [PATCH 0088/1304] vgacon: Fix for missing check in scrollback handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit ebfdfeeae8c01fcb2b3b74ffaf03876e20835d2d upstream. 
vgacon_scrollback_update() always leaves enough room in the scrollback buffer for the next call, but if the console size changed that room might not actually be enough, and so we need to re-check. The check should be in the loop since vgacon_scrollback_cur->tail is updated in the loop and count may be more than 1 when triggered by CSI M, as Jiri's PoC: #include #include #include #include #include #include #include int main(int argc, char** argv) { int fd = open("/dev/tty1", O_RDWR); unsigned short size[3] = {25, 200, 0}; ioctl(fd, 0x5609, size); // VT_RESIZE write(fd, "\e[1;1H", 6); for (int i = 0; i < 30; i++) write(fd, "\e[10M", 5); } It leads to various crashes as vgacon_scrollback_update writes out of the buffer: BUG: unable to handle page fault for address: ffffc900001752a0 #PF: supervisor write access in kernel mode #PF: error_code(0x0002) - not-present page RIP: 0010:mutex_unlock+0x13/0x30 ... Call Trace: n_tty_write+0x1a0/0x4d0 tty_write+0x1a0/0x2e0 Or to KASAN reports: BUG: KASAN: slab-out-of-bounds in vgacon_scroll+0x57a/0x8ed This fixes CVE-2020-14331. Reported-by: 张云海 Reported-by: Yang Yingliang Reported-by: Kyungtae Kim Fixes: 15bdab959c9b ([PATCH] vgacon: Add support for soft scrollback) Cc: stable@vger.kernel.org Cc: linux-fbdev@vger.kernel.org Cc: Linus Torvalds Cc: Solar Designer Cc: "Srivatsa S. 
Bhat" Cc: Anthony Liguori Cc: Yang Yingliang Cc: Bartlomiej Zolnierkiewicz Cc: Jiri Slaby Signed-off-by: Yunhai Zhang Link: https://lore.kernel.org/r/9fb43895-ca91-9b07-ebfd-808cf854ca95@nsfocus.com Signed-off-by: Greg Kroah-Hartman --- drivers/video/console/vgacon.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index bfaa9ec4bc1f..e079b910feb2 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -251,6 +251,10 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count) p = (void *) (c->vc_origin + t * c->vc_size_row); while (count--) { + if ((vgacon_scrollback_cur->tail + c->vc_size_row) > + vgacon_scrollback_cur->size) + vgacon_scrollback_cur->tail = 0; + scr_memcpyw(vgacon_scrollback_cur->data + vgacon_scrollback_cur->tail, p, c->vc_size_row); -- GitLab From ab1a602a9cea98aa37b2e6851b168d2a2633a58d Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 16 Jul 2020 13:53:46 +0200 Subject: [PATCH 0089/1304] mtd: properly check all write ioctls for permissions commit f7e6b19bc76471ba03725fe58e0c218a3d6266c3 upstream. When doing a "write" ioctl call, properly check that we have permissions to do so before copying anything from userspace or anything else so we can "fail fast". This includes also covering the MEMWRITE ioctl which previously missed checking for this. 
Cc: Miquel Raynal Cc: Richard Weinberger Cc: Vignesh Raghavendra Cc: stable Signed-off-by: Greg Kroah-Hartman [rw: Fixed locking issue] Signed-off-by: Richard Weinberger Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/mtdchar.c | 56 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 9 deletions(-) diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 02389528f622..5afc653c09e2 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -368,9 +368,6 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, uint32_t retlen; int ret = 0; - if (!(file->f_mode & FMODE_WRITE)) - return -EPERM; - if (length > 4096) return -EINVAL; @@ -655,6 +652,48 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) pr_debug("MTD_ioctl\n"); + /* + * Check the file mode to require "dangerous" commands to have write + * permissions. + */ + switch (cmd) { + /* "safe" commands */ + case MEMGETREGIONCOUNT: + case MEMGETREGIONINFO: + case MEMGETINFO: + case MEMREADOOB: + case MEMREADOOB64: + case MEMLOCK: + case MEMUNLOCK: + case MEMISLOCKED: + case MEMGETOOBSEL: + case MEMGETBADBLOCK: + case MEMSETBADBLOCK: + case OTPSELECT: + case OTPGETREGIONCOUNT: + case OTPGETREGIONINFO: + case OTPLOCK: + case ECCGETLAYOUT: + case ECCGETSTATS: + case MTDFILEMODE: + case BLKPG: + case BLKRRPART: + break; + + /* "dangerous" commands */ + case MEMERASE: + case MEMERASE64: + case MEMWRITEOOB: + case MEMWRITEOOB64: + case MEMWRITE: + if (!(file->f_mode & FMODE_WRITE)) + return -EPERM; + break; + + default: + return -ENOTTY; + } + switch (cmd) { case MEMGETREGIONCOUNT: if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int))) @@ -702,9 +741,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) { struct erase_info *erase; - if(!(file->f_mode & FMODE_WRITE)) - return -EPERM; - erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL); if (!erase) ret = -ENOMEM; @@ -997,9 +1033,6 @@ static int mtdchar_ioctl(struct file 
*file, u_int cmd, u_long arg) ret = 0; break; } - - default: - ret = -ENOTTY; } return ret; @@ -1043,6 +1076,11 @@ static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd, struct mtd_oob_buf32 buf; struct mtd_oob_buf32 __user *buf_user = argp; + if (!(file->f_mode & FMODE_WRITE)) { + ret = -EPERM; + break; + } + if (copy_from_user(&buf, argp, sizeof(buf))) ret = -EFAULT; else -- GitLab From 8bac431d23205e7b9fe4da3bafe4fbd57a562be0 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 1 Jun 2020 15:39:49 +0200 Subject: [PATCH 0090/1304] leds: wm831x-status: fix use-after-free on unbind commit 47a459ecc800a17109d0c496a4e21e478806ee40 upstream. Several MFD child drivers register their class devices directly under the parent device. This means you cannot blindly do devres conversions so that deregistration ends up being tied to the parent device, something which leads to use-after-free on driver unbind when the class device is released while still being registered. Fixes: 8d3b6a4001ce ("leds: wm831x-status: Use devm_led_classdev_register") Cc: stable # 4.6 Cc: Amitoj Kaur Chawla Signed-off-by: Johan Hovold Signed-off-by: Pavel Machek Signed-off-by: Greg Kroah-Hartman --- drivers/leds/leds-wm831x-status.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c index c5798b92e4d3..d926edcb04ee 100644 --- a/drivers/leds/leds-wm831x-status.c +++ b/drivers/leds/leds-wm831x-status.c @@ -273,12 +273,23 @@ static int wm831x_status_probe(struct platform_device *pdev) drvdata->cdev.blink_set = wm831x_status_blink_set; drvdata->cdev.groups = wm831x_status_groups; - ret = devm_led_classdev_register(wm831x->dev, &drvdata->cdev); + ret = led_classdev_register(wm831x->dev, &drvdata->cdev); if (ret < 0) { dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); return ret; } + platform_set_drvdata(pdev, drvdata); + + return 0; +} + +static int wm831x_status_remove(struct 
platform_device *pdev) +{ + struct wm831x_status *drvdata = platform_get_drvdata(pdev); + + led_classdev_unregister(&drvdata->cdev); + return 0; } @@ -287,6 +298,7 @@ static struct platform_driver wm831x_status_driver = { .name = "wm831x-status", }, .probe = wm831x_status_probe, + .remove = wm831x_status_remove, }; module_platform_driver(wm831x_status_driver); -- GitLab From 5f968e6c425dd202f32e093957f643e4e842fdb0 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 1 Jun 2020 15:39:46 +0200 Subject: [PATCH 0091/1304] leds: da903x: fix use-after-free on unbind commit 6f4aa35744f69ed9b0bf5a736c9ca9b44bc1dcea upstream. Several MFD child drivers register their class devices directly under the parent device. This means you cannot blindly do devres conversions so that deregistration ends up being tied to the parent device, something which leads to use-after-free on driver unbind when the class device is released while still being registered. Fixes: eed16255d66b ("leds: da903x: Use devm_led_classdev_register") Cc: stable # 4.6 Cc: Amitoj Kaur Chawla Signed-off-by: Johan Hovold Signed-off-by: Pavel Machek Signed-off-by: Greg Kroah-Hartman --- drivers/leds/leds-da903x.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c index 5ff7d72f73aa..ecc265bb69a0 100644 --- a/drivers/leds/leds-da903x.c +++ b/drivers/leds/leds-da903x.c @@ -113,12 +113,23 @@ static int da903x_led_probe(struct platform_device *pdev) led->flags = pdata->flags; led->master = pdev->dev.parent; - ret = devm_led_classdev_register(led->master, &led->cdev); + ret = led_classdev_register(led->master, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", id); return ret; } + platform_set_drvdata(pdev, led); + + return 0; +} + +static int da903x_led_remove(struct platform_device *pdev) +{ + struct da903x_led *led = platform_get_drvdata(pdev); + + led_classdev_unregister(&led->cdev); + return 0; } @@ 
-127,6 +138,7 @@ static struct platform_driver da903x_led_driver = { .name = "da903x-led", }, .probe = da903x_led_probe, + .remove = da903x_led_remove, }; module_platform_driver(da903x_led_driver); -- GitLab From 8334dd9adeee9ac322bd29c136afbadcba8ce49c Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 1 Jun 2020 15:39:47 +0200 Subject: [PATCH 0092/1304] leds: lm3533: fix use-after-free on unbind commit d584221e683bbd173738603b83a315f27d27d043 upstream. Several MFD child drivers register their class devices directly under the parent device. This means you cannot blindly do devres conversions so that deregistration ends up being tied to the parent device, something which leads to use-after-free on driver unbind when the class device is released while still being registered. Fixes: 50154e29e5cc ("leds: lm3533: Use devm_led_classdev_register") Cc: stable # 4.6 Cc: Amitoj Kaur Chawla Signed-off-by: Johan Hovold Signed-off-by: Pavel Machek Signed-off-by: Greg Kroah-Hartman --- drivers/leds/leds-lm3533.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index 72224b599ffc..c1e562a4d6ad 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -698,7 +698,7 @@ static int lm3533_led_probe(struct platform_device *pdev) platform_set_drvdata(pdev, led); - ret = devm_led_classdev_register(pdev->dev.parent, &led->cdev); + ret = led_classdev_register(pdev->dev.parent, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id); return ret; @@ -708,13 +708,18 @@ static int lm3533_led_probe(struct platform_device *pdev) ret = lm3533_led_setup(led, pdata); if (ret) - return ret; + goto err_deregister; ret = lm3533_ctrlbank_enable(&led->cb); if (ret) - return ret; + goto err_deregister; return 0; + +err_deregister: + led_classdev_unregister(&led->cdev); + + return ret; } static int lm3533_led_remove(struct platform_device *pdev) @@ -724,6 +729,7 @@ 
static int lm3533_led_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s\n", __func__); lm3533_ctrlbank_disable(&led->cb); + led_classdev_unregister(&led->cdev); return 0; } -- GitLab From 6ed56511407fcdba01f05f2228711dca2135b921 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 1 Jun 2020 15:39:45 +0200 Subject: [PATCH 0093/1304] leds: 88pm860x: fix use-after-free on unbind commit eca21c2d8655387823d695b26e6fe78cf3975c05 upstream. Several MFD child drivers register their class devices directly under the parent device. This means you cannot blindly do devres conversions so that deregistration ends up being tied to the parent device, something which leads to use-after-free on driver unbind when the class device is released while still being registered. Fixes: 375446df95ee ("leds: 88pm860x: Use devm_led_classdev_register") Cc: stable # 4.6 Cc: Amitoj Kaur Chawla Signed-off-by: Johan Hovold Signed-off-by: Pavel Machek Signed-off-by: Greg Kroah-Hartman --- drivers/leds/leds-88pm860x.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index 77a104d2b124..13f414ff6fd0 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c @@ -207,21 +207,33 @@ static int pm860x_led_probe(struct platform_device *pdev) data->cdev.brightness_set_blocking = pm860x_led_set; mutex_init(&data->lock); - ret = devm_led_classdev_register(chip->dev, &data->cdev); + ret = led_classdev_register(chip->dev, &data->cdev); if (ret < 0) { dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); return ret; } pm860x_led_set(&data->cdev, 0); + + platform_set_drvdata(pdev, data); + return 0; } +static int pm860x_led_remove(struct platform_device *pdev) +{ + struct pm860x_led *data = platform_get_drvdata(pdev); + + led_classdev_unregister(&data->cdev); + + return 0; +} static struct platform_driver pm860x_led_driver = { .driver = { .name = "88pm860x-led", }, .probe = 
pm860x_led_probe, + .remove = pm860x_led_remove, }; module_platform_driver(pm860x_led_driver); -- GitLab From af224c2eeda2bd6679355f588766c5a8da8920a2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Jul 2020 10:57:22 +0200 Subject: [PATCH 0094/1304] net/9p: validate fds in p9_fd_open [ Upstream commit a39c46067c845a8a2d7144836e9468b7f072343e ] p9_fd_open just fgets file descriptors passed in from userspace, but doesn't verify that they are valid for read or writing. This gets caught down in the VFS when actually attempting a read or write, but a new warning added in linux-next upsets syzkaller. Fix this by just verifying the fds early on. Link: http://lkml.kernel.org/r/20200710085722.435850-1-hch@lst.de Reported-by: syzbot+e6f77e16ff68b2434a2c@syzkaller.appspotmail.com Signed-off-by: Christoph Hellwig [Dominique: amend goto as per Doug Nazar's review] Signed-off-by: Dominique Martinet Signed-off-by: Sasha Levin --- net/9p/trans_fd.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index d28c2cc9618f..b6dcb40fa8a7 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -831,20 +831,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd) return -ENOMEM; ts->rd = fget(rfd); + if (!ts->rd) + goto out_free_ts; + if (!(ts->rd->f_mode & FMODE_READ)) + goto out_put_rd; ts->wr = fget(wfd); - if (!ts->rd || !ts->wr) { - if (ts->rd) - fput(ts->rd); - if (ts->wr) - fput(ts->wr); - kfree(ts); - return -EIO; - } + if (!ts->wr) + goto out_put_rd; + if (!(ts->wr->f_mode & FMODE_WRITE)) + goto out_put_wr; client->trans = ts; client->status = Connected; return 0; + +out_put_wr: + fput(ts->wr); +out_put_rd: + fput(ts->rd); +out_free_ts: + kfree(ts); + return -EIO; } static int p9_socket_open(struct p9_client *client, struct socket *csocket) -- GitLab From aa42be211646b790a061768587ce5af26d828eca Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 24 Jul 2020 17:01:39 
+1000 Subject: [PATCH 0095/1304] drm/nouveau/fbcon: fix module unload when fbcon init has failed for some reason [ Upstream commit 498595abf5bd51f0ae074cec565d888778ea558f ] Stale pointer was tripping up the unload path. Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 0f64c0a1d4b3..fef38ea146a2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -599,6 +599,7 @@ nouveau_fbcon_init(struct drm_device *dev) drm_fb_helper_fini(&fbcon->helper); free: kfree(fbcon); + drm->fbcon = NULL; return ret; } -- GitLab From 1e64cb7dabcc7340ab04d0258ea61b72babfe79c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 24 Jul 2020 17:02:48 +1000 Subject: [PATCH 0096/1304] drm/nouveau/fbcon: zero-initialise the mode_cmd2 structure [ Upstream commit 15fbc3b938534cc8eaac584a7b0c1183fc968b86 ] This is tripping up the format modifier patches. 
Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index fef38ea146a2..406cb99af7f2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, struct nouveau_framebuffer *fb; struct nouveau_channel *chan; struct nouveau_bo *nvbo; - struct drm_mode_fb_cmd2 mode_cmd; + struct drm_mode_fb_cmd2 mode_cmd = {}; int ret; mode_cmd.width = sizes->surface_width; -- GitLab From c79c21c791fa073b05f0e6fc14539a6701bcc39f Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Sat, 25 Jul 2020 21:50:52 +0200 Subject: [PATCH 0097/1304] i2c: slave: improve sanity check when registering [ Upstream commit 1b1be3bf27b62f5abcf85c6f3214bdb9c7526685 ] Add check for ERR_PTR and simplify code while here. Signed-off-by: Wolfram Sang Reviewed-by: Alain Volmat Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/i2c-core-slave.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index 47a9f70a24a9..88959c8580ce 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -22,10 +22,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) { int ret; - if (!client || !slave_cb) { - WARN(1, "insufficient data\n"); + if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n")) return -EINVAL; - } if (!(client->flags & I2C_CLIENT_SLAVE)) dev_warn(&client->dev, "%s: client slave flag not set. 
You might see address collisions\n", -- GitLab From e247fc1b14f7730e4d1314005ec168a7e9a12e7a Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Sat, 25 Jul 2020 21:50:53 +0200 Subject: [PATCH 0098/1304] i2c: slave: add sanity check when unregistering [ Upstream commit 8808981baf96e1b3dea1f08461e4d958aa0dbde1 ] Signed-off-by: Wolfram Sang Reviewed-by: Alain Volmat Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/i2c-core-slave.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c index 88959c8580ce..f2e7e373ee47 100644 --- a/drivers/i2c/i2c-core-slave.c +++ b/drivers/i2c/i2c-core-slave.c @@ -62,6 +62,9 @@ int i2c_slave_unregister(struct i2c_client *client) { int ret; + if (IS_ERR_OR_NULL(client)) + return -EINVAL; + if (!client->adapter->algo->unreg_slave) { dev_err(&client->dev, "%s: not supported by adapter\n", __func__); return -EOPNOTSUPP; -- GitLab From a46691bb7cc2686934748c8b33e4b9f5dc936aeb Mon Sep 17 00:00:00 2001 From: Rustam Kovhaev Date: Mon, 27 Jul 2020 23:42:17 -0700 Subject: [PATCH 0099/1304] usb: hso: check for return value in hso_serial_common_create() [ Upstream commit e911e99a0770f760377c263bc7bac1b1593c6147 ] in case of an error tty_register_device_attr() returns ERR_PTR(), add IS_ERR() check Reported-and-tested-by: syzbot+67b2bd0e34f952d0321e@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?extid=67b2bd0e34f952d0321e Signed-off-by: Rustam Kovhaev Reviewed-by: Greg Kroah-Hartman Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/usb/hso.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 61b9d3368148..bff268b4a9a4 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2274,12 +2274,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, minor = get_free_serial_index(); if (minor < 0) - goto exit; + goto exit2; /* register our minor number */ serial->parent->dev = tty_port_register_device_attr(&serial->port, tty_drv, minor, &serial->parent->interface->dev, serial->parent, hso_serial_dev_groups); + if (IS_ERR(serial->parent->dev)) + goto exit2; /* fill in specific data for later use */ serial->minor = minor; @@ -2324,6 +2326,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs, return 0; exit: hso_serial_tty_unregister(serial); +exit2: hso_serial_common_free(serial); return -1; } -- GitLab From 937dafe8682044e70821c886d9063869744b3057 Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Sat, 13 Jun 2020 14:05:33 -0500 Subject: [PATCH 0100/1304] firmware: Fix a reference count leak. [ Upstream commit fe3c60684377d5ad9b0569b87ed3e26e12c8173b ] kobject_init_and_add() takes reference even when it fails. If this function returns an error, kobject_put() must be called to properly clean up the memory associated with the object. Callback function fw_cfg_sysfs_release_entry() in kobject_put() can handle the pointer "entry" properly. Signed-off-by: Qiushi Wu Link: https://lore.kernel.org/r/20200613190533.15712-1-wu000273@umn.edu Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Sasha Levin --- drivers/firmware/qemu_fw_cfg.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 039e0f91dba8..6945c3c96637 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) - goto err_register; + if (err) { + kobject_put(&entry->kobj); + return err; + } /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); @@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) err_add_raw: kobject_del(&entry->kobj); -err_register: kfree(entry); return err; } -- GitLab From 8881425b926e6aa4164113401069d03fab52dc39 Mon Sep 17 00:00:00 2001 From: Julian Squires Date: Mon, 6 Jul 2020 17:13:53 -0400 Subject: [PATCH 0101/1304] cfg80211: check vendor command doit pointer before use [ Upstream commit 4052d3d2e8f47a15053320bbcbe365d15610437d ] In the case where a vendor command does not implement doit, and has no flags set, doit would not be validated and a NULL pointer dereference would occur, for example when invoking the vendor command via iw. I encountered this while developing new vendor commands. Perhaps in practice it is advisable to always implement doit along with dumpit, but it seems reasonable to me to always check doit anyway, not just when NEED_WDEV. 
Signed-off-by: Julian Squires Link: https://lore.kernel.org/r/20200706211353.2366470-1-julian@cipht.net Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin --- net/wireless/nl80211.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0221849b7218..996b68b48a87 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -12392,13 +12392,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info) if (!wdev_running(wdev)) return -ENETDOWN; } - - if (!vcmd->doit) - return -EOPNOTSUPP; } else { wdev = NULL; } + if (!vcmd->doit) + return -EOPNOTSUPP; + if (info->attrs[NL80211_ATTR_VENDOR_DATA]) { data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]); len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]); -- GitLab From 0adedbf7a0c337a7d4a6e5660ca7189fdab82fd2 Mon Sep 17 00:00:00 2001 From: Francesco Ruggeri Date: Thu, 2 Jul 2020 15:39:06 -0700 Subject: [PATCH 0102/1304] igb: reinit_locked() should be called with rtnl_lock [ Upstream commit 024a8168b749db7a4aa40a5fbdfa04bf7e77c1c0 ] We observed two panics involving races with igb_reset_task. The first panic is caused by this race condition: kworker reboot -f igb_reset_task igb_reinit_locked igb_down napi_synchronize __igb_shutdown igb_clear_interrupt_scheme igb_free_q_vectors igb_free_q_vector adapter->q_vector[v_idx] = NULL; napi_disable Panics trying to access adapter->q_vector[v_idx].napi_state The second panic (a divide error) is caused by this race: kworker reboot -f tx packet igb_reset_task __igb_shutdown rtnl_lock() ... igb_clear_interrupt_scheme igb_free_q_vectors adapter->num_tx_queues = 0 ... 
rtnl_unlock() rtnl_lock() igb_reinit_locked igb_down igb_up netif_tx_start_all_queues dev_hard_start_xmit igb_xmit_frame igb_tx_queue_mapping Panics on r_idx % adapter->num_tx_queues This commit applies to igb_reset_task the same changes that were applied to ixgbe in commit 2f90b8657ec9 ("ixgbe: this patch adds support for DCB to the kernel and ixgbe driver"), commit 8f4c5c9fb87a ("ixgbe: reinit_locked() should be called with rtnl_lock") and commit 88adce4ea8f9 ("ixgbe: fix possible race in reset subtask"). Signed-off-by: Francesco Ruggeri Tested-by: Aaron Brown Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/igb/igb_main.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 36db874f3c92..d85eb80d8249 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6226,9 +6226,18 @@ static void igb_reset_task(struct work_struct *work) struct igb_adapter *adapter; adapter = container_of(work, struct igb_adapter, reset_task); + rtnl_lock(); + /* If we're already down or resetting, just bail */ + if (test_bit(__IGB_DOWN, &adapter->state) || + test_bit(__IGB_RESETTING, &adapter->state)) { + rtnl_unlock(); + return; + } + igb_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); igb_reinit_locked(adapter); + rtnl_unlock(); } /** -- GitLab From fea1298d57f0ddf05caee0b01c44f4a9b253526a Mon Sep 17 00:00:00 2001 From: Xin Xiong Date: Wed, 29 Jul 2020 21:06:59 +0800 Subject: [PATCH 0103/1304] atm: fix atm_dev refcnt leaks in atmtcp_remove_persistent [ Upstream commit 51875dad43b44241b46a569493f1e4bfa0386d86 ] atmtcp_remove_persistent() invokes atm_dev_lookup(), which returns a reference of atm_dev with increased refcount or NULL if fails. The refcount leaks issues occur in two error handling paths. 
If dev_data->persist is zero or PRIV(dev)->vcc isn't NULL, the function returns 0 without decreasing the refcount kept by a local variable, resulting in refcount leaks. Fix the issue by adding atm_dev_put() before returning 0 both when dev_data->persist is zero or PRIV(dev)->vcc isn't NULL. Signed-off-by: Xin Xiong Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/atm/atmtcp.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index afebeb1c3e1e..723bad1201cc 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -432,9 +432,15 @@ static int atmtcp_remove_persistent(int itf) return -EMEDIUMTYPE; } dev_data = PRIV(dev); - if (!dev_data->persist) return 0; + if (!dev_data->persist) { + atm_dev_put(dev); + return 0; + } dev_data->persist = 0; - if (PRIV(dev)->vcc) return 0; + if (PRIV(dev)->vcc) { + atm_dev_put(dev); + return 0; + } kfree(dev_data); atm_dev_put(dev); atm_dev_deregister(dev); -- GitLab From b5650e4f6430f23dcb412a02dc1e9ba572f1b24d Mon Sep 17 00:00:00 2001 From: Philippe Duplessis-Guindon Date: Thu, 30 Jul 2020 11:02:36 -0400 Subject: [PATCH 0104/1304] tools lib traceevent: Fix memory leak in process_dynamic_array_len [ Upstream commit e24c6447ccb7b1a01f9bf0aec94939e6450c0b4d ] I compiled with AddressSanitizer and I had these memory leaks while I was using the tep_parse_format function: Direct leak of 28 byte(s) in 4 object(s) allocated from: #0 0x7fb07db49ffe in __interceptor_realloc (/lib/x86_64-linux-gnu/libasan.so.5+0x10dffe) #1 0x7fb07a724228 in extend_token /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:985 #2 0x7fb07a724c21 in __read_token /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:1140 #3 0x7fb07a724f78 in read_token /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:1206 #4 0x7fb07a725191 in __read_expect_type 
/home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:1291 #5 0x7fb07a7251df in read_expect_type /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:1299 #6 0x7fb07a72e6c8 in process_dynamic_array_len /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:2849 #7 0x7fb07a7304b8 in process_function /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:3161 #8 0x7fb07a730900 in process_arg_token /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:3207 #9 0x7fb07a727c0b in process_arg /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:1786 #10 0x7fb07a731080 in event_read_print_args /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:3285 #11 0x7fb07a731722 in event_read_print /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:3369 #12 0x7fb07a740054 in __tep_parse_format /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:6335 #13 0x7fb07a74047a in __parse_event /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:6389 #14 0x7fb07a740536 in tep_parse_format /home/pduplessis/repo/linux/tools/lib/traceevent/event-parse.c:6431 #15 0x7fb07a785acf in parse_event ../../../src/fs-src/fs.c:251 #16 0x7fb07a785ccd in parse_systems ../../../src/fs-src/fs.c:284 #17 0x7fb07a786fb3 in read_metadata ../../../src/fs-src/fs.c:593 #18 0x7fb07a78760e in ftrace_fs_source_init ../../../src/fs-src/fs.c:727 #19 0x7fb07d90c19c in add_component_with_init_method_data ../../../../src/lib/graph/graph.c:1048 #20 0x7fb07d90c87b in add_source_component_with_initialize_method_data ../../../../src/lib/graph/graph.c:1127 #21 0x7fb07d90c92a in bt_graph_add_source_component ../../../../src/lib/graph/graph.c:1152 #22 0x55db11aa632e in cmd_run_ctx_create_components_from_config_components ../../../src/cli/babeltrace2.c:2252 #23 0x55db11aa6fda in cmd_run_ctx_create_components ../../../src/cli/babeltrace2.c:2347 #24 0x55db11aa780c in cmd_run ../../../src/cli/babeltrace2.c:2461 #25 0x55db11aa8a7d in main 
../../../src/cli/babeltrace2.c:2673 #26 0x7fb07d5460b2 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x270b2) The token variable in the process_dynamic_array_len function is allocated in the read_expect_type function, but is not freed before calling the read_token function. Free the token variable before calling read_token in order to plug the leak. Signed-off-by: Philippe Duplessis-Guindon Reviewed-by: Steven Rostedt (VMware) Link: https://lore.kernel.org/linux-trace-devel/20200730150236.5392-1-pduplessis@efficios.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/lib/traceevent/event-parse.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 382e476629fb..c0fcc8af2a3e 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -2766,6 +2766,7 @@ process_dynamic_array_len(struct event_format *event, struct print_arg *arg, if (read_expected(EVENT_DELIM, ")") < 0) goto out_err; + free_token(token); type = read_token(&token); *tok = token; -- GitLab From 0a3172f9c571ce8cf79a399c2d602be95fd9229c Mon Sep 17 00:00:00 2001 From: Dexuan Cui Date: Sun, 19 Jan 2020 15:29:22 -0800 Subject: [PATCH 0105/1304] Drivers: hv: vmbus: Ignore CHANNELMSG_TL_CONNECT_RESULT(23) [ Upstream commit ddc9d357b991838c2d975e8d7e4e9db26f37a7ff ] When a Linux hv_sock app tries to connect to a Service GUID on which no host app is listening, a recent host (RS3+) sends a CHANNELMSG_TL_CONNECT_RESULT (23) message to Linux and this triggers such a warning: unknown msgtype=23 WARNING: CPU: 2 PID: 0 at drivers/hv/vmbus_drv.c:1031 vmbus_on_msg_dpc Actually Linux can safely ignore the message because the Linux app's connect() will time out in 2 seconds: see VSOCK_DEFAULT_CONNECT_TIMEOUT and vsock_stream_connect(). 
We don't bother to make use of the message because: 1) it's only supported on recent hosts; 2) a non-trivial effort is required to use the message in Linux, but the benefit is small. So, let's not see the warning by silently ignoring the message. Signed-off-by: Dexuan Cui Reviewed-by: Michael Kelley Signed-off-by: Sasha Levin --- drivers/hv/channel_mgmt.c | 21 +++++++-------------- drivers/hv/vmbus_drv.c | 4 ++++ include/linux/hyperv.h | 2 ++ 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 3bf1f9ef8ea2..c83361a8e203 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -1249,6 +1249,8 @@ channel_message_table[CHANNELMSG_COUNT] = { { CHANNELMSG_19, 0, NULL }, { CHANNELMSG_20, 0, NULL }, { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL }, + { CHANNELMSG_22, 0, NULL }, + { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL }, }; /* @@ -1260,25 +1262,16 @@ void vmbus_onmessage(void *context) { struct hv_message *msg = context; struct vmbus_channel_message_header *hdr; - int size; hdr = (struct vmbus_channel_message_header *)msg->u.payload; - size = msg->header.payload_size; trace_vmbus_on_message(hdr); - if (hdr->msgtype >= CHANNELMSG_COUNT) { - pr_err("Received invalid channel message type %d size %d\n", - hdr->msgtype, size); - print_hex_dump_bytes("", DUMP_PREFIX_NONE, - (unsigned char *)msg->u.payload, size); - return; - } - - if (channel_message_table[hdr->msgtype].message_handler) - channel_message_table[hdr->msgtype].message_handler(hdr); - else - pr_err("Unhandled channel message type %d\n", hdr->msgtype); + /* + * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go + * out of bound and the message_handler pointer can not be NULL. 
+ */ + channel_message_table[hdr->msgtype].message_handler(hdr); } /* diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index fb22b72fd535..0699c6018889 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -939,6 +939,10 @@ void vmbus_on_msg_dpc(unsigned long data) } entry = &channel_message_table[hdr->msgtype]; + + if (!entry->message_handler) + goto msg_handled; + if (entry->handler_type == VMHT_BLOCKING) { ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); if (ctx == NULL) diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index c43e694fef7d..35461d49d3ae 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -428,6 +428,8 @@ enum vmbus_channel_message_type { CHANNELMSG_19 = 19, CHANNELMSG_20 = 20, CHANNELMSG_TL_CONNECT_REQUEST = 21, + CHANNELMSG_22 = 22, + CHANNELMSG_TL_CONNECT_RESULT = 23, CHANNELMSG_COUNT }; -- GitLab From fabe9d6cc1663deafba59499829e0c4c28971345 Mon Sep 17 00:00:00 2001 From: Frank van der Linden Date: Tue, 23 Jun 2020 22:39:18 +0000 Subject: [PATCH 0106/1304] xattr: break delegations in {set,remove}xattr commit 08b5d5014a27e717826999ad20e394a8811aae92 upstream. set/removexattr on an exported filesystem should break NFS delegations. This is true in general, but also for the upcoming support for RFC 8726 (NFSv4 extended attribute support). Make sure that they do. Additionally, they need to grow a _locked variant, since callers might call this with i_rwsem held (like the NFS server code). 
Cc: stable@vger.kernel.org # v4.9+ Cc: linux-fsdevel@vger.kernel.org Cc: Al Viro Signed-off-by: Frank van der Linden Signed-off-by: Chuck Lever Signed-off-by: Greg Kroah-Hartman --- fs/xattr.c | 84 +++++++++++++++++++++++++++++++++++++++---- include/linux/xattr.h | 2 ++ 2 files changed, 79 insertions(+), 7 deletions(-) diff --git a/fs/xattr.c b/fs/xattr.c index 0d6a6a4af861..470ee0af3200 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -203,10 +203,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, return error; } - +/** + * __vfs_setxattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - xattr name to set + * @value - value to set @name to + * @size - size of @value + * @flags - flags to pass into filesystem operations + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. + */ int -vfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) +__vfs_setxattr_locked(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -215,15 +227,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value, if (error) return error; - inode_lock(inode); error = security_inode_setxattr(dentry, name, value, size, flags); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_setxattr_noperm(dentry, name, value, size, flags); out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_setxattr_locked); + +int +vfs_setxattr(struct dentry *dentry, const char *name, const void *value, + size_t size, int flags) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_setxattr_locked(dentry, name, value, size, flags, + 
&delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } return error; } EXPORT_SYMBOL_GPL(vfs_setxattr); @@ -377,8 +414,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name) } EXPORT_SYMBOL(__vfs_removexattr); +/** + * __vfs_removexattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - name of xattr to remove + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. + */ int -vfs_removexattr(struct dentry *dentry, const char *name) +__vfs_removexattr_locked(struct dentry *dentry, const char *name, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -387,11 +434,14 @@ vfs_removexattr(struct dentry *dentry, const char *name) if (error) return error; - inode_lock(inode); error = security_inode_removexattr(dentry, name); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_removexattr(dentry, name); if (!error) { @@ -400,12 +450,32 @@ vfs_removexattr(struct dentry *dentry, const char *name) } out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_removexattr_locked); + +int +vfs_removexattr(struct dentry *dentry, const char *name) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_removexattr_locked(dentry, name, &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } + return error; } EXPORT_SYMBOL_GPL(vfs_removexattr); - /* * Extended attribute SET operations */ diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 6dad031be3c2..3a71ad716da5 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -51,8 +51,10 @@ ssize_t 
vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); +int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int __vfs_removexattr(struct dentry *, const char *); +int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); int vfs_removexattr(struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); -- GitLab From eab3600b6fa4a094d4eff1b65ba6cd581d408c81 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 29 Jul 2020 11:37:13 +0300 Subject: [PATCH 0107/1304] ipv4: Silence suspicious RCU usage warning [ Upstream commit 83f3522860f702748143e022f1a546547314c715 ] fib_trie_unmerge() is called with RTNL held, but not from an RCU read-side critical section. This leads to the following warning [1] when the FIB alias list in a leaf is traversed with hlist_for_each_entry_rcu(). Since the function is always called with RTNL held and since modification of the list is protected by RTNL, simply use hlist_for_each_entry() and silence the warning. [1] WARNING: suspicious RCU usage 5.8.0-rc4-custom-01520-gc1f937f3f83b #30 Not tainted ----------------------------- net/ipv4/fib_trie.c:1867 RCU-list traversed in non-reader section!! 
other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by ip/164: #0: ffffffff85a27850 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x49a/0xbd0 stack backtrace: CPU: 0 PID: 164 Comm: ip Not tainted 5.8.0-rc4-custom-01520-gc1f937f3f83b #30 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-2.fc32 04/01/2014 Call Trace: dump_stack+0x100/0x184 lockdep_rcu_suspicious+0x153/0x15d fib_trie_unmerge+0x608/0xdb0 fib_unmerge+0x44/0x360 fib4_rule_configure+0xc8/0xad0 fib_nl_newrule+0x37a/0x1dd0 rtnetlink_rcv_msg+0x4f7/0xbd0 netlink_rcv_skb+0x17a/0x480 rtnetlink_rcv+0x22/0x30 netlink_unicast+0x5ae/0x890 netlink_sendmsg+0x98a/0xf40 ____sys_sendmsg+0x879/0xa00 ___sys_sendmsg+0x122/0x190 __sys_sendmsg+0x103/0x1d0 __x64_sys_sendmsg+0x7d/0xb0 do_syscall_64+0x54/0xa0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7fc80a234e97 Code: Bad RIP value. RSP: 002b:00007ffef8b66798 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007fc80a234e97 RDX: 0000000000000000 RSI: 00007ffef8b66800 RDI: 0000000000000003 RBP: 000000005f141b1c R08: 0000000000000001 R09: 0000000000000000 R10: 00007fc80a2a8ac0 R11: 0000000000000246 R12: 0000000000000001 R13: 0000000000000000 R14: 00007ffef8b67008 R15: 0000556fccb10020 Fixes: 0ddcf43d5d4a ("ipv4: FIB Local/MAIN table collapse") Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/fib_trie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 3047fc4737c4..48d7125501b4 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1749,7 +1749,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) while ((l = leaf_walk_rcu(&tp, key)) != NULL) { struct key_vector *local_l = NULL, *local_tp; - hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { + hlist_for_each_entry(fa, &l->leaf, fa_list) { struct fib_alias *new_fa; if (local_tb->tb_id != fa->tb_id) -- GitLab From 9c8652db5cd45f727071c42c9c675761133a58ae Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 25 Jul 2020 15:40:53 -0700 Subject: [PATCH 0108/1304] ipv6: fix memory leaks on IPV6_ADDRFORM path [ Upstream commit 8c0de6e96c9794cb523a516c465991a70245da1c ] IPV6_ADDRFORM causes resource leaks when converting an IPv6 socket to IPv4, particularly struct ipv6_ac_socklist. Similar to struct ipv6_mc_socklist, we should just close it on this path. This bug can be easily reproduced with the following C program: #include #include #include #include #include int main() { int s, value; struct sockaddr_in6 addr; struct ipv6_mreq m6; s = socket(AF_INET6, SOCK_DGRAM, 0); addr.sin6_family = AF_INET6; addr.sin6_port = htons(5000); inet_pton(AF_INET6, "::ffff:192.168.122.194", &addr.sin6_addr); connect(s, (struct sockaddr *)&addr, sizeof(addr)); inet_pton(AF_INET6, "fe80::AAAA", &m6.ipv6mr_multiaddr); m6.ipv6mr_interface = 5; setsockopt(s, SOL_IPV6, IPV6_JOIN_ANYCAST, &m6, sizeof(m6)); value = AF_INET; setsockopt(s, SOL_IPV6, IPV6_ADDRFORM, &value, sizeof(value)); close(s); return 0; } Reported-by: ch3332xr@gmail.com Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/net/addrconf.h | 1 + net/ipv6/anycast.c | 17 ++++++++++++----- net/ipv6/ipv6_sockglue.c | 1 + 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/include/net/addrconf.h b/include/net/addrconf.h index c8d5bb8b3616..db2a87981dd4 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -305,6 +305,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr); int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); +void __ipv6_sock_ac_close(struct sock *sk); void ipv6_sock_ac_close(struct sock *sk); int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 4e0ff7031edd..55fbe330471c 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -173,7 +173,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } -void ipv6_sock_ac_close(struct sock *sk) +void __ipv6_sock_ac_close(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct net_device *dev = NULL; @@ -181,10 +181,7 @@ void ipv6_sock_ac_close(struct sock *sk) struct net *net = sock_net(sk); int prev_index; - if (!np->ipv6_ac_list) - return; - - rtnl_lock(); + ASSERT_RTNL(); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; @@ -201,6 +198,16 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } +} + +void ipv6_sock_ac_close(struct sock *sk) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + if (!np->ipv6_ac_list) + return; + rtnl_lock(); + __ipv6_sock_ac_close(sk); rtnl_unlock(); } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index aa54303c43a6..4e1da6cb9ed7 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -207,6 +207,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, fl6_free_socklist(sk); __ipv6_sock_mc_close(sk); + __ipv6_sock_ac_close(sk); /* * Sock is moving 
from IPv6 to IPv4 (sk_prot), so -- GitLab From 2ab3d2622836c72cdbc075d60c597e6571658d91 Mon Sep 17 00:00:00 2001 From: Landen Chao Date: Wed, 29 Jul 2020 10:15:17 +0200 Subject: [PATCH 0109/1304] net: ethernet: mtk_eth_soc: fix MTU warnings [ Upstream commit 555a893303872e044fb86f0a5834ce78d41ad2e2 ] in recent kernel versions there are warnings about incorrect MTU size like these: eth0: mtu greater than device maximum mtk_soc_eth 1b100000.ethernet eth0: error -22 setting MTU to include DSA overhead Fixes: bfcb813203e6 ("net: dsa: configure the MTU for switch ports") Fixes: 72579e14a1d3 ("net: dsa: don't fail to probe if we couldn't set the MTU") Fixes: 7a4c53bee332 ("net: report invalid mtu value via netlink extack") Signed-off-by: Landen Chao Signed-off-by: Frank Wunderlich Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 1d55f014725e..b72a4fad7bc8 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2452,6 +2452,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) eth->netdev[id]->irq = eth->irq[0]; eth->netdev[id]->dev.of_node = np; + eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + return 0; free_netdev: -- GitLab From db8e1fb8d751ca51a3932437ff132b2097e4d1af Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 29 Jul 2020 11:34:36 +0300 Subject: [PATCH 0110/1304] vxlan: Ensure FDB dump is performed under RCU [ Upstream commit b5141915b5aec3b29a63db869229e3741ebce258 ] The commit cited below removed the RCU read-side critical section from rtnl_fdb_dump() which means that the ndo_fdb_dump() callback is invoked without RCU protection. 
This results in the following warning [1] in the VXLAN driver, which relied on the callback being invoked from an RCU read-side critical section. Fix this by calling rcu_read_lock() in the VXLAN driver, as already done in the bridge driver. [1] WARNING: suspicious RCU usage 5.8.0-rc4-custom-01521-g481007553ce6 #29 Not tainted ----------------------------- drivers/net/vxlan.c:1379 RCU-list traversed in non-reader section!! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by bridge/166: #0: ffffffff85a27850 (rtnl_mutex){+.+.}-{3:3}, at: netlink_dump+0xea/0x1090 stack backtrace: CPU: 1 PID: 166 Comm: bridge Not tainted 5.8.0-rc4-custom-01521-g481007553ce6 #29 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-2.fc32 04/01/2014 Call Trace: dump_stack+0x100/0x184 lockdep_rcu_suspicious+0x153/0x15d vxlan_fdb_dump+0x51e/0x6d0 rtnl_fdb_dump+0x4dc/0xad0 netlink_dump+0x540/0x1090 __netlink_dump_start+0x695/0x950 rtnetlink_rcv_msg+0x802/0xbd0 netlink_rcv_skb+0x17a/0x480 rtnetlink_rcv+0x22/0x30 netlink_unicast+0x5ae/0x890 netlink_sendmsg+0x98a/0xf40 __sys_sendto+0x279/0x3b0 __x64_sys_sendto+0xe6/0x1a0 do_syscall_64+0x54/0xa0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7fe14fa2ade0 Code: Bad RIP value. RSP: 002b:00007fff75bb5b88 EFLAGS: 00000246 ORIG_RAX: 000000000000002c RAX: ffffffffffffffda RBX: 00005614b1ba0020 RCX: 00007fe14fa2ade0 RDX: 000000000000011c RSI: 00007fff75bb5b90 RDI: 0000000000000003 RBP: 00007fff75bb5b90 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00005614b1b89160 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 Fixes: 5e6d24358799 ("bridge: netlink dump interface at par with brctl") Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/vxlan.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 09f0b53b2b77..69d9bb88cfd2 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -975,6 +975,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, for (h = 0; h < FDB_HASH_SIZE; ++h) { struct vxlan_fdb *f; + rcu_read_lock(); hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { struct vxlan_rdst *rd; @@ -987,12 +988,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI, rd); - if (err < 0) + if (err < 0) { + rcu_read_unlock(); goto out; + } skip: *idx += 1; } } + rcu_read_unlock(); } out: return err; -- GitLab From 9a70de92dd44ba152c899a439389d96ac2815b02 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 28 Jul 2020 14:10:31 +0200 Subject: [PATCH 0111/1304] net: lan78xx: replace bogus endpoint lookup [ Upstream commit ea060b352654a8de1e070140d25fe1b7e4d50310 ] Drop the bogus endpoint-lookup helper which could end up accepting interfaces based on endpoints belonging to unrelated altsettings. Note that the returned bulk pipes and interrupt endpoint descriptor were never actually used. Instead the bulk-endpoint numbers are hardcoded to 1 and 2 (matching the specification), while the interrupt- endpoint descriptor was assumed to be the third descriptor created by USB core. Try to bring some order to this by dropping the bogus lookup helper and adding the missing endpoint sanity checks while keeping the interrupt- descriptor assumption for now. Signed-off-by: Johan Hovold Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/lan78xx.c | 117 ++++++++++---------------------------- 1 file changed, 30 insertions(+), 87 deletions(-) diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index d198f36785a4..5bd07cdb3e6e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -388,10 +388,6 @@ struct lan78xx_net { struct tasklet_struct bh; struct delayed_work wq; - struct usb_host_endpoint *ep_blkin; - struct usb_host_endpoint *ep_blkout; - struct usb_host_endpoint *ep_intr; - int msg_enable; struct urb *urb_intr; @@ -2883,78 +2879,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) return NETDEV_TX_OK; } -static int -lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf) -{ - int tmp; - struct usb_host_interface *alt = NULL; - struct usb_host_endpoint *in = NULL, *out = NULL; - struct usb_host_endpoint *status = NULL; - - for (tmp = 0; tmp < intf->num_altsetting; tmp++) { - unsigned ep; - - in = NULL; - out = NULL; - status = NULL; - alt = intf->altsetting + tmp; - - for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { - struct usb_host_endpoint *e; - int intr = 0; - - e = alt->endpoint + ep; - switch (e->desc.bmAttributes) { - case USB_ENDPOINT_XFER_INT: - if (!usb_endpoint_dir_in(&e->desc)) - continue; - intr = 1; - /* FALLTHROUGH */ - case USB_ENDPOINT_XFER_BULK: - break; - default: - continue; - } - if (usb_endpoint_dir_in(&e->desc)) { - if (!intr && !in) - in = e; - else if (intr && !status) - status = e; - } else { - if (!out) - out = e; - } - } - if (in && out) - break; - } - if (!alt || !in || !out) - return -EINVAL; - - dev->pipe_in = usb_rcvbulkpipe(dev->udev, - in->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->pipe_out = usb_sndbulkpipe(dev->udev, - out->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - dev->ep_intr = status; - - return 0; -} - static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) { struct 
lan78xx_priv *pdata = NULL; int ret; int i; - ret = lan78xx_get_endpoints(dev, intf); - if (ret) { - netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", - ret); - return ret; - } - dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); pdata = (struct lan78xx_priv *)(dev->data[0]); @@ -3726,6 +3656,7 @@ static void lan78xx_stat_monitor(struct timer_list *t) static int lan78xx_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr; struct lan78xx_net *dev; struct net_device *netdev; struct usb_device *udev; @@ -3774,6 +3705,34 @@ static int lan78xx_probe(struct usb_interface *intf, mutex_init(&dev->stats.access_lock); + if (intf->cur_altsetting->desc.bNumEndpoints < 3) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); + ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); + if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); + ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); + if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { + ret = -ENODEV; + goto out2; + } + + ep_intr = &intf->cur_altsetting->endpoint[2]; + if (!usb_endpoint_is_int_in(&ep_intr->desc)) { + ret = -ENODEV; + goto out2; + } + + dev->pipe_intr = usb_rcvintpipe(dev->udev, + usb_endpoint_num(&ep_intr->desc)); + ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; @@ -3786,23 +3745,7 @@ static int lan78xx_probe(struct usb_interface *intf, netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER); - if (intf->cur_altsetting->desc.bNumEndpoints < 3) { - ret = -ENODEV; - goto out3; - } - - dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0; - dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1; - dev->ep_intr = (intf->cur_altsetting)->endpoint + 2; - - dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); 
- dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); - - dev->pipe_intr = usb_rcvintpipe(dev->udev, - dev->ep_intr->desc.bEndpointAddress & - USB_ENDPOINT_NUMBER_MASK); - period = dev->ep_intr->desc.bInterval; - + period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0); buf = kmalloc(maxp, GFP_KERNEL); if (buf) { -- GitLab From d2a93f69106172be614c79c43c487d09199b4a5b Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 4 Aug 2020 09:54:15 -0700 Subject: [PATCH 0112/1304] hv_netvsc: do not use VF device if link is down [ Upstream commit 7c9864bbccc23e1812ac82966555d68c13ea4006 ] If the accelerated networking SRIOV VF device has lost carrier use the synthetic network device which is available as backup path. This is a rare case since if VF link goes down, normally the VMBus device will also loose external connectivity as well. But if the communication is between two VM's on the same host the VMBus device will still work. Reported-by: "Shah, Ashish N" Fixes: 0c195567a8f6 ("netvsc: transparent VF management") Signed-off-by: Stephen Hemminger Reviewed-by: Haiyang Zhang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/hyperv/netvsc_drv.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index bdb55db4523b..e33cbb793b63 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -543,12 +543,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) u32 hash; struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; - /* if VF is present and up then redirect packets - * already called with rcu_read_lock_bh + /* If VF is present and up then redirect packets to it. + * Skip the VF if it is marked down or has no carrier. + * If netpoll is in uses, then VF can not be used either. 
*/ vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); if (vf_netdev && netif_running(vf_netdev) && - !netpoll_tx_running(net)) + netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net)) return netvsc_vf_xmit(net, vf_netdev, skb); /* We will atmost need two pages to describe the rndis -- GitLab From 9ffa0b33f48dfd226b039d272b7ea7db57383fc0 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 31 Jul 2020 20:12:05 +0200 Subject: [PATCH 0113/1304] net: gre: recompute gre csum for sctp over gre tunnels [ Upstream commit 622e32b7d4a6492cf5c1f759ef833f817418f7b3 ] The GRE tunnel can be used to transport traffic that does not rely on a Internet checksum (e.g. SCTP). The issue can be triggered creating a GRE or GRETAP tunnel and transmitting SCTP traffic ontop of it where CRC offload has been disabled. In order to fix the issue we need to recompute the GRE csum in gre_gso_segment() not relying on the inner checksum. The issue is still present when we have the CRC offload enabled. In this case we need to disable the CRC offload if we require GRE checksum since otherwise skb_checksum() will report a wrong value. Fixes: 90017accff61 ("sctp: Add GSO support") Signed-off-by: Lorenzo Bianconi Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/gre_offload.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 6c63524f598a..89c613f19566 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -19,12 +19,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + bool need_csum, need_recompute_csum, gso_partial; struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; - bool need_csum, gso_partial; if (!skb->encapsulation) goto out; @@ -45,6 +45,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, skb->protocol = skb->inner_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); + need_recompute_csum = skb->csum_not_inet; skb->encap_hdr_csum = need_csum; features &= skb->dev->hw_enc_features; @@ -102,7 +103,15 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, } *(pcsum + 1) = 0; - *pcsum = gso_make_checksum(skb, 0); + if (need_recompute_csum && !skb_is_gso(skb)) { + __wsum csum; + + csum = skb_checksum(skb, gre_offset, + skb->len - gre_offset, 0); + *pcsum = csum_fold(csum); + } else { + *pcsum = gso_make_checksum(skb, 0); + } } while ((skb = skb->next)); out: return segs; -- GitLab From 55589dfa48e88485dc04e91eec36eacbc27ae915 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 4 Aug 2020 15:02:30 +0800 Subject: [PATCH 0114/1304] net: thunderx: use spin_lock_bh in nicvf_set_rx_mode_task() [ Upstream commit bab9693a9a8c6dd19f670408ec1e78e12a320682 ] A dead lock was triggered on thunderx driver: CPU0 CPU1 ---- ---- [01] lock(&(&nic->rx_mode_wq_lock)->rlock); [11] lock(&(&mc->mca_lock)->rlock); [12] lock(&(&nic->rx_mode_wq_lock)->rlock); [02] lock(&(&mc->mca_lock)->rlock); The path for each is: [01] 
worker_thread() -> process_one_work() -> nicvf_set_rx_mode_task() [02] mld_ifc_timer_expire() [11] ipv6_add_dev() -> ipv6_dev_mc_inc() -> igmp6_group_added() -> [12] dev_mc_add() -> __dev_set_rx_mode() -> nicvf_set_rx_mode() To fix it, it needs to disable bh on [1], so that the timer on [2] wouldn't be triggered until rx_mode_wq_lock is released. So change to use spin_lock_bh() instead of spin_lock(). Thanks to Paolo for helping with this. v1->v2: - post to netdev. Reported-by: Rafael P. Tested-by: Dean Nelson Fixes: 469998c861fa ("net: thunderx: prevent concurrent data re-writing by nicvf_set_rx_mode") Signed-off-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index dca02b35c231..99eea9e6a8ea 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2015,11 +2015,11 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg) /* Save message data locally to prevent them from * being overwritten by next ndo_set_rx_mode call(). 
*/ - spin_lock(&nic->rx_mode_wq_lock); + spin_lock_bh(&nic->rx_mode_wq_lock); mode = vf_work->mode; mc = vf_work->mc; vf_work->mc = NULL; - spin_unlock(&nic->rx_mode_wq_lock); + spin_unlock_bh(&nic->rx_mode_wq_lock); __nicvf_set_rx_mode_task(mode, mc, nic); } -- GitLab From 7fc4eec2a8dc2f3ea7ab860b6d6a5d0391452ca5 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Fri, 31 Jul 2020 00:48:38 -0400 Subject: [PATCH 0115/1304] openvswitch: Prevent kernel-infoleak in ovs_ct_put_key() [ Upstream commit 9aba6c5b49254d5bee927d81593ed4429e91d4ae ] ovs_ct_put_key() is potentially copying uninitialized kernel stack memory into socket buffers, since the compiler may leave a 3-byte hole at the end of `struct ovs_key_ct_tuple_ipv4` and `struct ovs_key_ct_tuple_ipv6`. Fix it by initializing `orig` with memset(). Fixes: 9dd7f8907c37 ("openvswitch: Add original direction conntrack tuple to sw_flow_key.") Suggested-by: Dan Carpenter Signed-off-by: Peilin Ye Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/openvswitch/conntrack.c | 38 +++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 6dcb59f272e1..f8e073ef1a67 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -283,10 +283,6 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) ovs_ct_update_key(skb, NULL, key, false, false); } -#define IN6_ADDR_INITIALIZER(ADDR) \ - { (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \ - (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] } - int ovs_ct_put_key(const struct sw_flow_key *swkey, const struct sw_flow_key *output, struct sk_buff *skb) { @@ -308,24 +304,30 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey, if (swkey->ct_orig_proto) { if (swkey->eth.type == htons(ETH_P_IP)) { - struct ovs_key_ct_tuple_ipv4 orig = { - output->ipv4.ct_orig.src, - output->ipv4.ct_orig.dst, - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - 
output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv4 orig; + + memset(&orig, 0, sizeof(orig)); + orig.ipv4_src = output->ipv4.ct_orig.src; + orig.ipv4_dst = output->ipv4.ct_orig.dst; + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv4_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, sizeof(orig), &orig)) return -EMSGSIZE; } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - struct ovs_key_ct_tuple_ipv6 orig = { - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src), - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst), - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv6 orig; + + memset(&orig, 0, sizeof(orig)); + memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32, + sizeof(orig.ipv6_src)); + memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32, + sizeof(orig.ipv6_dst)); + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv6_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, sizeof(orig), &orig)) return -EMSGSIZE; -- GitLab From 719a92fae0434d11ee86d0f679663c14a2a13fc1 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 5 Aug 2020 10:41:31 +0800 Subject: [PATCH 0116/1304] Revert "vxlan: fix tos value before xmit" [ Upstream commit a0dced17ad9dc08b1b25e0065b54c97a318e6e8b ] This reverts commit 71130f29979c7c7956b040673e6b9d5643003176. In commit 71130f29979c ("vxlan: fix tos value before xmit") we want to make sure the tos value are filtered by RT_TOS() based on RFC1349. 0 1 2 3 4 5 6 7 +-----+-----+-----+-----+-----+-----+-----+-----+ | PRECEDENCE | TOS | MBZ | +-----+-----+-----+-----+-----+-----+-----+-----+ But RFC1349 has been obsoleted by RFC2474. 
The new DSCP field defined like 0 1 2 3 4 5 6 7 +-----+-----+-----+-----+-----+-----+-----+-----+ | DS FIELD, DSCP | ECN FIELD | +-----+-----+-----+-----+-----+-----+-----+-----+ So with IPTOS_TOS_MASK 0x1E RT_TOS(tos) ((tos)&IPTOS_TOS_MASK) the first 3 bits DSCP info will get lost. To take all the DSCP info in xmit, we should revert the patch and just push all tos bits to ip_tunnel_ecn_encap(), which will handling ECN field later. Fixes: 71130f29979c ("vxlan: fix tos value before xmit") Signed-off-by: Hangbin Liu Acked-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/vxlan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 69d9bb88cfd2..abf85f0ab72f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2223,7 +2223,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = &rt->dst; skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); @@ -2260,7 +2260,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? 
: ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), -- GitLab From 615214ab782aac760771dedf870366f38b27e902 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Wed, 5 Aug 2020 04:40:45 -0400 Subject: [PATCH 0117/1304] selftests/net: relax cpu affinity requirement in msg_zerocopy test [ Upstream commit 16f6458f2478b55e2b628797bc81a4455045c74e ] The msg_zerocopy test pins the sender and receiver threads to separate cores to reduce variance between runs. But it hardcodes the cores and skips core 0, so it fails on machines with the selected cores offline, or simply fewer cores. The test mainly gives code coverage in automated runs. The throughput of zerocopy ('-z') and non-zerocopy runs is logged for manual inspection. Continue even when sched_setaffinity fails. Just log to warn anyone interpreting the data. Fixes: 07b65c5b31ce ("test: add msg_zerocopy test") Reported-by: Colin Ian King Signed-off-by: Willem de Bruijn Acked-by: Colin Ian King Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/net/msg_zerocopy.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c index 406cc70c571d..c539591937a1 100644 --- a/tools/testing/selftests/net/msg_zerocopy.c +++ b/tools/testing/selftests/net/msg_zerocopy.c @@ -125,9 +125,8 @@ static int do_setcpu(int cpu) CPU_ZERO(&mask); CPU_SET(cpu, &mask); if (sched_setaffinity(0, sizeof(mask), &mask)) - error(1, 0, "setaffinity %d", cpu); - - if (cfg_verbose) + fprintf(stderr, "cpu: unable to pin, may increase variance.\n"); + else if (cfg_verbose) fprintf(stderr, "cpu: %u\n", cpu); return 0; -- GitLab From 46a41ed2e6f3cb7bb193126bae357c2c6bf2ee4f Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 29 Jul 2020 00:03:56 +0100 Subject: [PATCH 0118/1304] rxrpc: Fix race between recvmsg and sendmsg on immediate call failure [ Upstream commit 65550098c1c4db528400c73acf3e46bfa78d9264 ] There's a race between rxrpc_sendmsg setting up a call, but then failing to send anything on it due to an error, and recvmsg() seeing the call completion occur and trying to return the state to the user. An assertion fails in rxrpc_recvmsg() because the call has already been released from the socket and is about to be released again as recvmsg deals with it. (The recvmsg_q queue on the socket holds a ref, so there's no problem with use-after-free.) We also have to be careful not to end up reporting an error twice, in such a way that both returns indicate to userspace that the user ID supplied with the call is no longer in use - which could cause the client to malfunction if it recycles the user ID fast enough. Fix this by the following means: (1) When sendmsg() creates a call after the point that the call has been successfully added to the socket, don't return any errors through sendmsg(), but rather complete the call and let recvmsg() retrieve them. 
Make sendmsg() return 0 at this point. Further calls to sendmsg() for that call will fail with ESHUTDOWN. Note that at this point, we haven't send any packets yet, so the server doesn't yet know about the call. (2) If sendmsg() returns an error when it was expected to create a new call, it means that the user ID wasn't used. (3) Mark the call disconnected before marking it completed to prevent an oops in rxrpc_release_call(). (4) recvmsg() will then retrieve the error and set MSG_EOR to indicate that the user ID is no longer known by the kernel. An oops like the following is produced: kernel BUG at net/rxrpc/recvmsg.c:605! ... RIP: 0010:rxrpc_recvmsg+0x256/0x5ae ... Call Trace: ? __init_waitqueue_head+0x2f/0x2f ____sys_recvmsg+0x8a/0x148 ? import_iovec+0x69/0x9c ? copy_msghdr_from_user+0x5c/0x86 ___sys_recvmsg+0x72/0xaa ? __fget_files+0x22/0x57 ? __fget_light+0x46/0x51 ? fdget+0x9/0x1b do_recvmmsg+0x15e/0x232 ? _raw_spin_unlock+0xa/0xb ? vtime_delta+0xf/0x25 __x64_sys_recvmmsg+0x2c/0x2f do_syscall_64+0x4c/0x78 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: 357f5ef64628 ("rxrpc: Call rxrpc_release_call() on error in rxrpc_new_client_call()") Reported-by: syzbot+b54969381df354936d96@syzkaller.appspotmail.com Signed-off-by: David Howells Reviewed-by: Marc Dionne Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/rxrpc/call_object.c | 27 +++++++++++++++++++-------- net/rxrpc/conn_object.c | 8 +++++--- net/rxrpc/recvmsg.c | 2 +- net/rxrpc/sendmsg.c | 3 +++ 4 files changed, 28 insertions(+), 12 deletions(-) diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 964c4e45de11..39f5fa3501ff 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -290,7 +290,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, */ ret = rxrpc_connect_call(rx, call, cp, srx, gfp); if (ret < 0) - goto error; + goto error_attached_to_socket; trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage), here, NULL); @@ -310,18 +310,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, error_dup_user_ID: write_unlock(&rx->call_lock); release_sock(&rx->sk); - ret = -EEXIST; - -error: __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, - RX_CALL_DEAD, ret); + RX_CALL_DEAD, -EEXIST); trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), - here, ERR_PTR(ret)); + here, ERR_PTR(-EEXIST)); rxrpc_release_call(rx, call); mutex_unlock(&call->user_mutex); rxrpc_put_call(call, rxrpc_call_put); - _leave(" = %d", ret); - return ERR_PTR(ret); + _leave(" = -EEXIST"); + return ERR_PTR(-EEXIST); + + /* We got an error, but the call is attached to the socket and is in + * need of release. However, we might now race with recvmsg() when + * completing the call queues it. Return 0 from sys_sendmsg() and + * leave the error to recvmsg() to deal with. 
+ */ +error_attached_to_socket: + trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage), + here, ERR_PTR(ret)); + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); + __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, + RX_CALL_DEAD, ret); + _leave(" = c=%08x [err]", call->debug_id); + return call; } /* diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index c4c4450891e0..2adb7c5c8966 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -215,9 +215,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call) call->peer->cong_cwnd = call->cong_cwnd; - spin_lock_bh(&conn->params.peer->lock); - hlist_del_rcu(&call->error_link); - spin_unlock_bh(&conn->params.peer->lock); + if (!hlist_unhashed(&call->error_link)) { + spin_lock_bh(&call->peer->lock); + hlist_del_rcu(&call->error_link); + spin_unlock_bh(&call->peer->lock); + } if (rxrpc_is_client_call(call)) return rxrpc_disconnect_client_call(call); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 6e9d977f0797..e4fde33b887e 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -530,7 +530,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, goto error_unlock_call; } - if (msg->msg_name) { + if (msg->msg_name && call->peer) { struct sockaddr_rxrpc *srx = msg->msg_name; size_t len = sizeof(call->peer->srx); diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index caee7632c257..edd76c41765f 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -654,6 +654,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) if (IS_ERR(call)) return PTR_ERR(call); /* ... and we have the call lock. 
*/ + ret = 0; + if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) + goto out_put_unlock; } else { switch (READ_ONCE(call->state)) { case RXRPC_CALL_UNINITIALISED: -- GitLab From 43a7e1cf606e96ee43f8897129972f0b79390367 Mon Sep 17 00:00:00 2001 From: Sergey Nemov Date: Fri, 7 Aug 2020 13:55:14 -0700 Subject: [PATCH 0119/1304] i40e: add num_vectors checker in iwarp handler [ Upstream commit 7015ca3df965378bcef072cca9cd63ed098665b5 ] Field num_vectors from struct virtchnl_iwarp_qvlist_info should not be larger than num_msix_vectors_vf in the hw struct. The iwarp uses the same set of vectors as the LAN VF driver. Fixes: e3219ce6a7754 ("i40e: Add support for client interface for IWARP driver") Signed-off-by: Sergey Nemov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Jesse Brandeburg Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6a677fd540d6..a1b464a91d93 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -442,6 +442,16 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, u32 next_q_idx, next_q_type; u32 msix_vf, size; + msix_vf = pf->hw.func_caps.num_msix_vectors_vf; + + if (qvlist_info->num_vectors > msix_vf) { + dev_warn(&pf->pdev->dev, + "Incorrect number of iwarp vectors %u. 
Maximum %u allowed.\n", + qvlist_info->num_vectors, + msix_vf); + goto err; + } + size = sizeof(struct virtchnl_iwarp_qvlist_info) + (sizeof(struct virtchnl_iwarp_qv_info) * (qvlist_info->num_vectors - 1)); -- GitLab From 48a9be93ff2c5a09e308ef93560ea1f4ecbd22f6 Mon Sep 17 00:00:00 2001 From: Grzegorz Siwik Date: Fri, 7 Aug 2020 13:55:15 -0700 Subject: [PATCH 0120/1304] i40e: Wrong truncation from u16 to u8 [ Upstream commit c004804dceee9ca384d97d9857ea2e2795c2651d ] In this patch fixed wrong truncation method from u16 to u8 during validation. It was changed by changing u8 to u32 parameter in method declaration and arguments were changed to u32. Fixes: 5c3c48ac6bf56 ("i40e: implement virtual device interface") Signed-off-by: Grzegorz Siwik Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Jesse Brandeburg Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a1b464a91d93..b26e41acd993 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -196,7 +196,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, * * check for the valid vector id **/ -static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id) +static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id) { struct i40e_pf *pf = vf->pf; -- GitLab From 71d781619fc57ef1000cec352343bfea1a559e0e Mon Sep 17 00:00:00 2001 From: Martyna Szapar Date: Fri, 7 Aug 2020 13:55:16 -0700 Subject: [PATCH 0121/1304] i40e: Fix of memory leak and integer truncation in i40e_virtchnl.c [ Upstream commit 24474f2709af6729b9b1da1c5e160ab62e25e3a4 ] Fixed possible memory leak in i40e_vc_add_cloud_filter function: cfilter is being allocated and in some error conditions the 
function returns without freeing the memory. Fix of integer truncation from u16 (type of queue_id value) to u8 when calling i40e_vc_isvalid_queue_id function. Fixes: e284fc280473b ("i40e: Add and delete cloud filter") Signed-off-by: Martyna Szapar Signed-off-by: Jeff Kirsher Signed-off-by: Jesse Brandeburg Signed-off-by: Greg Kroah-Hartman --- .../net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index b26e41acd993..c19da0ff888e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -181,7 +181,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id) * check for the valid queue id **/ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id, - u8 qid) + u16 qid) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); @@ -3345,7 +3345,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; - goto err; + goto err_out; } if (!vf->adq_enabled) { @@ -3353,15 +3353,15 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) "VF %d: ADq is not enabled, can't apply cloud filter\n", vf->vf_id); aq_ret = I40E_ERR_PARAM; - goto err; + goto err_out; } if (i40e_validate_cloud_filter(vf, vcf)) { dev_info(&pf->pdev->dev, "VF %d: Invalid input/s, can't apply cloud filter\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; - goto err; + aq_ret = I40E_ERR_PARAM; + goto err_out; } cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); @@ -3422,13 +3422,17 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) "VF %d: Failed to add cloud filter, err %s aq_err %s\n", vf->vf_id, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - goto 
err; + goto err_free; } INIT_HLIST_NODE(&cfilter->cloud_node); hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list); + /* release the pointer passing it to the collection */ + cfilter = NULL; vf->num_cloud_filters++; -err: +err_free: + kfree(cfilter); +err_out: return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER, aq_ret); } -- GitLab From 330aa3b43ccf514ad2e99421fc8d3b38882b45cf Mon Sep 17 00:00:00 2001 From: Martyna Szapar Date: Fri, 7 Aug 2020 13:55:17 -0700 Subject: [PATCH 0122/1304] i40e: Memory leak in i40e_config_iwarp_qvlist [ Upstream commit 0b63644602cfcbac849f7ea49272a39e90fa95eb ] Added freeing the old allocation of vf->qvlist_info in function i40e_config_iwarp_qvlist before overwriting it with the new allocation. Fixes: e3219ce6a7754 ("i40e: Add support for client interface for IWARP driver") Signed-off-by: Martyna Szapar Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher Signed-off-by: Jesse Brandeburg Signed-off-by: Greg Kroah-Hartman --- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index c19da0ff888e..bc4eda52372a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -441,6 +441,7 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, u32 v_idx, i, reg_idx, reg; u32 next_q_idx, next_q_type; u32 msix_vf, size; + int ret = 0; msix_vf = pf->hw.func_caps.num_msix_vectors_vf; @@ -449,16 +450,19 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, "Incorrect number of iwarp vectors %u. 
Maximum %u allowed.\n", qvlist_info->num_vectors, msix_vf); - goto err; + ret = -EINVAL; + goto err_out; } size = sizeof(struct virtchnl_iwarp_qvlist_info) + (sizeof(struct virtchnl_iwarp_qv_info) * (qvlist_info->num_vectors - 1)); + kfree(vf->qvlist_info); vf->qvlist_info = kzalloc(size, GFP_KERNEL); - if (!vf->qvlist_info) - return -ENOMEM; - + if (!vf->qvlist_info) { + ret = -ENOMEM; + goto err_out; + } vf->qvlist_info->num_vectors = qvlist_info->num_vectors; msix_vf = pf->hw.func_caps.num_msix_vectors_vf; @@ -469,8 +473,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, v_idx = qv_info->v_idx; /* Validate vector id belongs to this vf */ - if (!i40e_vc_isvalid_vector_id(vf, v_idx)) - goto err; + if (!i40e_vc_isvalid_vector_id(vf, v_idx)) { + ret = -EINVAL; + goto err_free; + } vf->qvlist_info->qv_info[i] = *qv_info; @@ -512,10 +518,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, } return 0; -err: +err_free: kfree(vf->qvlist_info); vf->qvlist_info = NULL; - return -EINVAL; +err_out: + return ret; } /** -- GitLab From 67b4be302ca89d49cacc37373049b421b8bcec4e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 8 Jul 2020 13:15:20 -0700 Subject: [PATCH 0123/1304] Smack: fix use-after-free in smk_write_relabel_self() commit beb4ee6770a89646659e6a2178538d2b13e2654e upstream. smk_write_relabel_self() frees memory from the task's credentials with no locking, which can easily cause a use-after-free because multiple tasks can share the same credentials structure. Fix this by using prepare_creds() and commit_creds() to correctly modify the task's credentials. 
Reproducer for "BUG: KASAN: use-after-free in smk_write_relabel_self": #include #include #include static void *thrproc(void *arg) { int fd = open("/sys/fs/smackfs/relabel-self", O_WRONLY); for (;;) write(fd, "foo", 3); } int main() { pthread_t t; pthread_create(&t, NULL, thrproc, NULL); thrproc(NULL); } Reported-by: syzbot+e6416dabb497a650da40@syzkaller.appspotmail.com Fixes: 38416e53936e ("Smack: limited capability for changing process label") Cc: # v4.4+ Signed-off-by: Eric Biggers Signed-off-by: Casey Schaufler Signed-off-by: Greg Kroah-Hartman --- security/smack/smackfs.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index 371ae368da35..10ee51d04492 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -2746,7 +2746,6 @@ static int smk_open_relabel_self(struct inode *inode, struct file *file) static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct task_smack *tsp = current_security(); char *data; int rc; LIST_HEAD(list_tmp); @@ -2771,11 +2770,21 @@ static ssize_t smk_write_relabel_self(struct file *file, const char __user *buf, kfree(data); if (!rc || (rc == -EINVAL && list_empty(&list_tmp))) { + struct cred *new; + struct task_smack *tsp; + + new = prepare_creds(); + if (!new) { + rc = -ENOMEM; + goto out; + } + tsp = new->security; smk_destroy_label_list(&tsp->smk_relabel); list_splice(&list_tmp, &tsp->smk_relabel); + commit_creds(new); return count; } - +out: smk_destroy_label_list(&list_tmp); return rc; } -- GitLab From c14d30dc9987047b439b03d6e6db7d54d9f7f180 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 11 Aug 2020 15:32:36 +0200 Subject: [PATCH 0124/1304] Linux 4.19.139 Tested-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index daaa8ab2f550..f6012170995e 100644 --- 
a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 138 +SUBLEVEL = 139 EXTRAVERSION = NAME = "People's Front" -- GitLab From 0e5ea532a0c83d6c0babd97d0faa4501097d1a3f Mon Sep 17 00:00:00 2001 From: Marco Ballesio Date: Tue, 11 Aug 2020 16:06:36 -0700 Subject: [PATCH 0125/1304] ANDROID: cgroups: ABI padding ABI padding in struct cgroup Bug: 163547360 Test: built and booted the kernel Change-Id: Ie6ef8bdc4a62f57039d3b456cf125db4582b255a Signed-off-by: Marco Ballesio --- include/linux/cgroup-defs.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 7112b8a1faaa..b916a5224b52 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -453,6 +453,10 @@ struct cgroup { /* If there is block congestion on this cgroup. */ atomic_t congestion_count; + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + /* ids of the ancestors at each level including self */ int ancestor_ids[]; }; -- GitLab From 489d25a567e76c8a8f08b12b28024030b7ca9b23 Mon Sep 17 00:00:00 2001 From: Marco Ballesio Date: Wed, 12 Aug 2020 09:46:32 -0700 Subject: [PATCH 0126/1304] ANDROID: cgroups: add v2 freezer ABI changes introduce the freezer_state struct to be used by the v2 freezer backports Test: built and booted Bug: 163547360 Signed-off-by: Marco Ballesio Change-Id: I4705ee9787f35db58e27de339d5264d1cd45007a --- include/linux/cgroup-defs.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index b916a5224b52..fae076e4887e 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -317,6 +317,25 @@ struct cgroup_rstat_cpu { struct cgroup *updated_next; /* NULL iff not on the list */ }; +struct cgroup_freezer_state { + /* Should the cgroup and its descendants be frozen. */ + bool freeze; + + /* Should the cgroup actually be frozen? 
*/ + int e_freeze; + + /* Fields below are protected by css_set_lock */ + + /* Number of frozen descendant cgroups */ + int nr_frozen_descendants; + + /* + * Number of tasks, which are counted as frozen: + * frozen, SIGSTOPped, and PTRACEd. + */ + int nr_frozen_tasks; +}; + struct cgroup { /* self css with NULL ->ss, points back to this cgroup */ struct cgroup_subsys_state self; @@ -453,6 +472,9 @@ struct cgroup { /* If there is block congestion on this cgroup. */ atomic_t congestion_count; + /* Used to store internal freezer state */ + struct cgroup_freezer_state freezer; + ANDROID_KABI_RESERVE(1); ANDROID_KABI_RESERVE(2); ANDROID_KABI_RESERVE(3); -- GitLab From 8a03703c5e759d29cde005f80f34acb071fab893 Mon Sep 17 00:00:00 2001 From: Marco Ballesio Date: Wed, 12 Aug 2020 07:25:00 -0700 Subject: [PATCH 0127/1304] ANDROID: sched: add "frozen" field to task_struct use one of the ANDROID_KABI_RESERVED fields for the v2 freezer "frozen" bit. Bug: 163547360 Test: built and booted Signed-off-by: Marco Ballesio Change-Id: I7d7aed173a09580b8eff1ccf39ca4f162fbaddc8 --- include/linux/sched.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 1dc5077da58e..98c914bd6b83 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1292,8 +1292,9 @@ struct task_struct { /* Used by LSM modules for access restriction: */ void *security; #endif + /* task is frozen/stopped (used by the cgroup freezer) */ + ANDROID_KABI_USE(1, unsigned frozen:1); - ANDROID_KABI_RESERVE(1); ANDROID_KABI_RESERVE(2); ANDROID_KABI_RESERVE(3); ANDROID_KABI_RESERVE(4); -- GitLab From f2cce51e2b2493739bb8179b1b57984dc5a9ee62 Mon Sep 17 00:00:00 2001 From: Marco Ballesio Date: Thu, 13 Aug 2020 14:54:52 -0700 Subject: [PATCH 0128/1304] ANDROID: GKI: Update the ABI xml representation Leaf changes summary: 686 artifacts changed (67 filtered out) Changed leaf types summary: 2 (1 filtered out) leaf types changed Removed/Changed/Added 
functions summary: 0 Removed, 678 Changed (64 filtered out), 0 Added function Removed/Changed/Added variables summary: 0 Removed, 6 Changed (2 filtered out), 0 Added variable 'struct cgroup at cgroup-defs.h:320:1' changed: type size changed from 17536 to 17856 (in bits) 4 data member insertions: 'cgroup_freezer_state cgroup::freezer', at offset 17504 (in bits) at cgroup-defs.h:476:1 'u64 cgroup::android_kabi_reserved1', at offset 17664 (in bits) at cgroup-defs.h:478:1 'u64 cgroup::android_kabi_reserved2', at offset 17728 (in bits) at cgroup-defs.h:479:1 'u64 cgroup::android_kabi_reserved3', at offset 17792 (in bits) at cgroup-defs.h:480:1 there are data member changes: 'int cgroup::ancestor_ids[]' offset changed from 17504 to 17856 (in bits) (by +352 bits) 1553 impacted interfaces 'struct cgroup_root at cgroup-defs.h:465:1' changed (indirectly): type size changed from 51392 to 51712 (in bits) there are data member changes: type 'struct cgroup' of 'cgroup_root::cgrp' changed, as reported earlier 'int cgroup_root::cgrp_ancestor_id_storage' offset changed from 17664 to 17984 (in bits) (by +320 bits) 'atomic_t cgroup_root::nr_cgrps' offset changed from 17696 to 18016 (in bits) (by +320 bits) 'list_head cgroup_root::root_list' offset changed from 17728 to 18048 (in bits) (by +320 bits) 'unsigned int cgroup_root::flags' offset changed from 17856 to 18176 (in bits) (by +320 bits) 'idr cgroup_root::cgroup_idr' offset changed from 17920 to 18240 (in bits) (by +320 bits) 'char cgroup_root::release_agent_path[4096]' offset changed from 18112 to 18432 (in bits) (by +320 bits) 'char cgroup_root::name[64]' offset changed from 50880 to 51200 (in bits) (by +320 bits) 1553 impacted interfaces Bug: 163547360 Signed-off-by: Marco Ballesio Change-Id: I88cae7b0e417f1c37eeee7f430c412d4d29b84de --- android/abi_gki_aarch64.xml | 3392 +++++++++++++++++------------------ 1 file changed, 1613 insertions(+), 1779 deletions(-) diff --git a/android/abi_gki_aarch64.xml 
b/android/abi_gki_aarch64.xml index 60c9d2856391..4edbf8695e98 100644 --- a/android/abi_gki_aarch64.xml +++ b/android/abi_gki_aarch64.xml @@ -3,12 +3,12 @@ - + - - + + - + @@ -18,20 +18,20 @@ - - - - + + + + - - + + - - + + @@ -39,11 +39,11 @@ - - + + - + @@ -61,11 +61,11 @@ - + - + - + @@ -74,13 +74,13 @@ - - + + - + @@ -108,39 +108,39 @@ - + - - + + - - - - + + + + - - + + - + - - + + - - + + - - + + @@ -150,15 +150,15 @@ - - - - - - - - - + + + + + + + + + @@ -180,16 +180,16 @@ - + - - - - - - + + + + + + @@ -222,20 +222,20 @@ - + - + - + - + - + - + @@ -247,30 +247,30 @@ - + - - + + - + - - - - - - - - - - - + + + + + + + + + + + - + @@ -288,74 +288,74 @@ - - - - - - - - - - - - - - + + + + + + + + + + + + + + - + - + - - - - - - - + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + @@ -367,38 +367,38 @@ - - - - - - - - - + + + + + + + + + - + - + - + - - - - - - + + + + + + @@ -412,20 +412,20 @@ - - + + - - + + - - + + @@ -440,31 +440,31 @@ - - + + - + - - - + + + - + - + - + - + - + @@ -473,26 +473,26 @@ - - - + + + - + - - - - - - - - - - - + + + + + + + + + + + @@ -509,12 +509,12 @@ - - - - - - + + + + + + @@ -528,42 +528,42 @@ - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + @@ -589,7 +589,7 @@ - + @@ -602,13 +602,13 @@ - - - - - - - + + + + + + + @@ -617,8 +617,8 @@ - - + + @@ -818,7 +818,7 @@ - + @@ -826,29 +826,29 @@ - - - + + + - - - - - - - + + + + + + + - - + + - - + + @@ -929,7 +929,7 @@ - + @@ -941,15 +941,15 @@ - - - - - - - - - + + + + + + + + + @@ -962,19 +962,19 @@ - - - - - + + + + + - + - - + + @@ -986,20 +986,20 @@ - + - + - + - - - + + + @@ -1010,28 +1010,28 @@ - - - - + + + + - - - - - + + + + + - - + + - + - - + + - + @@ -1044,31 +1044,31 @@ - - - - - - - - - - - - + + + + + + + + + + + + - - - - - - - - - + + + + + + + + + - - + + @@ -1082,7 +1082,7 @@ - + @@ -1091,19 +1091,19 @@ - - - + + + 
- - + + - - - - - - + + + + + + @@ -1118,54 +1118,54 @@ - + - - + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + @@ -1185,7 +1185,7 @@ - + @@ -1221,19 +1221,19 @@ - - + + - - - - - - + + + + + + - - - + + + @@ -1273,21 +1273,21 @@ - - + + - - - - - + + + + + - - - + + + @@ -1299,24 +1299,24 @@ - + - - - - - - - - + + + + + + + + - - + + @@ -1333,21 +1333,21 @@ - - - + + + - - - - + + + + - - + + - - + + @@ -1365,12 +1365,12 @@ - - + + - + @@ -1414,13 +1414,13 @@ - - + + - + - - + + @@ -1428,52 +1428,52 @@ - - - - - - - + + + + + + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + @@ -1481,9 +1481,9 @@ - - - + + + @@ -1503,12 +1503,12 @@ - + - - - + + + @@ -1534,9 +1534,9 @@ - - - + + + @@ -1545,7 +1545,7 @@ - + @@ -1586,11 +1586,11 @@ - - + + - + @@ -1599,9 +1599,9 @@ - - - + + + @@ -1627,20 +1627,20 @@ - + - - - - - + + + + + - + @@ -1678,10 +1678,10 @@ - - - - + + + + @@ -1689,7 +1689,7 @@ - + @@ -1698,10 +1698,10 @@ - + - - + + @@ -1730,18 +1730,18 @@ - - - + + + - - - - + + + + - + @@ -1758,8 +1758,8 @@ - - + + @@ -1776,79 +1776,79 @@ - + - - - + + + - + - + - - - - - + + + + + - - - - - - - - - - - - - - + + + + + + + + + + + + + + - + - - - - - + + + + + - + - - - - - - - + + + + + + + - - - - - - + + + + + + - - - + + + @@ -1856,7 +1856,7 @@ - + @@ -1874,18 +1874,18 @@ - - - + + + - + - - + + - + @@ -1897,8 +1897,8 @@ - - + + @@ -1908,9 +1908,9 @@ - - - + + + @@ -1933,8 +1933,8 @@ - - + + @@ -1951,15 +1951,15 @@ - - - + + + - + - + @@ -1971,51 +1971,51 @@ - - + + - + - - + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -2069,26 +2069,26 @@ - + - - - + + + - + - + - + - - - - - - + + + + + + @@ -2102,30 +2102,30 @@ - + - + - + - - - + + + - - + + - - + + @@ -2133,57 +2133,57 @@ - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -2218,12 +2218,12 @@ - + - + @@ -2246,16 +2246,16 @@ - - - - - - + + + + + + - - - + + + @@ -2263,22 +2263,22 @@ - - - - - - + + + + + + - + - + - - + + @@ -2297,14 +2297,14 @@ - - + + - - - + + + @@ -2344,27 +2344,27 @@ - - - + + + - + - + - + - + - + - + @@ -2440,7 +2440,7 @@ - + @@ -2493,20 +2493,20 @@ - - - - - - - - - - - + + + + + + + + + + + - - + + @@ -2539,10 +2539,10 @@ - - - - + + + + @@ -2563,47 +2563,47 @@ - - - - - - - - + + + + + + + + - - - - - - + + + + + + - - + + - - + + - - - - - - + + + + + + @@ -2626,7 +2626,7 @@ - + @@ -2653,17 +2653,17 @@ - - + + - + - + @@ -2683,13 +2683,13 @@ - + - + @@ -2697,7 +2697,7 @@ - + @@ -2932,7 +2932,7 @@ - + @@ -3952,7 +3952,7 @@ - + @@ -4727,31 +4727,31 @@ - + - + - + - + - + - + - + - + - + @@ -5881,12 +5881,17 @@ + + + + + - + @@ -5951,7 +5956,7 @@ - + @@ -6000,7 +6005,7 @@ - + @@ -6054,13 +6059,13 @@ - + - + @@ -6069,7 +6074,7 @@ - + @@ -6132,7 +6137,7 @@ - + @@ -6360,10 +6365,10 @@ - + - + @@ -6952,15 +6957,26 @@ - + - + - + + + + + + + + + + + + @@ -6982,6 +6998,7 @@ + @@ -14739,7 +14756,7 @@ - + @@ -16794,6 +16811,14 @@ + + + + + + + + @@ -17439,7 +17464,7 @@ - + @@ -17622,6 +17647,14 @@ + + + + + + + + @@ -18207,12 +18240,12 @@ - + - + @@ -18661,10 +18694,10 @@ - - - - + + + + @@ -21789,13 +21822,13 @@ - + - + @@ -28154,12 +28187,26 @@ - + + + + + + + + + + + + + + + @@ -28168,6 +28215,17 @@ + + + + + + + + + + + @@ -29531,10 +29589,10 @@ - + - + @@ -29653,13 +29711,13 @@ - + - + @@ -30883,6 +30941,17 @@ + + + + + + + + + + + @@ -30922,17 +30991,6 @@ - - - - - - - - - - - @@ -31531,7 +31589,7 @@ - + @@ -31622,7 +31680,7 @@ - + @@ -32360,7 +32418,7 @@ - + @@ -32379,7 +32437,7 @@ - + @@ -33213,7 +33271,7 @@ - + @@ -33233,9 +33291,9 @@ - + - + @@ -33247,7 +33305,7 @@ - + @@ -33564,9 +33622,9 @@ - + - + @@ -33575,7 
+33633,7 @@ - + @@ -35499,6 +35557,18 @@ + + + + + + + + + + + + @@ -35604,7 +35674,7 @@ - + @@ -35741,7 +35811,7 @@ - + @@ -35849,10 +35919,10 @@ - + - + @@ -35864,7 +35934,7 @@ - + @@ -35889,7 +35959,7 @@ - + @@ -36794,10 +36864,10 @@ - + - + @@ -37453,13 +37523,13 @@ - + - + @@ -39850,7 +39920,7 @@ - + @@ -40170,7 +40240,7 @@ - + @@ -40259,7 +40329,7 @@ - + @@ -40873,7 +40943,7 @@ - + @@ -41571,17 +41641,6 @@ - - - - - - - - - - - @@ -42594,10 +42653,10 @@ - + - + @@ -42648,7 +42707,7 @@ - + @@ -42657,7 +42716,7 @@ - + @@ -42670,7 +42729,7 @@ - + @@ -45035,37 +45094,37 @@ - + - + - + - + - + - + - + - + - + - + - + - + @@ -47613,12 +47672,12 @@ - + - + @@ -47645,7 +47704,7 @@ - + @@ -47667,7 +47726,7 @@ - + @@ -47729,7 +47788,7 @@ - + @@ -47922,7 +47981,7 @@ - + @@ -48021,7 +48080,7 @@ - + @@ -49507,7 +49566,7 @@ - + @@ -49518,7 +49577,7 @@ - + @@ -51957,7 +52016,7 @@ - + @@ -52073,7 +52132,7 @@ - + @@ -52489,7 +52548,7 @@ - + @@ -53061,7 +53120,7 @@ - + @@ -53425,7 +53484,7 @@ - + @@ -53448,10 +53507,10 @@ - + - + @@ -53460,7 +53519,7 @@ - + @@ -53468,7 +53527,7 @@ - + @@ -53498,7 +53557,7 @@ - + @@ -53506,7 +53565,7 @@ - + @@ -54424,7 +54483,7 @@ - + @@ -54435,7 +54494,7 @@ - + @@ -55413,7 +55472,7 @@ - + @@ -55434,7 +55493,7 @@ - + @@ -58146,6 +58205,65 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -58753,6 +58871,17 @@ + + + + + + + + + + + @@ -58928,7 +59057,7 @@ - + @@ -63583,7 +63712,7 @@ - + @@ -64144,7 +64273,7 @@ - + @@ -64773,12 +64902,24 @@ + + + + + + + + + + + + @@ -64866,6 +65007,14 @@ + + + + + + + + @@ -64886,7 +65035,7 @@ - + @@ -64897,12 +65046,12 @@ - + - + - + @@ -64922,23 +65071,6 @@ - - - - - - - - - - - - - - - - - @@ -64976,14 +65108,6 @@ - - - - - - - - @@ -65001,16 +65125,6 @@ - - - - - - - - - - @@ -65118,10 +65232,10 @@ - + - + @@ -65166,13 +65280,13 @@ - + - + @@ -65483,7 +65597,7 @@ - + @@ -65630,18 +65744,18 @@ - + - + - + - + @@ -65703,7 +65817,7 @@ - + 
@@ -65874,7 +65988,7 @@ - + @@ -65888,7 +66002,7 @@ - + @@ -65916,7 +66030,7 @@ - + @@ -65924,6 +66038,23 @@ + + + + + + + + + + + + + + + + + @@ -66111,7 +66242,7 @@ - + @@ -66123,15 +66254,15 @@ - + - + - + - + @@ -66148,7 +66279,7 @@ - + @@ -66352,9 +66483,19 @@ + + + + + + + + + + @@ -66449,19 +66590,19 @@ - - - - - - - - + - + + + + + + + + @@ -66999,13 +67140,13 @@ - + - + @@ -67021,12 +67162,12 @@ - + - + - + @@ -67053,7 +67194,7 @@ - + @@ -67061,7 +67202,7 @@ - + @@ -67223,7 +67364,7 @@ - + @@ -67327,6 +67468,23 @@ + + + + + + + + + + + + + + + + + @@ -67709,6 +67867,17 @@ + + + + + + + + + + + @@ -67732,7 +67901,7 @@ - + @@ -67780,15 +67949,15 @@ - + - + - + @@ -67808,7 +67977,7 @@ - + @@ -67861,6 +68030,7 @@ + @@ -67991,7 +68161,7 @@ - + @@ -68414,18 +68584,18 @@ - + - + - + @@ -68433,13 +68603,13 @@ - + - + @@ -68683,7 +68853,7 @@ - + @@ -68771,99 +68941,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -68873,25 +68950,6 @@ - - - - - - - - - - - - - - - - - - - @@ -69454,10 +69512,10 @@ - + - + @@ -69485,7 +69543,7 @@ - + @@ -69523,7 +69581,7 @@ - + @@ -69880,7 +69938,7 @@ - + @@ -69944,7 +70002,7 @@ - + @@ -70575,7 +70633,7 @@ - + @@ -70712,21 +70770,21 @@ - + - + - + - + @@ -72058,7 +72116,7 @@ - + @@ -72358,7 +72416,7 @@ - + @@ -72387,7 +72445,7 @@ - + @@ -72414,7 +72472,7 @@ - + @@ -72668,7 +72726,7 @@ - + @@ -72692,7 +72750,7 @@ - + @@ -73493,120 +73551,132 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + @@ -73620,39 +73690,39 @@ - + - + - + - + - + - - + + - - + + - - + + - - + + - - + + - - + + - - + + @@ -73814,164 +73884,178 @@ - + - + + + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - 
+ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -74965,7 +75049,7 @@ - + @@ -75097,7 +75181,7 @@ - + @@ -75918,6 +76002,14 @@ + + + + + + + + @@ -79286,7 +79378,7 @@ - + @@ -81608,7 +81700,7 @@ - + @@ -81624,7 +81716,7 @@ - + @@ -81640,7 +81732,7 @@ - + @@ -81672,7 +81764,7 @@ - + @@ -81688,7 +81780,7 @@ - + @@ -81713,7 +81805,7 @@ - + @@ -81737,7 +81829,7 @@ - + @@ -81886,7 +81978,7 @@ - + @@ -82115,7 +82207,7 @@ - + @@ -82137,7 +82229,7 @@ - + @@ -82223,6 +82315,17 @@ + + + + + + + + + + + @@ -82249,6 +82352,7 @@ + @@ -82351,12 +82455,12 @@ - - + + - - + + @@ -82417,6 +82521,17 @@ + + + + + + + + + + + @@ -82487,6 +82602,7 @@ + @@ -82535,7 +82651,7 @@ - + @@ -82547,7 +82663,7 @@ - + @@ -82856,7 +82972,7 @@ - + @@ -82898,7 +83014,7 @@ - + @@ -83520,6 +83636,14 @@ + + + + + + + + @@ -83670,7 +83794,7 @@ - + @@ -83684,7 +83808,7 @@ - + @@ -83749,7 +83873,7 @@ - + @@ -83762,13 +83886,26 @@ - + + + + + + + + + + + + + + @@ -83776,7 +83913,7 @@ - + @@ -83954,6 +84091,14 @@ + + + + + + + + @@ -83975,6 +84120,23 @@ + + + + + + + + + + + + + + + + + @@ -84011,78 +84173,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -84093,12 +84183,12 @@ - + - + - + @@ -84112,15 +84202,15 @@ - + - + - + @@ -84142,7 +84232,7 @@ - + @@ -84188,17 +84278,6 @@ - - - - - - - - - - - @@ -84231,262 +84310,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -84499,17 +84322,6 @@ - - 
- - - - - - - - - @@ -84694,7 +84506,7 @@ - + @@ -84897,7 +84709,29 @@ - + + + + + + + + + + + + + + + + + + + + + + + @@ -84905,7 +84739,7 @@ - + @@ -84932,7 +84766,7 @@ - + @@ -85820,7 +85654,7 @@ - + @@ -85915,13 +85749,6 @@ - - - - - - - @@ -86241,6 +86068,13 @@ + + + + + + + @@ -86284,58 +86118,9 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -86576,7 +86361,7 @@ - + @@ -86635,7 +86420,7 @@ - + @@ -86643,7 +86428,7 @@ - + @@ -86651,7 +86436,7 @@ - + @@ -87042,7 +86827,7 @@ - + @@ -87056,7 +86841,7 @@ - + @@ -87083,7 +86868,7 @@ - + @@ -87099,15 +86884,15 @@ - + - + - + @@ -87115,7 +86900,7 @@ - + @@ -87123,7 +86908,7 @@ - + @@ -87131,7 +86916,7 @@ - + @@ -87139,6 +86924,17 @@ + + + + + + + + + + + @@ -87158,7 +86954,7 @@ - + @@ -87175,7 +86971,7 @@ - + @@ -87234,7 +87030,7 @@ - + @@ -87242,7 +87038,7 @@ - + @@ -87403,41 +87199,41 @@ - - - - - - + + + + + + - - - - - - + + + + + + - - - - - - + + + + + + - - - - - - + + + + + + - + @@ -87445,7 +87241,7 @@ - + @@ -87486,7 +87282,7 @@ - + @@ -87498,7 +87294,7 @@ - + @@ -87923,9 +87719,9 @@ - + - + @@ -87934,7 +87730,7 @@ - + @@ -87942,7 +87738,7 @@ - + @@ -88045,7 +87841,7 @@ - + @@ -88116,7 +87912,7 @@ - + @@ -88130,7 +87926,7 @@ - + @@ -88213,10 +88009,10 @@ - + - + @@ -88231,7 +88027,7 @@ - + @@ -88329,7 +88125,7 @@ - + @@ -88542,7 +88338,7 @@ - + @@ -88550,7 +88346,7 @@ - + @@ -91632,7 +91428,7 @@ - + @@ -91644,7 +91440,7 @@ - + @@ -92938,7 +92734,7 @@ - + @@ -92949,7 +92745,7 @@ - + @@ -92957,7 +92753,7 @@ - + @@ -92965,7 +92761,7 @@ - + @@ -92973,7 +92769,7 @@ - + @@ -92981,7 +92777,7 @@ - + @@ -92989,7 +92785,7 @@ - + @@ -92997,7 +92793,7 @@ - + @@ -93593,7 +93389,7 @@ - + @@ -93601,7 +93397,7 @@ - + @@ -93688,18 +93484,18 @@ - + - + - + - + @@ -93726,7 +93522,7 @@ - + @@ -93745,7 +93541,7 @@ - + @@ -93827,10 +93623,10 @@ - + - + @@ -93877,7 +93673,7 @@ - + @@ -94079,7 +93875,7 @@ - + @@ -94331,7 +94127,7 @@ - + @@ -94366,7 +94162,7 @@ - + @@ 
-94476,6 +94272,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -94498,6 +94329,9 @@ + + + @@ -95251,31 +95085,31 @@ - + - + - + - + - + - + @@ -95297,7 +95131,7 @@ - + @@ -95314,7 +95148,7 @@ - + @@ -95331,7 +95165,7 @@ - + @@ -95340,24 +95174,24 @@ - + - + - + - + - + - + @@ -95365,7 +95199,7 @@ - + @@ -95373,7 +95207,7 @@ - + @@ -95381,7 +95215,7 @@ - + @@ -95404,7 +95238,7 @@ - + @@ -95482,13 +95316,13 @@ - + - + @@ -96227,11 +96061,11 @@ - + - + @@ -96240,7 +96074,7 @@ - + @@ -96264,13 +96098,13 @@ - + - + @@ -96278,7 +96112,7 @@ - + @@ -96389,7 +96223,7 @@ - + @@ -96566,7 +96400,7 @@ - + @@ -97133,7 +96967,7 @@ - + @@ -97248,10 +97082,10 @@ - + - + @@ -97333,10 +97167,10 @@ - + - + @@ -97355,10 +97189,10 @@ - + - + @@ -98213,7 +98047,7 @@ - + @@ -99134,7 +98968,7 @@ - + @@ -99163,7 +98997,7 @@ - + @@ -99171,7 +99005,7 @@ - + @@ -99179,7 +99013,7 @@ - + @@ -99187,7 +99021,7 @@ - + @@ -99195,7 +99029,7 @@ - + @@ -99470,7 +99304,7 @@ - + @@ -99850,7 +99684,7 @@ - + @@ -99950,6 +99784,6 @@ -- GitLab From 1f0404339bacfce164b4c4975e095cf3bc7f3094 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 12 Aug 2020 17:49:43 +0200 Subject: [PATCH 0129/1304] ANDROID: GKI: add some padding to some driver core structures It was requested that the following structures get some padding in order to be able to handle future issues/bugs/fixes that might arise: struct dev_links_info struct device_link struct fwnode_handle Bug: 163662096 Signed-off-by: Greg Kroah-Hartman Change-Id: Ie26f422c590f9ddbe99f0885f36da3feec64e9a6 Signed-off-by: Will McVicker --- include/linux/device.h | 10 ++++++++++ include/linux/fwnode.h | 6 ++++++ 2 files changed, 16 insertions(+) diff --git a/include/linux/device.h b/include/linux/device.h index 7726a1b8fceb..6b0e3448e115 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -901,6 +901,11 @@ struct device_link { struct rcu_head rcu_head; #endif bool supplier_preactivated; /* Owned by consumer 
probe. */ + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /** @@ -934,6 +939,11 @@ struct dev_links_info { struct list_head defer_sync; bool need_for_probe; enum dl_dev_state status; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /** diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index a497b9d97f5a..becec51ec9e5 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -13,6 +13,7 @@ #define _LINUX_FWNODE_H_ #include +#include struct fwnode_operations; struct device; @@ -21,6 +22,11 @@ struct fwnode_handle { struct fwnode_handle *secondary; const struct fwnode_operations *ops; struct device *dev; + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); + ANDROID_KABI_RESERVE(3); + ANDROID_KABI_RESERVE(4); }; /** -- GitLab From d5d6acdf8efa24e93998d80e6d4e46b52d4c1156 Mon Sep 17 00:00:00 2001 From: Will McVicker Date: Thu, 13 Aug 2020 21:54:57 -0700 Subject: [PATCH 0130/1304] ANDROID: Update the ABI xml based on the new driver core padding Leaf changes summary: 1811 artifacts changed (5 filtered out) Changed leaf types summary: 58 leaf types changed Removed/Changed/Added functions summary: 0 Removed, 1735 Changed (5 filtered out), 0 Added function Removed/Changed/Added variables summary: 0 Removed, 18 Changed, 0 Added variable Bug: 163662096 Signed-off-by: Will McVicker Change-Id: I167955cce0d661406fe8ebb047b4fd277fcd823e --- android/abi_gki_aarch64.xml | 6608 ++++++++++++++++++----------------- 1 file changed, 3485 insertions(+), 3123 deletions(-) diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml index 4edbf8695e98..c8c54a7d6a31 100644 --- a/android/abi_gki_aarch64.xml +++ b/android/abi_gki_aarch64.xml @@ -3,12 +3,12 @@ - + - - + + - + @@ -18,20 +18,20 @@ - - - - + + + + - - - - + + + + - - + + @@ -39,45 +39,45 @@ - - - - - - - - - + + + + + + + + + - - - - - - - - - + + + + + + + + + - + - + 
- + - - + + - - + + - + @@ -104,61 +104,61 @@ - + - + - - + + - - - - + + + + - - + + - - - - - - - - - - - - - + + + + + + + + + + + + + - - + + - - - + + + - - - - - - - - - + + + + + + + + + @@ -167,29 +167,29 @@ - + - + - + - + - + - - - - - - + + + + + + @@ -216,28 +216,28 @@ - - + + - + - + - - - + + + - + - + - + @@ -247,185 +247,185 @@ - + - - - + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - + + + + + + + + + + + + + + - - - - - - - - + + + + + + + + - - - - - - - + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - - - - - - - - - + + + + + + + + + - + - + - + - - - - - - + + + + + + - - - - - - - - + + + + + + + + - - + + - - + + - + - - + + @@ -440,32 +440,32 @@ - - - - - - - - - - - - - + + + + + + + + + + + + + - - - - - - - - - - - - + + + + + + + + + + + + @@ -473,158 +473,158 @@ - - - + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -633,29 +633,29 @@ - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + @@ -666,15 +666,15 @@ - + - - - - - - - + + + + + + + @@ -684,297 +684,297 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - + + + + + + + + + + + + - + - - - - + + + + - - - + + + - - - - - - - - - - - - + + + + + + + + + + + + - - + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - + + + + + + - - + + - - - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - + + @@ -986,20 +986,20 @@ - + - + - + - - - + + + @@ -1009,71 +1009,71 @@ - - - - - + + + + + - - - - - - - - + + + + + + + + - + - - + + - + - - - - - + + + + + - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + - + @@ -1082,28 +1082,28 @@ - + - - - - - - - + + + + + + + - - + + - - - - - - + + + + + + @@ -1118,205 +1118,205 @@ - + - - - - - - - + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + - + - - - - - - - + + + + + + + - - + + - - - - - - + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - + + - - - + + + - - - - - + + + + + - - - + + + - + - + - - - - - - - - + + + + + + + + - - + + @@ -1359,33 +1359,33 @@ - - - - + + + + - - + + - + - - - - - - - - - - - - + + + + + + + + + + + + @@ -1405,22 +1405,22 @@ - - - - - - - + + + + + + + - - + + - + - - + + @@ -1428,52 +1428,52 @@ - - - - 
- - - + + + + + + + - - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + @@ -1481,116 +1481,116 @@ - - - + + + - - - - + + + + - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + - - - - - - - - - - - + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + @@ -1599,89 +1599,89 @@ - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - + + + + + + + + - - - - - + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - + + + + + + @@ -1689,26 +1689,26 @@ - - - - - - - - - - + + + + + + + + + + - - - - - - - + + + + + + + - + @@ -1722,26 +1722,26 @@ - - + + - - - + + + - - - - + + + + - - - + + + @@ -1758,8 +1758,8 @@ - - + + @@ -1768,87 +1768,87 @@ - - - - - + + + + + - + - - - + + + - + - + - - - - - - - + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + @@ -1856,8 +1856,8 @@ - - + + @@ -1874,18 +1874,18 @@ - - - + + + - + - - + + - + @@ -1897,8 +1897,8 @@ - - + + @@ -1906,24 +1906,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -1933,8 +1933,8 @@ - - + + @@ -1944,22 +1944,22 @@ - - - - - - - - - - + + + + + + + + + + - + - + @@ -1971,219 +1971,219 @@ - - + + - + - - + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - + + + + + + + + - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + - - - - - + + + + + - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -2218,17 +2218,17 @@ - + - + - - + + @@ -2245,17 +2245,17 @@ - - - - - - - - - - - + + + + + + + + + + + @@ -2263,130 +2263,130 @@ - - - - - - + + + + + + - + - + - - + + - - - - - - + + + + + + - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + - - + + - + - + - + - - - - - + + + + + - - - - - - - - - - - - + + + + + + + + + + + + - + - + - + - + - + - + - - + + - - - - - - - - - - + + + + + + + + + + - - - + + + @@ -2394,53 +2394,53 @@ - - - - - - + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + - - + + - + @@ -2450,109 +2450,109 @@ - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - + + + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - + + + + - - + + - + @@ -2563,47 +2563,47 @@ - - - - - - - - + + + + + + + + - - - - - - - - - - - - - + + + + + + + + + + + + + - - + + - - - - - - + + + + + + @@ -2620,13 +2620,13 @@ - + - + @@ -2636,7 +2636,7 @@ - + @@ -2644,33 +2644,33 @@ - - + + - + - - + + - + - + - + - + @@ -2683,13 +2683,13 @@ - + - + - + @@ -2697,7 +2697,7 @@ - + @@ -2708,9 +2708,9 @@ - - - + + + @@ -8190,7 +8190,7 @@ - + @@ -8209,22 +8209,22 @@ - + - + - + - + - + - + @@ -9347,7 +9347,7 @@ - + @@ -9960,70 +9960,70 @@ - + - + - 
+ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -16372,113 +16372,113 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -17058,208 +17058,208 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -18250,14 +18250,14 @@ - + - + - + @@ -18599,7 +18599,7 @@ - + @@ -18683,7 +18683,7 @@ - + @@ -20245,7 +20245,7 @@ - + @@ -20258,61 +20258,61 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -20853,7 +20853,7 @@ - + @@ -20866,40 +20866,40 @@ - + - + - + - + - + - + - + - + - + - + - + - + @@ -21758,47 +21758,47 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -22006,7 +22006,7 @@ - + @@ -26934,41 +26934,41 @@ - + - + - + - + - + - + - + - + - + - + - + @@ -27256,7 +27256,7 @@ - + @@ -27299,28 +27299,28 @@ - + - + - + - + - + - + - + - + @@ -28193,20 +28193,6 @@ - - - - - - - - - - - - - - @@ -28215,17 +28201,6 @@ - - - - - - - - - - - @@ -28953,26 +28928,26 @@ - + - + - + - + - + - + @@ -30817,7 +30792,7 @@ - + @@ -30842,64 +30817,75 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + @@ -30941,17 +30927,6 @@ - - - - - - - - - - - @@ -31442,7 +31417,7 @@ - + @@ -31474,7 +31449,7 @@ - + @@ -31490,85 +31465,85 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -32189,7 +32164,7 @@ - + @@ -32310,25 +32285,25 @@ - + - + - + - + - + - + - + @@ -33688,7 +33663,7 @@ - + @@ -33830,16 +33805,16 @@ - + - + - + - + @@ -33908,7 +33883,7 @@ - + @@ -33935,62 +33910,62 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -34060,25 +34035,25 @@ - + - + - + - + - + - 
+ @@ -35748,7 +35723,7 @@ - + @@ -35758,19 +35733,19 @@ - + - + - + - + - + @@ -36258,7 +36233,7 @@ - + @@ -36277,67 +36252,67 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -41036,7 +41011,7 @@ - + @@ -41052,17 +41027,17 @@ - + - + - + - + @@ -41093,28 +41068,28 @@ - + - + - + - + - + - + - + - + @@ -48813,7 +48788,7 @@ - + @@ -49100,7 +49075,7 @@ - + @@ -49131,6 +49106,18 @@ + + + + + + + + + + + + @@ -49142,7 +49129,7 @@ - + @@ -49150,29 +49137,29 @@ - + - + - + - + - + - + - + - + @@ -49391,7 +49378,7 @@ - + @@ -50052,7 +50039,7 @@ - + @@ -50441,7 +50428,7 @@ - + @@ -51078,7 +51065,7 @@ - + @@ -51121,31 +51108,31 @@ - + - + - + - + - + - + - + - + - + @@ -51154,10 +51141,10 @@ - + - + @@ -51608,96 +51595,96 @@ - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -52184,7 +52171,7 @@ - + @@ -52467,50 +52454,50 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -52694,16 +52681,16 @@ - + - + - + - + @@ -53050,7 +53037,7 @@ - + @@ -53063,13 +53050,13 @@ - + - + - + @@ -53087,28 +53074,28 @@ - + - + - + - + - + - + - + - + @@ -53268,7 +53255,7 @@ - + @@ -53300,21 +53287,21 @@ - + - + - + - + @@ -53326,7 +53313,7 @@ - + @@ -53666,31 +53653,31 @@ - + - + - + - + - + - + - + - + @@ -53871,7 +53858,7 @@ - + @@ -53881,85 +53868,85 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -54027,7 +54014,7 @@ - + @@ -54037,34 +54024,34 @@ - + - + - + - + - + - + - + - + - + - + @@ -54270,7 +54257,7 @@ - + @@ -54532,37 +54519,37 @@ - + - + - + - + - + - + - + - + - + - + @@ -56335,17 +56322,17 @@ - + - + - + @@ -57104,7 +57091,7 @@ - + @@ -57167,35 +57154,35 @@ - + - + - + - + - + - + - + - + - + @@ -57553,7 +57540,6 @@ - @@ -57757,12 +57743,12 @@ - + - + @@ -57804,13 +57790,13 @@ - + - + @@ -57834,7 +57820,7 @@ - + @@ -57843,7 +57829,7 @@ - + @@ -57871,26 +57857,26 @@ - + - + - + - + - 
+ - + - + @@ -58205,65 +58191,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -58871,17 +58798,6 @@ - - - - - - - - - - - @@ -58891,7 +58807,7 @@ - + @@ -59057,7 +58973,7 @@ - + @@ -59273,7 +59189,7 @@ - + @@ -59333,7 +59249,7 @@ - + @@ -59364,19 +59280,19 @@ - + - + - + - + - + @@ -59433,16 +59349,16 @@ - + - + - + - + @@ -59797,7 +59713,7 @@ - + @@ -59837,79 +59753,79 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -60046,7 +59962,7 @@ - + @@ -60081,7 +59997,7 @@ - + @@ -60129,7 +60045,7 @@ - + @@ -60157,7 +60073,7 @@ - + @@ -60283,32 +60199,32 @@ - + - + - + - + - + - + @@ -60336,7 +60252,7 @@ - + @@ -60406,11 +60322,11 @@ - + - + @@ -60455,7 +60371,7 @@ - + @@ -60465,11 +60381,11 @@ - + - + @@ -60490,17 +60406,17 @@ - + - + - + @@ -60792,7 +60708,7 @@ - + @@ -60835,34 +60751,34 @@ - + - + - + - + - + - + - + - + - + - + @@ -60904,34 +60820,34 @@ - + - + - + - + - + - + - + - + - + - + @@ -60943,40 +60859,40 @@ - + - + - + - + - + - + - + - + - + - + - + - + @@ -61420,7 +61336,7 @@ - + @@ -61466,22 +61382,22 @@ - + - + - + - + - + - + @@ -63150,47 +63066,47 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -63223,20 +63139,20 @@ - + - + - + - + @@ -63297,26 +63213,26 @@ - + - + - + - + - + - + @@ -63340,20 +63256,20 @@ - + - + - + - + @@ -63574,7 +63490,7 @@ - + @@ -63596,19 +63512,19 @@ - + - + - + - + - + @@ -63712,7 +63628,7 @@ - + @@ -64273,7 +64189,7 @@ - + @@ -64902,7 +64818,7 @@ - + @@ -64910,7 +64826,7 @@ - + @@ -64919,7 +64835,7 @@ - + @@ -65007,14 +64923,6 @@ - - - - - - - - @@ -65035,7 +64943,7 @@ - + @@ -65108,6 +65016,14 @@ + + + + + + + + @@ -65125,6 +65041,16 @@ + + + + + + + + + + @@ -65232,10 +65158,10 @@ - + - + @@ -65744,10 +65670,10 @@ - + - + @@ -65975,7 +65901,7 @@ - + @@ -65988,7 +65914,7 @@ - + @@ -66002,7 +65928,7 @@ - + @@ -66030,7 +65956,7 @@ - + @@ -66038,7 +65964,7 @@ - + @@ -66155,7 +66081,7 
@@ - + @@ -66168,22 +66094,22 @@ - + - + - + - + - + - + @@ -66242,7 +66168,7 @@ - + @@ -66254,15 +66180,15 @@ - + - + - + - + @@ -66279,7 +66205,7 @@ - + @@ -66346,7 +66272,7 @@ - + @@ -66483,19 +66409,9 @@ - - - - - - - - - - @@ -67140,13 +67056,13 @@ - + - + @@ -67162,7 +67078,7 @@ - + @@ -67364,7 +67280,7 @@ - + @@ -67415,7 +67331,7 @@ - + @@ -67468,7 +67384,7 @@ - + @@ -67867,7 +67783,7 @@ - + @@ -67875,7 +67791,7 @@ - + @@ -67901,7 +67817,7 @@ - + @@ -67949,7 +67865,7 @@ - + @@ -67977,7 +67893,7 @@ - + @@ -68030,7 +67946,7 @@ - + @@ -68161,7 +68077,7 @@ - + @@ -68584,7 +68500,7 @@ - + @@ -68603,13 +68519,13 @@ - + - + @@ -68853,7 +68769,7 @@ - + @@ -68941,6 +68857,99 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -68950,6 +68959,25 @@ + + + + + + + + + + + + + + + + + + + @@ -71904,159 +71932,159 @@ - + - + - + - + - + - + - + - + - + - + - + - - - - - - - - - - + - - + + - - + + - + - + - - - - + - + - + - + + + + - + - + - + - - + + + + + - + - + - - + + - - + + - - + + - - + + - - + + + + + - + - - + + - - + + - - + + - - + + + + + - + - + - + - - - - - - - - - - - - - + - + - + - + - + + + + + + + + + + + + + @@ -72445,7 +72473,7 @@ - + @@ -72472,7 +72500,7 @@ - + @@ -72726,7 +72754,7 @@ - + @@ -72750,7 +72778,7 @@ - + @@ -74513,7 +74541,7 @@ - + @@ -74532,43 +74560,43 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -74600,7 +74628,7 @@ - + @@ -74628,52 +74656,52 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -76002,14 +76030,6 @@ - - - - - - - - @@ -77018,27 +77038,39 @@ - + - + - + - + - + - + - + + + + + + + + + + + + + - + @@ -77420,7 +77452,7 @@ - + @@ -77436,113 +77468,125 @@ - + - + - + - + - + - + - + - + - + - + - + - + + + + + + + + + + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -81732,7 +81776,7 @@ - + @@ 
-81805,7 +81849,7 @@ - + @@ -82167,7 +82211,7 @@ - + @@ -82183,7 +82227,7 @@ - + @@ -82207,7 +82251,7 @@ - + @@ -82681,7 +82725,7 @@ - + @@ -83142,7 +83186,7 @@ - + @@ -83196,7 +83240,7 @@ - + @@ -83636,14 +83680,6 @@ - - - - - - - - @@ -83794,7 +83830,7 @@ - + @@ -83893,19 +83929,6 @@ - - - - - - - - - - - - - @@ -84091,14 +84114,6 @@ - - - - - - - - @@ -84120,23 +84135,6 @@ - - - - - - - - - - - - - - - - - @@ -84173,6 +84171,78 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -84183,12 +84253,12 @@ - + - + - + @@ -84202,7 +84272,7 @@ - + @@ -84232,7 +84302,7 @@ - + @@ -84278,6 +84348,17 @@ + + + + + + + + + + + @@ -84310,6 +84391,262 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -84322,6 +84659,17 @@ + + + + + + + + + + + @@ -84424,7 +84772,7 @@ - + @@ -84506,7 +84854,7 @@ - + @@ -84709,29 +85057,7 @@ - - - - - - - - - - - - - - - - - - - - - - - + @@ -84739,17 +85065,6 @@ - - - - - - - - - - - @@ -84766,7 +85081,7 @@ - + @@ -84983,17 +85298,17 @@ - + - + - + - + @@ -85026,127 +85341,127 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -85180,22 +85495,22 @@ - + - + - + - + - + - + @@ -85654,7 +85969,7 @@ - + @@ -85662,7 +85977,7 @@ - + @@ -86118,9 +86433,58 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -86361,7 +86725,7 @@ - + 
@@ -86420,7 +86784,7 @@ - + @@ -86428,7 +86792,7 @@ - + @@ -86827,7 +87191,7 @@ - + @@ -86841,7 +87205,7 @@ - + @@ -86884,94 +87248,13 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + @@ -87030,7 +87313,7 @@ - + @@ -87199,6 +87482,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -87233,7 +87586,7 @@ - + @@ -87241,7 +87594,7 @@ - + @@ -87282,7 +87635,7 @@ - + @@ -87294,7 +87647,7 @@ - + @@ -87719,9 +88072,9 @@ - + - + @@ -87841,7 +88194,7 @@ - + @@ -88009,10 +88362,10 @@ - + - + @@ -88027,7 +88380,7 @@ - + @@ -88125,7 +88478,7 @@ - + @@ -88338,7 +88691,7 @@ - + @@ -88346,7 +88699,7 @@ - + @@ -88354,10 +88707,10 @@ - + - + @@ -88589,7 +88942,7 @@ - + @@ -88654,7 +89007,7 @@ - + @@ -88817,64 +89170,64 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -88883,7 +89236,7 @@ - + @@ -90013,6 +90366,53 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -90552,7 +90952,7 @@ - + @@ -91428,7 +91828,7 @@ - + @@ -91440,7 +91840,7 @@ - + @@ -92734,7 +93134,7 @@ - + @@ -92761,7 +93161,7 @@ - + @@ -92769,7 +93169,7 @@ - + @@ -92777,7 +93177,7 @@ - + @@ -92785,7 +93185,7 @@ - + @@ -92793,7 +93193,7 @@ - + @@ -92801,7 +93201,7 @@ - + @@ -93389,7 +93789,7 @@ - + @@ -93484,18 +93884,18 @@ - + - + - + - + @@ -93536,12 +93936,12 @@ - + - + @@ -93623,10 +94023,10 @@ - + - + @@ -94127,7 +94527,7 @@ - + @@ -94162,7 +94562,7 @@ - + @@ -94173,7 +94573,7 @@ - + @@ -94272,41 +94672,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -94329,9 +94694,6 @@ - - - @@ -94876,38 +95238,38 @@ - + - + - + - + - + - + - + - + - + - + @@ -95100,10 +95462,10 @@ - + - + @@ -95120,7 +95482,7 @@ - + @@ -95131,7 +95493,7 @@ - + @@ -95176,22 +95538,22 @@ - + - + - + - + - + @@ -95199,7 
+95561,7 @@ - + @@ -95207,7 +95569,7 @@ - + @@ -95215,7 +95577,7 @@ - + @@ -95238,7 +95600,7 @@ - + @@ -95550,7 +95912,7 @@ - + @@ -95584,13 +95946,13 @@ - + - + - + @@ -96061,11 +96423,11 @@ - + - + @@ -96074,7 +96436,7 @@ - + @@ -96104,7 +96466,7 @@ - + @@ -96112,7 +96474,7 @@ - + @@ -96223,7 +96585,7 @@ - + @@ -96400,7 +96762,7 @@ - + @@ -96967,7 +97329,7 @@ - + @@ -98047,7 +98409,7 @@ - + @@ -98968,7 +99330,7 @@ - + @@ -99013,7 +99375,7 @@ - + @@ -99021,7 +99383,7 @@ - + @@ -99029,7 +99391,7 @@ - + -- GitLab From f1b1fb853d245c0f0b0f9b7664badf1272439804 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20=C5=BBenczykowski?= Date: Mon, 17 Aug 2020 14:17:23 -0700 Subject: [PATCH 0131/1304] ANDROID: fix a bug in quota2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If quota is precisely equal to skb->len then a notification would not be sent due to immediately hitting 0. This fixes that, and takes the opportunity to slightly clean up the code and make quota behave more correctly for packet mode as well. Test: builds, net tests continue to pass Bug: 164336990 Signed-off-by: Maciej Żenczykowski Change-Id: I78a11b48794496255513a6226c0469d809d7aa56 (cherry picked from commit b20eacd8ddbd1dbf403df94f5ba6384e6fef0113) --- net/netfilter/xt_quota2.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c index c42724469759..c9a06d0652fe 100644 --- a/net/netfilter/xt_quota2.c +++ b/net/netfilter/xt_quota2.c @@ -306,6 +306,8 @@ quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) { struct xt_quota_mtinfo2 *q = (void *)par->matchinfo; struct xt_quota_counter *e = q->master; + int charge = (q->flags & XT_QUOTA_PACKET) ? 
1 : skb->len; + bool no_change = q->flags & XT_QUOTA_NO_CHANGE; bool ret = q->flags & XT_QUOTA_INVERT; spin_lock_bh(&e->lock); @@ -314,24 +316,21 @@ quota_mt2(const struct sk_buff *skb, struct xt_action_param *par) * While no_change is pointless in "grow" mode, we will * implement it here simply to have a consistent behavior. */ - if (!(q->flags & XT_QUOTA_NO_CHANGE)) { - e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; - } - ret = true; + if (!no_change) + e->quota += charge; + ret = true; /* note: does not respect inversion (bug??) */ } else { - if (e->quota >= skb->len) { - if (!(q->flags & XT_QUOTA_NO_CHANGE)) - e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len; + if (e->quota > charge) { + if (!no_change) + e->quota -= charge; ret = !ret; - } else { + } else if (e->quota) { /* We are transitioning, log that fact. */ - if (e->quota) { - quota2_log(xt_hooknum(par), - skb, - xt_in(par), - xt_out(par), - q->name); - } + quota2_log(xt_hooknum(par), + skb, + xt_in(par), + xt_out(par), + q->name); /* we do not allow even small packets from now on */ e->quota = 0; } -- GitLab From 1874d3d6ad0b06bab747e36e70d9b2a5aeb3183a Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Thu, 30 Jul 2020 15:45:54 -0700 Subject: [PATCH 0132/1304] tracepoint: Mark __tracepoint_string's __used commit f3751ad0116fb6881f2c3c957d66a9327f69cefb upstream. __tracepoint_string's have their string data stored in .rodata, and an address to that data stored in the "__tracepoint_str" section. Functions that refer to those strings refer to the symbol of the address. Compiler optimization can replace those address references with references directly to the string data. If the address doesn't appear to have other uses, then it appears dead to the compiler and is removed. This can break the /tracing/printk_formats sysfs node which iterates the addresses stored in the "__tracepoint_str" section. 
Like other strings stored in custom sections in this header, mark these __used to inform the compiler that there are other non-obvious users of the address, so they should still be emitted. Link: https://lkml.kernel.org/r/20200730224555.2142154-2-ndesaulniers@google.com Cc: Ingo Molnar Cc: Miguel Ojeda Cc: stable@vger.kernel.org Fixes: 102c9323c35a8 ("tracing: Add __tracepoint_string() to export string pointers") Reported-by: Tim Murray Reported-by: Simon MacMullen Suggested-by: Greg Hackmann Signed-off-by: Nick Desaulniers Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- include/linux/tracepoint.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index e9de8ad0bad7..444aa73037f1 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -364,7 +364,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) -#define __tracepoint_string __attribute__((section("__tracepoint_str"))) +#define __tracepoint_string __attribute__((section("__tracepoint_str"), used)) #else /* * tracepoint_string() is used to save the string address for userspace -- GitLab From 37274b5c9b88b7dd7ba1f4ecc947be64815a8a50 Mon Sep 17 00:00:00 2001 From: Grant Likely Date: Fri, 10 Jul 2020 16:19:39 +0100 Subject: [PATCH 0133/1304] HID: input: Fix devices that return multiple bytes in battery report commit 4f57cace81438cc873a96f9f13f08298815c9b51 upstream. Some devices, particularly the 3DConnexion Spacemouse wireless 3D controllers, return more than just the battery capacity in the battery report. The Spacemouse devices return an additional byte with a device specific field. However, hidinput_query_battery_capacity() only requests a 2 byte transfer. 
When a spacemouse is connected via USB (direct wire, no wireless dongle) and it returns a 3 byte report instead of the assumed 2 byte battery report the larger transfer confuses and frightens the USB subsystem which chooses to ignore the transfer. Then after 2 seconds assume the device has stopped responding and reset it. This can be reproduced easily by using a wired connection with a wireless spacemouse. The Spacemouse will enter a loop of resetting every 2 seconds which can be observed in dmesg. This patch solves the problem by increasing the transfer request to 4 bytes instead of 2. The fix isn't particularly elegant, but it is simple and safe to backport to stable kernels. A further patch will follow to more elegantly handle battery reports that contain additional data. Signed-off-by: Grant Likely Cc: Darren Hart Cc: Jiri Kosina Cc: Benjamin Tissoires Cc: stable@vger.kernel.org Tested-by: Darren Hart Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hid-input.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index dbb0cbe65fc9..51bfe23d00bc 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -362,13 +362,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev) u8 *buf; int ret; - buf = kmalloc(2, GFP_KERNEL); + buf = kmalloc(4, GFP_KERNEL); if (!buf) return -ENOMEM; - ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, + ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4, dev->battery_report_type, HID_REQ_GET_REPORT); - if (ret != 2) { + if (ret < 2) { kfree(buf); return -ENODATA; } -- GitLab From 38de4308c5c3319ae9c815b6d6aa8d2b5804bace Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Thu, 13 Aug 2020 20:33:42 +0000 Subject: [PATCH 0134/1304] cgroup: add missing skcd->no_refcnt check in cgroup_sk_clone() Add skcd->no_refcnt check which is missed when backporting ad0f75e5f57c ("cgroup: fix 
cgroup_sk_alloc() for sk_clone_lock()"). This patch is needed in stable-4.9, stable-4.14 and stable-4.19. Signed-off-by: Yang Yingliang Signed-off-by: Sasha Levin --- kernel/cgroup/cgroup.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 6ae98c714edd..2a879d34bbe5 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -5957,6 +5957,8 @@ void cgroup_sk_clone(struct sock_cgroup_data *skcd) { /* Socket clone path */ if (skcd->val) { + if (skcd->no_refcnt) + return; /* * We might be cloning a socket which is left in an empty * cgroup and the cgroup might have already been rmdir'd. -- GitLab From 40f981691bdf470c1b0b7f4605690efdf28baec3 Mon Sep 17 00:00:00 2001 From: Zhenzhong Duan Date: Thu, 11 Jun 2020 10:32:38 +0800 Subject: [PATCH 0135/1304] x86/mce/inject: Fix a wrong assignment of i_mce.status [ Upstream commit 5d7f7d1d5e01c22894dee7c9c9266500478dca99 ] The original code is a nop as i_mce.status is or'ed with part of itself, fix it. 
Fixes: a1300e505297 ("x86/ras/mce_amd_inj: Trigger deferred and thresholding errors interrupts") Signed-off-by: Zhenzhong Duan Signed-off-by: Borislav Petkov Acked-by: Yazen Ghannam Link: https://lkml.kernel.org/r/20200611023238.3830-1-zhenzhong.duan@gmail.com Signed-off-by: Sasha Levin --- arch/x86/kernel/cpu/mcheck/mce-inject.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 1ceccc4a5472..9cc524be3c94 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -518,7 +518,7 @@ static void do_inject(void) */ if (inj_type == DFR_INT_INJ) { i_mce.status |= MCI_STATUS_DEFERRED; - i_mce.status |= (i_mce.status & ~MCI_STATUS_UC); + i_mce.status &= ~MCI_STATUS_UC; } /* -- GitLab From 519252e38ca1e1d614b11f982b1f489d62c135ee Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 9 Jun 2020 14:37:48 +0200 Subject: [PATCH 0136/1304] sched/fair: Fix NOHZ next idle balance [ Upstream commit 3ea2f097b17e13a8280f1f9386c331b326a3dbef ] With commit: 'b7031a02ec75 ("sched/fair: Add NOHZ_STATS_KICK")' rebalance_domains of the local cfs_rq happens before others idle cpus have updated nohz.next_balance and its value is overwritten. Move the update of nohz.next_balance for other idles cpus before balancing and updating the next_balance of local cfs_rq. Also, the nohz.next_balance is now updated only if all idle cpus got a chance to rebalance their domains and the idle balance has not been aborted because of new activities on the CPU. In case of need_resched, the idle load balance will be kick the next jiffie in order to address remaining ilb. 
Fixes: b7031a02ec75 ("sched/fair: Add NOHZ_STATS_KICK") Reported-by: Peng Liu Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Acked-by: Mel Gorman Link: https://lkml.kernel.org/r/20200609123748.18636-1-vincent.guittot@linaro.org Signed-off-by: Sasha Levin --- kernel/sched/fair.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d8c249e6dcb7..696d08a4593e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -9208,7 +9208,12 @@ static void kick_ilb(unsigned int flags) { int ilb_cpu; - nohz.next_balance++; + /* + * Increase nohz.next_balance only when if full ilb is triggered but + * not if we only update stats. + */ + if (flags & NOHZ_BALANCE_KICK) + nohz.next_balance = jiffies+1; ilb_cpu = find_new_ilb(); @@ -9503,6 +9508,14 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, } } + /* + * next_balance will be updated only when there is a need. + * When the CPU is attached to null domain for ex, it will not be + * updated. + */ + if (likely(update_next_balance)) + nohz.next_balance = next_balance; + /* Newly idle CPU doesn't need an update */ if (idle != CPU_NEWLY_IDLE) { update_blocked_averages(this_cpu); @@ -9523,14 +9536,6 @@ static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, if (has_blocked_load) WRITE_ONCE(nohz.has_blocked, 1); - /* - * next_balance will be updated only when there is a need. - * When the CPU is attached to null domain for ex, it will not be - * updated. 
- */ - if (likely(update_next_balance)) - nohz.next_balance = next_balance; - return ret; } -- GitLab From 008560eb23a8ad242c15cb90478c428a5a69f546 Mon Sep 17 00:00:00 2001 From: Peng Liu Date: Tue, 9 Jun 2020 23:09:36 +0800 Subject: [PATCH 0137/1304] sched: correct SD_flags returned by tl->sd_flags() [ Upstream commit 9b1b234bb86bcdcdb142e900d39b599185465dbb ] During sched domain init, we check whether non-topological SD_flags are returned by tl->sd_flags(), if found, fire a waning and correct the violation, but the code failed to correct the violation. Correct this. Fixes: 143e1e28cb40 ("sched: Rework sched_domain topology definition") Signed-off-by: Peng Liu Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Vincent Guittot Reviewed-by: Valentin Schneider Link: https://lkml.kernel.org/r/20200609150936.GA13060@iZj6chx1xj0e0buvshuecpZ Signed-off-by: Sasha Levin --- kernel/sched/topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 74b694392f2f..f58efa5cc647 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1098,7 +1098,7 @@ sd_init(struct sched_domain_topology_level *tl, sd_flags = (*tl->sd_flags)(); if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, "wrong sd_flags in topology description\n")) - sd_flags &= ~TOPOLOGY_SD_FLAGS; + sd_flags &= TOPOLOGY_SD_FLAGS; *sd = (struct sched_domain){ .min_interval = sd_weight, -- GitLab From 3fe4f18eebb473481371287ba202cefceaa9a3c9 Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Sun, 7 Jun 2020 23:29:09 +0200 Subject: [PATCH 0138/1304] arm64: dts: rockchip: fix rk3368-lion gmac reset gpio [ Upstream commit 2300e6dab473e93181cf76e4fe6671aa3d24c57b ] The lion gmac node currently uses opposite active-values for the gmac phy reset pin. The gpio-declaration uses active-high while the separate snps,reset-active-low property marks the pin as active low. 
While on the kernel side this works ok, other DT users may get confused - as seen with uboot right now. So bring this in line and make both properties match, similar to the other Rockchip board. Fixes: d99a02bcfa81 ("arm64: dts: rockchip: add RK3368-uQ7 (Lion) SoM") Signed-off-by: Heiko Stuebner Link: https://lore.kernel.org/r/20200607212909.920575-1-heiko@sntech.de Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi index 1315972412df..23098c13ad83 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi @@ -159,7 +159,7 @@ pinctrl-0 = <&rgmii_pins>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; - snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_HIGH>; + snps,reset-gpio = <&gpio3 RK_PB3 GPIO_ACTIVE_LOW>; tx_delay = <0x10>; rx_delay = <0x10>; status = "okay"; -- GitLab From ecf6e2538009f1756cfd2d191905611077ddb23f Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Thu, 4 Jun 2020 11:12:39 +0200 Subject: [PATCH 0139/1304] arm64: dts: rockchip: fix rk3399-puma vcc5v0-host gpio [ Upstream commit 7a7184f6cfa9279f1a1c10a1845d247d7fad54ff ] The puma vcc5v0_host regulator node currently uses opposite active-values for the enable pin. The gpio-declaration uses active-high while the separate enable-active-low property marks the pin as active low. While on the kernel side this works ok, other DT users may get confused - as seen with uboot right now. So bring this in line and make both properties match, similar to the gmac fix. 
Fixes: 2c66fc34e945 ("arm64: dts: rockchip: add RK3399-Q7 (Puma) SoM") Signed-off-by: Heiko Stuebner Link: https://lore.kernel.org/r/20200604091239.424318-1-heiko@sntech.de Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 0130b9f98c9d..baacb6e227b9 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -101,7 +101,7 @@ vcc5v0_host: vcc5v0-host-regulator { compatible = "regulator-fixed"; - gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_HIGH>; + gpio = <&gpio4 RK_PA3 GPIO_ACTIVE_LOW>; enable-active-low; pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_host_en>; -- GitLab From 7a8abc99f9569b336d2585ba8b337ac1e3390f2b Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Wed, 3 Jun 2020 15:28:36 +0200 Subject: [PATCH 0140/1304] arm64: dts: rockchip: fix rk3399-puma gmac reset gpio [ Upstream commit 8a445086f8af0b7b9bd8d1901d6f306bb154f70d ] The puma gmac node currently uses opposite active-values for the gmac phy reset pin. The gpio-declaration uses active-high while the separate snps,reset-active-low property marks the pin as active low. While on the kernel side this works ok, other DT users may get confused - as seen with uboot right now. So bring this in line and make both properties match, similar to the other Rockchip board. 
Fixes: 2c66fc34e945 ("arm64: dts: rockchip: add RK3399-Q7 (Puma) SoM") Signed-off-by: Heiko Stuebner Link: https://lore.kernel.org/r/20200603132836.362519-1-heiko@sntech.de Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index baacb6e227b9..b155f657292b 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -156,7 +156,7 @@ phy-mode = "rgmii"; pinctrl-names = "default"; pinctrl-0 = <&rgmii_pins>; - snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>; + snps,reset-gpio = <&gpio3 RK_PC0 GPIO_ACTIVE_LOW>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; tx_delay = <0x10>; -- GitLab From 0a9c84ce0c78d27004ea46aca1e8a5752dffd924 Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Thu, 28 May 2020 15:22:37 -0500 Subject: [PATCH 0141/1304] EDAC: Fix reference count leaks [ Upstream commit 17ed808ad243192fb923e4e653c1338d3ba06207 ] When kobject_init_and_add() returns an error, it should be handled because kobject_init_and_add() takes a reference even when it fails. If this function returns an error, kobject_put() must be called to properly clean up the memory associated with the object. Therefore, replace calling kfree() and call kobject_put() and add a missing kobject_put() in the edac_device_register_sysfs_main_kobj() error path. [ bp: Massage and merge into a single patch. 
] Fixes: b2ed215a3338 ("Kobject: change drivers/edac to use kobject_init_and_add") Signed-off-by: Qiushi Wu Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200528202238.18078-1-wu000273@umn.edu Link: https://lkml.kernel.org/r/20200528203526.20908-1-wu000273@umn.edu Signed-off-by: Sasha Levin --- drivers/edac/edac_device_sysfs.c | 1 + drivers/edac/edac_pci_sysfs.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index 0e7ea3591b78..5e7593753799 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) /* Error exit stack */ err_kobj_reg: + kobject_put(&edac_dev->kobj); module_put(edac_dev->owner); err_out: diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 72c9eb9fdffb..53042af7262e 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void) /* Error unwind statck */ kobject_init_and_add_fail: - kfree(edac_pci_top_main_kobj); + kobject_put(edac_pci_top_main_kobj); kzalloc_fail: module_put(THIS_MODULE); -- GitLab From 3b1712152c9fae9f35d55dbdb14a25dc15663e3a Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Fri, 5 Jun 2020 20:59:14 +0200 Subject: [PATCH 0142/1304] arm64: dts: qcom: msm8916: Replace invalid bias-pull-none property [ Upstream commit 1b6a1a162defe649c5599d661b58ac64bb6f31b6 ] msm8916-pins.dtsi specifies "bias-pull-none" for most of the audio pin configurations. This was likely copied from the qcom kernel fork where the same property was used for these audio pins. However, "bias-pull-none" actually does not exist at all - not in mainline and not in downstream. I can only guess that the original intention was to configure "no pull", i.e. bias-disable. Change it to that instead. 
Fixes: 143bb9ad85b7 ("arm64: dts: qcom: add audio pinctrls") Cc: Srinivas Kandagatla Signed-off-by: Stephan Gerhold Link: https://lore.kernel.org/r/20200605185916.318494-2-stephan@gerhold.net Signed-off-by: Bjorn Andersson Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/qcom/msm8916-pins.dtsi | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi index 390a2fa28514..60d218c5275c 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi @@ -516,7 +516,7 @@ pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; cdc_pdm_lines_sus: pdm_lines_off { @@ -545,7 +545,7 @@ pins = "gpio113", "gpio114", "gpio115", "gpio116"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; @@ -573,7 +573,7 @@ pinconf { pins = "gpio110"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; @@ -599,7 +599,7 @@ pinconf { pins = "gpio116"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; ext_mclk_tlmm_lines_sus: mclk_lines_off { @@ -627,7 +627,7 @@ pins = "gpio112", "gpio117", "gpio118", "gpio119"; drive-strength = <8>; - bias-pull-none; + bias-disable; }; }; ext_sec_tlmm_lines_sus: tlmm_lines_off { -- GitLab From e09b2a736ee5e258172c552c6380a8cacb182c2a Mon Sep 17 00:00:00 2001 From: Gilad Ben-Yossef Date: Sun, 21 Jun 2020 14:19:57 +0300 Subject: [PATCH 0143/1304] crypto: ccree - fix resource leak on error path [ Upstream commit 9bc6165d608d676f05d8bf156a2c9923ee38d05b ] Fix a small resource leak on the error path of cipher processing. 
Signed-off-by: Gilad Ben-Yossef Fixes: 63ee04c8b491e ("crypto: ccree - add skcipher support") Cc: Markus Elfring Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- drivers/crypto/ccree/cc_cipher.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index 28a5b8b38fa2..1bcb6f0157b0 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -137,7 +137,6 @@ static int cc_cipher_init(struct crypto_tfm *tfm) skcipher_alg.base); struct device *dev = drvdata_to_dev(cc_alg->drvdata); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; - int rc = 0; dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p, crypto_tfm_alg_name(tfm)); @@ -149,10 +148,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) ctx_p->flow_mode = cc_alg->flow_mode; ctx_p->drvdata = cc_alg->drvdata; + if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { + /* Alloc hash tfm for essiv */ + ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); + if (IS_ERR(ctx_p->shash_tfm)) { + dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); + return PTR_ERR(ctx_p->shash_tfm); + } + } + /* Allocate key buffer, cache line aligned */ ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL); if (!ctx_p->user.key) - return -ENOMEM; + goto free_shash; dev_dbg(dev, "Allocated key buffer in context. 
key=@%p\n", ctx_p->user.key); @@ -164,21 +172,19 @@ static int cc_cipher_init(struct crypto_tfm *tfm) if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n", max_key_buf_size, ctx_p->user.key); - return -ENOMEM; + goto free_key; } dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n", max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr); - if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { - /* Alloc hash tfm for essiv */ - ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0); - if (IS_ERR(ctx_p->shash_tfm)) { - dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); - return PTR_ERR(ctx_p->shash_tfm); - } - } + return 0; - return rc; +free_key: + kfree(ctx_p->user.key); +free_shash: + crypto_free_shash(ctx_p->shash_tfm); + + return -ENOMEM; } static void cc_cipher_exit(struct crypto_tfm *tfm) -- GitLab From bccb966e9354b91a18e13a2f835c021ad544bc46 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Fri, 19 Jun 2020 23:03:30 +0100 Subject: [PATCH 0144/1304] firmware: arm_scmi: Fix SCMI genpd domain probing [ Upstream commit e0f1a30cf184821499eeb67daedd7a3f21bbcb0b ] When, at probe time, an SCMI communication failure inhibits the capacity to query power domains states, such domains should be skipped. Registering partially initialized SCMI power domains with genpd will causes kernel panic. 
arm-scmi timed out in resp(caller: scmi_power_state_get+0xa4/0xd0) scmi-power-domain scmi_dev.2: failed to get state for domain 9 Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000 Mem abort info: ESR = 0x96000006 EC = 0x25: DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 Data abort info: ISV = 0, ISS = 0x00000006 CM = 0, WnR = 0 user pgtable: 4k pages, 48-bit VAs, pgdp=00000009f3691000 [0000000000000000] pgd=00000009f1ca0003, p4d=00000009f1ca0003, pud=00000009f35ea003, pmd=0000000000000000 Internal error: Oops: 96000006 [#1] PREEMPT SMP CPU: 2 PID: 381 Comm: bash Not tainted 5.8.0-rc1-00011-gebd118c2cca8 #2 Hardware name: ARM LTD ARM Juno Development Platform/ARM Juno Development Platform, BIOS EDK II Jan 3 2020 Internal error: Oops: 96000006 [#1] PREEMPT SMP pstate: 80000005 (Nzcv daif -PAN -UAO BTYPE=--) pc : of_genpd_add_provider_onecell+0x98/0x1f8 lr : of_genpd_add_provider_onecell+0x48/0x1f8 Call trace: of_genpd_add_provider_onecell+0x98/0x1f8 scmi_pm_domain_probe+0x174/0x1e8 scmi_dev_probe+0x90/0xe0 really_probe+0xe4/0x448 driver_probe_device+0xfc/0x168 device_driver_attach+0x7c/0x88 bind_store+0xe8/0x128 drv_attr_store+0x2c/0x40 sysfs_kf_write+0x4c/0x60 kernfs_fop_write+0x114/0x230 __vfs_write+0x24/0x50 vfs_write+0xbc/0x1e0 ksys_write+0x70/0xf8 __arm64_sys_write+0x24/0x30 el0_svc_common.constprop.3+0x94/0x160 do_el0_svc+0x2c/0x98 el0_sync_handler+0x148/0x1a8 el0_sync+0x158/0x180 Do not register any power domain that failed to be queried with genpd. 
Fixes: 898216c97ed2 ("firmware: arm_scmi: add device power domain support using genpd") Link: https://lore.kernel.org/r/20200619220330.12217-1-cristian.marussi@arm.com Signed-off-by: Cristian Marussi Signed-off-by: Sudeep Holla Signed-off-by: Sasha Levin --- drivers/firmware/arm_scmi/scmi_pm_domain.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c index 87f737e01473..041f8152272b 100644 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c @@ -85,7 +85,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) for (i = 0; i < num_domains; i++, scmi_pd++) { u32 state; - domains[i] = &scmi_pd->genpd; + if (handle->power_ops->state_get(handle, i, &state)) { + dev_warn(dev, "failed to get state for domain %d\n", i); + continue; + } scmi_pd->domain = i; scmi_pd->handle = handle; @@ -94,13 +97,10 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev) scmi_pd->genpd.power_off = scmi_pd_power_off; scmi_pd->genpd.power_on = scmi_pd_power_on; - if (handle->power_ops->state_get(handle, i, &state)) { - dev_warn(dev, "failed to get state for domain %d\n", i); - continue; - } - pm_genpd_init(&scmi_pd->genpd, NULL, state == SCMI_POWER_STATE_GENERIC_OFF); + + domains[i] = &scmi_pd->genpd; } scmi_pd_data->domains = domains; -- GitLab From 605614ea4349f2c83c20060628d2d8bfb4c8ede2 Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Sun, 5 Jul 2020 12:39:17 +0530 Subject: [PATCH 0145/1304] arm64: dts: exynos: Fix silent hang after boot on Espresso [ Upstream commit b072714bfc0e42c984b8fd6e069f3ca17de8137a ] Once regulators are disabled after kernel boot, on Espresso board silent hang observed because of LDO7 being disabled. LDO7 actually provide power to CPU cores and non-cpu blocks circuitries. Keep this regulator always-on to fix this hang. 
Fixes: 9589f7721e16 ("arm64: dts: Add S2MPS15 PMIC node on exynos7-espresso") Signed-off-by: Alim Akhtar Signed-off-by: Krzysztof Kozlowski Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/exynos/exynos7-espresso.dts | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts index 00dd89b92b42..d991eae5202f 100644 --- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts +++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts @@ -152,6 +152,7 @@ regulator-min-microvolt = <700000>; regulator-max-microvolt = <1150000>; regulator-enable-ramp-delay = <125>; + regulator-always-on; }; ldo8_reg: LDO8 { -- GitLab From a9b01fdb9afc6e9886dcf2347f34f08fe63bc8ea Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 9 Jul 2020 09:17:05 +0100 Subject: [PATCH 0146/1304] clk: scmi: Fix min and max rate when registering clocks with discrete rates [ Upstream commit fcd2e0deae50bce48450f14c8fc5611b08d7438c ] Currently we are not initializing the scmi clock with discrete rates correctly. We fetch the min_rate and max_rate value only for clocks with ranges and ignore the ones with discrete rates. This will lead to wrong initialization of rate range when clock supports discrete rate. Fix this by using the first and the last rate in the sorted list of the discrete clock rates while registering the clock. 
Link: https://lore.kernel.org/r/20200709081705.46084-2-sudeep.holla@arm.com Fixes: 6d6a1d82eaef7 ("clk: add support for clocks provided by SCMI") Reviewed-by: Stephen Boyd Reported-and-tested-by: Dien Pham Signed-off-by: Sudeep Holla Signed-off-by: Sasha Levin --- drivers/clk/clk-scmi.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index a985bf5e1ac6..c65d30bba700 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -103,6 +103,8 @@ static const struct clk_ops scmi_clk_ops = { static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) { int ret; + unsigned long min_rate, max_rate; + struct clk_init_data init = { .flags = CLK_GET_RATE_NOCACHE, .num_parents = 0, @@ -112,9 +114,23 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk) sclk->hw.init = &init; ret = devm_clk_hw_register(dev, &sclk->hw); - if (!ret) - clk_hw_set_rate_range(&sclk->hw, sclk->info->range.min_rate, - sclk->info->range.max_rate); + if (ret) + return ret; + + if (sclk->info->rate_discrete) { + int num_rates = sclk->info->list.num_rates; + + if (num_rates <= 0) + return -EINVAL; + + min_rate = sclk->info->list.rates[0]; + max_rate = sclk->info->list.rates[num_rates - 1]; + } else { + min_rate = sclk->info->range.min_rate; + max_rate = sclk->info->range.max_rate; + } + + clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate); return ret; } -- GitLab From 63e0ea8e26692a64bed3fc8049ef9d36c9a3ad74 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:12:13 +1000 Subject: [PATCH 0147/1304] m68k: mac: Don't send IOP message until channel is idle [ Upstream commit aeb445bf2194d83e12e85bf5c65baaf1f093bd8f ] In the following sequence of calls, iop_do_send() gets called when the "send" channel is not in the IOP_MSG_IDLE state: iop_ism_irq() iop_handle_send() (msg->handler)() iop_send_message() iop_do_send() Avoid this by testing the channel state before 
calling iop_do_send(). When sending, and iop_send_queue is empty, call iop_do_send() because the channel is idle. If iop_send_queue is not empty, iop_do_send() will get called later by iop_handle_send(). Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Finn Thain Tested-by: Stan Johnson Cc: Joshua Thompson Link: https://lore.kernel.org/r/6d667c39e53865661fa5a48f16829d18ed8abe54.1590880333.git.fthain@telegraphics.com.au Signed-off-by: Geert Uytterhoeven Signed-off-by: Sasha Levin --- arch/m68k/mac/iop.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 9bfa17015768..d8f2282978f9 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c @@ -416,7 +416,8 @@ static void iop_handle_send(uint iop_num, uint chan) msg->status = IOP_MSGSTATUS_UNUSED; msg = msg->next; iop_send_queue[iop_num][chan] = msg; - if (msg) iop_do_send(msg); + if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) + iop_do_send(msg); } /* @@ -490,16 +491,12 @@ int iop_send_message(uint iop_num, uint chan, void *privdata, if (!(q = iop_send_queue[iop_num][chan])) { iop_send_queue[iop_num][chan] = msg; + iop_do_send(msg); } else { while (q->next) q = q->next; q->next = msg; } - if (iop_readb(iop_base[iop_num], - IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE) { - iop_do_send(msg); - } - return 0; } -- GitLab From c0603524c0985073818117fee7403aa14486fec8 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Sun, 31 May 2020 09:12:13 +1000 Subject: [PATCH 0148/1304] m68k: mac: Fix IOP status/control register writes [ Upstream commit 931fc82a6aaf4e2e4a5490addaa6a090d78c24a7 ] When writing values to the IOP status/control register make sure those values do not have any extraneous bits that will clear interrupt flags. To place the SCC IOP into bypass mode would be desirable but this is not achieved by writing IOP_DMAINACTIVE | IOP_RUN | IOP_AUTOINC | IOP_BYPASS to the control register. 
Drop this ineffective register write. Remove the flawed and unused iop_bypass() function. Make use of the unused iop_stop() function. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Finn Thain Tested-by: Stan Johnson Cc: Joshua Thompson Link: https://lore.kernel.org/r/09bcb7359a1719a18b551ee515da3c4c3cf709e6.1590880333.git.fthain@telegraphics.com.au Signed-off-by: Geert Uytterhoeven Signed-off-by: Sasha Levin --- arch/m68k/mac/iop.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index d8f2282978f9..c432bfafe63e 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c @@ -183,7 +183,7 @@ static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8 static __inline__ void iop_stop(volatile struct mac_iop *iop) { - iop->status_ctrl &= ~IOP_RUN; + iop->status_ctrl = IOP_AUTOINC; } static __inline__ void iop_start(volatile struct mac_iop *iop) @@ -191,14 +191,9 @@ static __inline__ void iop_start(volatile struct mac_iop *iop) iop->status_ctrl = IOP_RUN | IOP_AUTOINC; } -static __inline__ void iop_bypass(volatile struct mac_iop *iop) -{ - iop->status_ctrl |= IOP_BYPASS; -} - static __inline__ void iop_interrupt(volatile struct mac_iop *iop) { - iop->status_ctrl |= IOP_IRQ; + iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC; } static int iop_alive(volatile struct mac_iop *iop) @@ -244,7 +239,6 @@ void __init iop_preinit(void) } else { iop_base[IOP_NUM_SCC] = (struct mac_iop *) SCC_IOP_BASE_QUADRA; } - iop_base[IOP_NUM_SCC]->status_ctrl = 0x87; iop_scc_present = 1; } else { iop_base[IOP_NUM_SCC] = NULL; @@ -256,7 +250,7 @@ void __init iop_preinit(void) } else { iop_base[IOP_NUM_ISM] = (struct mac_iop *) ISM_IOP_BASE_QUADRA; } - iop_base[IOP_NUM_ISM]->status_ctrl = 0; + iop_stop(iop_base[IOP_NUM_ISM]); iop_ism_present = 1; } else { iop_base[IOP_NUM_ISM] = NULL; -- GitLab From d5c4d84c95198da37445918013abd9d6bf9a0e02 Mon Sep 17 00:00:00 2001 From: Lu Wei Date: Fri, 10 Jul 
2020 17:30:17 +0800 Subject: [PATCH 0149/1304] platform/x86: intel-hid: Fix return value check in check_acpi_dev() [ Upstream commit 71fbe886ce6dd0be17f20aded9c63fe58edd2806 ] In the function check_acpi_dev(), if it fails to create platform device, the return value is ERR_PTR() or NULL. Thus it must use IS_ERR_OR_NULL() to check return value. Fixes: ecc83e52b28c ("intel-hid: new hid event driver for hotkeys") Reported-by: Hulk Robot Signed-off-by: Lu Wei Signed-off-by: Andy Shevchenko Signed-off-by: Sasha Levin --- drivers/platform/x86/intel-hid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index c514cb73bb50..d7d69eadb9bb 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -564,7 +564,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev, NULL)) + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) dev_info(&dev->dev, "intel-hid: created platform device\n"); -- GitLab From 473a0cdc8529ee271758b770c477418f0fe430c8 Mon Sep 17 00:00:00 2001 From: Lu Wei Date: Fri, 10 Jul 2020 17:30:18 +0800 Subject: [PATCH 0150/1304] platform/x86: intel-vbtn: Fix return value check in check_acpi_dev() [ Upstream commit 64dd4a5a7d214a07e3d9f40227ec30ac8ba8796e ] In the function check_acpi_dev(), if it fails to create platform device, the return value is ERR_PTR() or NULL. Thus it must use IS_ERR_OR_NULL() to check return value. 
Fixes: 332e081225fc ("intel-vbtn: new driver for Intel Virtual Button") Reported-by: Hulk Robot Signed-off-by: Lu Wei Signed-off-by: Andy Shevchenko Signed-off-by: Sasha Levin --- drivers/platform/x86/intel-vbtn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index d122f33d43ac..c7c8b432c163 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -272,7 +272,7 @@ check_acpi_dev(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK; if (acpi_match_device_ids(dev, ids) == 0) - if (acpi_create_platform_device(dev, NULL)) + if (!IS_ERR_OR_NULL(acpi_create_platform_device(dev, NULL))) dev_info(&dev->dev, "intel-vbtn: created platform device\n"); -- GitLab From 55da218df6a861b3c4bec184dfaea191af8b5dea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Sat, 4 Jul 2020 17:58:55 +0200 Subject: [PATCH 0151/1304] ARM: dts: gose: Fix ports node name for adv7180 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit d344234abde938ae1062edb6c05852b0bafb4a03 ] When adding the adv7180 device node the ports node was misspelled as port, fix this. 
Fixes: 8cae359049a88b75 ("ARM: dts: gose: add composite video input") Signed-off-by: Niklas Söderlund Link: https://lore.kernel.org/r/20200704155856.3037010-2-niklas.soderlund+renesas@ragnatech.se Signed-off-by: Geert Uytterhoeven Signed-off-by: Sasha Levin --- arch/arm/boot/dts/r8a7793-gose.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts index 6b2f3a4fd13d..9235be8f0f00 100644 --- a/arch/arm/boot/dts/r8a7793-gose.dts +++ b/arch/arm/boot/dts/r8a7793-gose.dts @@ -339,7 +339,7 @@ reg = <0x20>; remote = <&vin1>; - port { + ports { #address-cells = <1>; #size-cells = <0>; -- GitLab From 12b7ce2cb93e99f02e92cd33d79b231e04052f05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Mon, 13 Jul 2020 13:10:16 +0200 Subject: [PATCH 0152/1304] ARM: dts: gose: Fix ports node name for adv7612 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 59692ac5a7bb8c97ff440fc8917828083fbc38d6 ] When adding the adv7612 device node the ports node was misspelled as port, fix this. 
Fixes: bc63cd87f3ce924f ("ARM: dts: gose: add HDMI input") Signed-off-by: Niklas Söderlund Link: https://lore.kernel.org/r/20200713111016.523189-1-niklas.soderlund+renesas@ragnatech.se Signed-off-by: Geert Uytterhoeven Signed-off-by: Sasha Levin --- arch/arm/boot/dts/r8a7793-gose.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts index 9235be8f0f00..7802ce842a73 100644 --- a/arch/arm/boot/dts/r8a7793-gose.dts +++ b/arch/arm/boot/dts/r8a7793-gose.dts @@ -399,7 +399,7 @@ interrupts = <2 IRQ_TYPE_LEVEL_LOW>; default-input = <0>; - port { + ports { #address-cells = <1>; #size-cells = <0>; -- GitLab From bcaf2719dba43f7ccbcc126c56f05111058eef52 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Thu, 4 Jun 2020 20:33:01 +0800 Subject: [PATCH 0153/1304] ARM: at91: pm: add missing put_device() call in at91_pm_sram_init() [ Upstream commit f87a4f022c44e5b87e842a9f3e644fba87e8385f ] if of_find_device_by_node() succeed, at91_pm_sram_init() doesn't have a corresponding put_device(). Thus add a jump target to fix the exception handling for this function implementation. 
Fixes: d2e467905596 ("ARM: at91: pm: use the mmio-sram pool to access SRAM") Signed-off-by: yu kuai Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/20200604123301.3905837-1-yukuai3@huawei.com Signed-off-by: Sasha Levin --- arch/arm/mach-at91/pm.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index e2e4df3d11e5..21bfe9b6e16a 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -542,13 +542,13 @@ static void __init at91_pm_sram_init(void) sram_pool = gen_pool_get(&pdev->dev, NULL); if (!sram_pool) { pr_warn("%s: sram pool unavailable!\n", __func__); - return; + goto out_put_device; } sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz); if (!sram_base) { pr_warn("%s: unable to alloc sram!\n", __func__); - return; + goto out_put_device; } sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base); @@ -556,12 +556,17 @@ static void __init at91_pm_sram_init(void) at91_pm_suspend_in_sram_sz, false); if (!at91_suspend_sram_fn) { pr_warn("SRAM: Could not map\n"); - return; + goto out_put_device; } /* Copy the pm suspend handler to SRAM */ at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn, &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz); + return; + +out_put_device: + put_device(&pdev->dev); + return; } static bool __init at91_is_pm_mode_active(int pm_mode) -- GitLab From 3fcd97daf6e4606d9bc36fb420bcebcf9b38df49 Mon Sep 17 00:00:00 2001 From: Dilip Kota Date: Fri, 17 Jul 2020 14:27:50 +0800 Subject: [PATCH 0154/1304] spi: lantiq: fix: Rx overflow error in full duplex mode [ Upstream commit 661ccf2b3f1360be50242726f7c26ced6a9e7d52 ] In full duplex mode, rx overflow error is observed. To overcome the error, wait until the complete data got received and proceed further. 
Fixes: 17f84b793c01 ("spi: lantiq-ssc: add support for Lantiq SSC SPI controller") Signed-off-by: Dilip Kota Link: https://lore.kernel.org/r/efb650b0faa49a00788c4e0ca8ef7196bdba851d.1594957019.git.eswara.kota@linux.intel.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/spi/spi-lantiq-ssc.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c index d5976615d924..dc740b5f720b 100644 --- a/drivers/spi/spi-lantiq-ssc.c +++ b/drivers/spi/spi-lantiq-ssc.c @@ -187,6 +187,7 @@ struct lantiq_ssc_spi { unsigned int tx_fifo_size; unsigned int rx_fifo_size; unsigned int base_cs; + unsigned int fdx_tx_level; }; static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg) @@ -484,6 +485,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi) u32 data; unsigned int tx_free = tx_fifo_free(spi); + spi->fdx_tx_level = 0; while (spi->tx_todo && tx_free) { switch (spi->bits_per_word) { case 2 ... 8: @@ -512,6 +514,7 @@ static void tx_fifo_write(struct lantiq_ssc_spi *spi) lantiq_ssc_writel(spi, data, LTQ_SPI_TB); tx_free--; + spi->fdx_tx_level++; } } @@ -523,6 +526,13 @@ static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi) u32 data; unsigned int rx_fill = rx_fifo_level(spi); + /* + * Wait until all expected data to be shifted in. + * Otherwise, rx overrun may occur. + */ + while (rx_fill != spi->fdx_tx_level) + rx_fill = rx_fifo_level(spi); + while (rx_fill) { data = lantiq_ssc_readl(spi, LTQ_SPI_RB); -- GitLab From c4579cda1929584ae50c20e673c1991068397739 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Tue, 21 Jul 2020 21:45:51 +0800 Subject: [PATCH 0155/1304] ARM: socfpga: PM: add missing put_device() call in socfpga_setup_ocram_self_refresh() [ Upstream commit 3ad7b4e8f89d6bcc9887ca701cf2745a6aedb1a0 ] if of_find_device_by_node() succeed, socfpga_setup_ocram_self_refresh doesn't have a corresponding put_device(). 
Thus add a jump target to fix the exception handling for this function implementation. Fixes: 44fd8c7d4005 ("ARM: socfpga: support suspend to ram") Signed-off-by: Yu Kuai Signed-off-by: Dinh Nguyen Signed-off-by: Sasha Levin --- arch/arm/mach-socfpga/pm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c index d4866788702c..b782294ee30b 100644 --- a/arch/arm/mach-socfpga/pm.c +++ b/arch/arm/mach-socfpga/pm.c @@ -60,14 +60,14 @@ static int socfpga_setup_ocram_self_refresh(void) if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; - goto put_node; + goto put_device; } ocram_base = gen_pool_alloc(ocram_pool, socfpga_sdram_self_refresh_sz); if (!ocram_base) { pr_warn("%s: unable to alloc ocram!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base); @@ -78,7 +78,7 @@ static int socfpga_setup_ocram_self_refresh(void) if (!suspend_ocram_base) { pr_warn("%s: __arm_ioremap_exec failed!\n", __func__); ret = -ENOMEM; - goto put_node; + goto put_device; } /* Copy the code that puts DDR in self refresh to ocram */ @@ -92,6 +92,8 @@ static int socfpga_setup_ocram_self_refresh(void) if (!socfpga_sdram_self_refresh_in_ocram) ret = -EFAULT; +put_device: + put_device(&pdev->dev); put_node: of_node_put(np); -- GitLab From 1e3a04cb7f4efcdb2afe594217bb1a4ecebdd224 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Wed, 29 Apr 2020 13:42:32 +0300 Subject: [PATCH 0156/1304] drm/tilcdc: fix leak & null ref in panel_connector_get_modes [ Upstream commit 3f9c1c872cc97875ddc8d63bc9fe6ee13652b933 ] If videomode_from_timings() returns true, the mode allocated with drm_mode_create will be leaked. Also, the return value of drm_mode_create() is never checked, and thus could cause NULL deref. Fix these two issues. 
Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20200429104234.18910-1-tomi.valkeinen@ti.com Reviewed-by: Jyri Sarha Acked-by: Sam Ravnborg Signed-off-by: Sasha Levin --- drivers/gpu/drm/tilcdc/tilcdc_panel.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c index a1acab39d87f..096a33f12c61 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c @@ -150,12 +150,16 @@ static int panel_connector_get_modes(struct drm_connector *connector) int i; for (i = 0; i < timings->num_timings; i++) { - struct drm_display_mode *mode = drm_mode_create(dev); + struct drm_display_mode *mode; struct videomode vm; if (videomode_from_timings(timings, &vm, i)) break; + mode = drm_mode_create(dev); + if (!mode) + break; + drm_display_mode_from_videomode(&vm, mode); mode->type = DRM_MODE_TYPE_DRIVER; -- GitLab From fe104ad82e51fc70636f060a6d805be75ce47004 Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Mon, 22 Jun 2020 12:23:25 +0530 Subject: [PATCH 0157/1304] soc: qcom: rpmh-rsc: Set suppress_bind_attrs flag [ Upstream commit 1a53ce9ab4faeb841b33d62d23283dc76c0e7c5a ] rpmh-rsc driver is fairly core to system and should not be removable once its probed. However it allows to unbind driver from sysfs using below command which results into a crash on sc7180. echo 18200000.rsc > /sys/bus/platform/drivers/rpmh/unbind Lets prevent unbind at runtime by setting suppress_bind_attrs flag. 
Reviewed-by: Stephen Boyd Signed-off-by: Maulik Shah Link: https://lore.kernel.org/r/1592808805-2437-1-git-send-email-mkshah@codeaurora.org Signed-off-by: Bjorn Andersson Signed-off-by: Sasha Levin --- drivers/soc/qcom/rpmh-rsc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index 3c6f920535ea..519d19f57eee 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -715,6 +715,7 @@ static struct platform_driver rpmh_driver = { .driver = { .name = "rpmh", .of_match_table = rpmh_drv_match, + .suppress_bind_attrs = true, }, }; -- GitLab From 29e1dfcd5150097f32f34891c85a50d9ead19df3 Mon Sep 17 00:00:00 2001 From: Lihong Kou Date: Tue, 23 Jun 2020 20:28:41 +0800 Subject: [PATCH 0158/1304] Bluetooth: add a mutex lock to avoid UAF in do_enale_set [ Upstream commit f9c70bdc279b191da8d60777c627702c06e4a37d ] In the case we set or free the global value listen_chan in different threads, we can encounter the UAF problems because the method is not protected by any lock, add one to avoid this bug. 
BUG: KASAN: use-after-free in l2cap_chan_close+0x48/0x990 net/bluetooth/l2cap_core.c:730 Read of size 8 at addr ffff888096950000 by task kworker/1:102/2868 CPU: 1 PID: 2868 Comm: kworker/1:102 Not tainted 5.5.0-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Workqueue: events do_enable_set Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x1fb/0x318 lib/dump_stack.c:118 print_address_description+0x74/0x5c0 mm/kasan/report.c:374 __kasan_report+0x149/0x1c0 mm/kasan/report.c:506 kasan_report+0x26/0x50 mm/kasan/common.c:641 __asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135 l2cap_chan_close+0x48/0x990 net/bluetooth/l2cap_core.c:730 do_enable_set+0x660/0x900 net/bluetooth/6lowpan.c:1074 process_one_work+0x7f5/0x10f0 kernel/workqueue.c:2264 worker_thread+0xbbc/0x1630 kernel/workqueue.c:2410 kthread+0x332/0x350 kernel/kthread.c:255 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352 Allocated by task 2870: save_stack mm/kasan/common.c:72 [inline] set_track mm/kasan/common.c:80 [inline] __kasan_kmalloc+0x118/0x1c0 mm/kasan/common.c:515 kasan_kmalloc+0x9/0x10 mm/kasan/common.c:529 kmem_cache_alloc_trace+0x221/0x2f0 mm/slab.c:3551 kmalloc include/linux/slab.h:555 [inline] kzalloc include/linux/slab.h:669 [inline] l2cap_chan_create+0x50/0x320 net/bluetooth/l2cap_core.c:446 chan_create net/bluetooth/6lowpan.c:640 [inline] bt_6lowpan_listen net/bluetooth/6lowpan.c:959 [inline] do_enable_set+0x6a4/0x900 net/bluetooth/6lowpan.c:1078 process_one_work+0x7f5/0x10f0 kernel/workqueue.c:2264 worker_thread+0xbbc/0x1630 kernel/workqueue.c:2410 kthread+0x332/0x350 kernel/kthread.c:255 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352 Freed by task 2870: save_stack mm/kasan/common.c:72 [inline] set_track mm/kasan/common.c:80 [inline] kasan_set_free_info mm/kasan/common.c:337 [inline] __kasan_slab_free+0x12e/0x1e0 mm/kasan/common.c:476 kasan_slab_free+0xe/0x10 mm/kasan/common.c:485 __cache_free 
mm/slab.c:3426 [inline] kfree+0x10d/0x220 mm/slab.c:3757 l2cap_chan_destroy net/bluetooth/l2cap_core.c:484 [inline] kref_put include/linux/kref.h:65 [inline] l2cap_chan_put+0x170/0x190 net/bluetooth/l2cap_core.c:498 do_enable_set+0x66c/0x900 net/bluetooth/6lowpan.c:1075 process_one_work+0x7f5/0x10f0 kernel/workqueue.c:2264 worker_thread+0xbbc/0x1630 kernel/workqueue.c:2410 kthread+0x332/0x350 kernel/kthread.c:255 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352 The buggy address belongs to the object at ffff888096950000 which belongs to the cache kmalloc-2k of size 2048 The buggy address is located 0 bytes inside of 2048-byte region [ffff888096950000, ffff888096950800) The buggy address belongs to the page: page:ffffea00025a5400 refcount:1 mapcount:0 mapping:ffff8880aa400e00 index:0x0 flags: 0xfffe0000000200(slab) raw: 00fffe0000000200 ffffea00027d1548 ffffea0002397808 ffff8880aa400e00 raw: 0000000000000000 ffff888096950000 0000000100000001 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88809694ff00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ffff88809694ff80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 >ffff888096950000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff888096950080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff888096950100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ================================================================== Reported-by: syzbot+96414aa0033c363d8458@syzkaller.appspotmail.com Signed-off-by: Lihong Kou Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin --- net/bluetooth/6lowpan.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 357475cceec6..9a75f9b00b51 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -57,6 +57,7 @@ static bool enable_6lowpan; /* We are listening incoming connections via this channel */ static struct l2cap_chan *listen_chan; 
+static DEFINE_MUTEX(set_lock); struct lowpan_peer { struct list_head list; @@ -1082,12 +1083,14 @@ static void do_enable_set(struct work_struct *work) enable_6lowpan = set_enable->flag; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); } listen_chan = bt_6lowpan_listen(); + mutex_unlock(&set_lock); kfree(set_enable); } @@ -1139,11 +1142,13 @@ static ssize_t lowpan_control_write(struct file *fp, if (ret == -EINVAL) return ret; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); listen_chan = NULL; } + mutex_unlock(&set_lock); if (conn) { struct lowpan_peer *peer; -- GitLab From f73b59725d73ad239a5670e85b8765ec23099f0d Mon Sep 17 00:00:00 2001 From: Luis Chamberlain Date: Fri, 19 Jun 2020 20:47:27 +0000 Subject: [PATCH 0159/1304] loop: be paranoid on exit and prevent new additions / removals [ Upstream commit 200f93377220504c5e56754823e7adfea6037f1a ] Be pedantic on removal as well and hold the mutex. This should prevent uses of addition while we exit. Signed-off-by: Luis Chamberlain Reviewed-by: Ming Lei Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- drivers/block/loop.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 728681a20b7f..da68c42aed68 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -2279,6 +2279,8 @@ static void __exit loop_exit(void) range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; + mutex_lock(&loop_ctl_mutex); + idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); idr_destroy(&loop_index_idr); @@ -2286,6 +2288,8 @@ static void __exit loop_exit(void) unregister_blkdev(LOOP_MAJOR, "loop"); misc_deregister(&loop_misc); + + mutex_unlock(&loop_ctl_mutex); } module_init(loop_init); -- GitLab From 5e5bcbb1083555fa631f1cc9dd0ae23fffd12c30 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Fri, 8 May 2020 14:15:37 -0700 Subject: [PATCH 0160/1304] fs/btrfs: Add cond_resched() for try_release_extent_mapping() stalls [ Upstream commit 9f47eb5461aaeb6cb8696f9d11503ae90e4d5cb0 ] Very large I/Os can cause the following RCU CPU stall warning: RIP: 0010:rb_prev+0x8/0x50 Code: 49 89 c0 49 89 d1 48 89 c2 48 89 f8 e9 e5 fd ff ff 4c 89 48 10 c3 4c = 89 06 c3 4c 89 40 10 c3 0f 1f 00 48 8b 0f 48 39 cf 74 38 <48> 8b 47 10 48 85 c0 74 22 48 8b 50 08 48 85 d2 74 0c 48 89 d0 48 RSP: 0018:ffffc9002212bab0 EFLAGS: 00000287 ORIG_RAX: ffffffffffffff13 RAX: ffff888821f93630 RBX: ffff888821f93630 RCX: ffff888821f937e0 RDX: 0000000000000000 RSI: 0000000000102000 RDI: ffff888821f93630 RBP: 0000000000103000 R08: 000000000006c000 R09: 0000000000000238 R10: 0000000000102fff R11: ffffc9002212bac8 R12: 0000000000000001 R13: ffffffffffffffff R14: 0000000000102000 R15: ffff888821f937e0 __lookup_extent_mapping+0xa0/0x110 try_release_extent_mapping+0xdc/0x220 btrfs_releasepage+0x45/0x70 shrink_page_list+0xa39/0xb30 shrink_inactive_list+0x18f/0x3b0 shrink_lruvec+0x38e/0x6b0 shrink_node+0x14d/0x690 do_try_to_free_pages+0xc6/0x3e0 try_to_free_mem_cgroup_pages+0xe6/0x1e0 reclaim_high.constprop.73+0x87/0xc0 mem_cgroup_handle_over_high+0x66/0x150 exit_to_usermode_loop+0x82/0xd0 do_syscall_64+0xd4/0x100 entry_SYSCALL_64_after_hwframe+0x44/0xa9 On a PREEMPT=n kernel, the try_release_extent_mapping() function's "while" loop might run for a very long time on a large I/O. This commit therefore adds a cond_resched() to this loop, providing RCU any needed quiescent states. Signed-off-by: Paul E. 
McKenney Signed-off-by: Sasha Levin --- fs/btrfs/extent_io.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 520b70b54331..fbcd18d96c52 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4270,6 +4270,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask) /* once for us */ free_extent_map(em); + + cond_resched(); /* Allow large-extent preemption. */ } } return try_release_extent_state(tree, page, mask); -- GitLab From 59d2736af398b7cc96f8ce69f91e1403676eaa87 Mon Sep 17 00:00:00 2001 From: Jack Xiao Date: Wed, 5 Jun 2019 16:30:13 +0800 Subject: [PATCH 0161/1304] drm/amdgpu: avoid dereferencing a NULL pointer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 55611b507fd6453d26030c0c0619fdf0c262766d ] Check if irq_src is NULL to avoid dereferencing a NULL pointer, for MES ring is uneccessary to recieve an interrupt notification. Signed-off-by: Jack Xiao Acked-by: Alex Deucher Reviewed-by: Hawking Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 869ff624b108..e5e51e4d4f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -396,7 +396,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; } amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); - amdgpu_irq_get(adev, irq_src, irq_type); + + if (irq_src) + amdgpu_irq_get(adev, irq_src, irq_type); ring->fence_drv.irq_src = irq_src; ring->fence_drv.irq_type = irq_type; @@ -508,8 +510,9 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) /* no need to trigger GPU reset as we 
are unloading */ amdgpu_fence_driver_force_completion(ring); } - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); drm_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) @@ -545,8 +548,9 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev) } /* disable the interrupt */ - amdgpu_irq_put(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_put(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); } } @@ -572,8 +576,9 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev) continue; /* enable the interrupt */ - amdgpu_irq_get(adev, ring->fence_drv.irq_src, - ring->fence_drv.irq_type); + if (ring->fence_drv.irq_src) + amdgpu_irq_get(adev, ring->fence_drv.irq_src, + ring->fence_drv.irq_type); } } -- GitLab From 4a4776ef5b229f95848b774c46ce555405ff2d46 Mon Sep 17 00:00:00 2001 From: Aditya Pakki Date: Sat, 13 Jun 2020 21:21:22 -0500 Subject: [PATCH 0162/1304] drm/radeon: Fix reference count leaks caused by pm_runtime_get_sync [ Upstream commit 9fb10671011143d15b6b40d6d5fa9c52c57e9d63 ] On calling pm_runtime_get_sync() the reference count of the device is incremented. In case of failure, decrement the reference count before returning the error. 
Acked-by: Evan Quan Signed-off-by: Aditya Pakki Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/radeon/radeon_display.c | 4 +++- drivers/gpu/drm/radeon/radeon_drv.c | 4 +++- drivers/gpu/drm/radeon/radeon_kms.c | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7d1e14f0140a..3f0f3a578ddf 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -625,8 +625,10 @@ radeon_crtc_set_config(struct drm_mode_set *set, dev = set->crtc->dev; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_crtc_helper_set_config(set, ctx); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index c26f09b47ecb..54729acd0d4a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -523,8 +523,10 @@ long radeon_drm_ioctl(struct file *filp, long ret; dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_ioctl(filp, cmd, arg); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 3ff835767ac5..34b3cb6c146f 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -627,8 +627,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(dev->dev); return r; + } /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { -- GitLab From 98d7ab74d3346aebc3c14e012023267af4b4edda Mon Sep 17 00:00:00 2001 From: Sedat Dilek Date: Fri, 3 Jul 2020 16:32:06 +0200 Subject: [PATCH 0163/1304] crypto: aesni - Fix build with 
LLVM_IAS=1 [ Upstream commit 3347c8a079d67af21760a78cc5f2abbcf06d9571 ] When building with LLVM_IAS=1 means using Clang's Integrated Assembly (IAS) from LLVM/Clang >= v10.0.1-rc1+ instead of GNU/as from GNU/binutils I see the following breakage in Debian/testing AMD64: :15:74: error: too many positional arguments PRECOMPUTE 8*3+8(%rsp), %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, ^ arch/x86/crypto/aesni-intel_asm.S:1598:2: note: while in macro instantiation GCM_INIT %r9, 8*3 +8(%rsp), 8*3 +16(%rsp), 8*3 +24(%rsp) ^ :47:2: error: unknown use of instruction mnemonic without a size suffix GHASH_4_ENCRYPT_4_PARALLEL_dec %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc ^ arch/x86/crypto/aesni-intel_asm.S:1599:2: note: while in macro instantiation GCM_ENC_DEC dec ^ :15:74: error: too many positional arguments PRECOMPUTE 8*3+8(%rsp), %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, ^ arch/x86/crypto/aesni-intel_asm.S:1686:2: note: while in macro instantiation GCM_INIT %r9, 8*3 +8(%rsp), 8*3 +16(%rsp), 8*3 +24(%rsp) ^ :47:2: error: unknown use of instruction mnemonic without a size suffix GHASH_4_ENCRYPT_4_PARALLEL_enc %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc ^ arch/x86/crypto/aesni-intel_asm.S:1687:2: note: while in macro instantiation GCM_ENC_DEC enc Craig Topper suggested me in ClangBuiltLinux issue #1050: > I think the "too many positional arguments" is because the parser isn't able > to handle the trailing commas. > > The "unknown use of instruction mnemonic" is because the macro was named > GHASH_4_ENCRYPT_4_PARALLEL_DEC but its being instantiated with > GHASH_4_ENCRYPT_4_PARALLEL_dec I guess gas ignores case on the > macro instantiation, but llvm doesn't. First, I removed the trailing comma in the PRECOMPUTE line. Second, I substituted: 1. GHASH_4_ENCRYPT_4_PARALLEL_DEC -> GHASH_4_ENCRYPT_4_PARALLEL_dec 2. 
GHASH_4_ENCRYPT_4_PARALLEL_ENC -> GHASH_4_ENCRYPT_4_PARALLEL_enc With these changes I was able to build with LLVM_IAS=1 and boot on bare metal. I confirmed that this works with Linux-kernel v5.7.5 final. NOTE: This patch is on top of Linux v5.7 final. Thanks to Craig and especially Nick for double-checking and his comments. Suggested-by: Craig Topper Suggested-by: Craig Topper Suggested-by: Nick Desaulniers Reviewed-by: Nick Desaulniers Cc: "ClangBuiltLinux" Link: https://github.com/ClangBuiltLinux/linux/issues/1050 Link: https://bugs.llvm.org/show_bug.cgi?id=24494 Signed-off-by: Sedat Dilek Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- arch/x86/crypto/aesni-intel_asm.S | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index cb2deb61c5d9..29b27f9a6e1e 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -270,7 +270,7 @@ ALL_F: .octa 0xffffffffffffffffffffffffffffffff PSHUFB_XMM %xmm2, %xmm0 movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv - PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7 movdqu HashKey(%arg2), %xmm13 CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \ @@ -982,7 +982,7 @@ _initial_blocks_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_enc TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 @@ -1190,7 +1190,7 @@ aes_loop_par_enc_done\@: * arg1, %arg3, %arg4 are used as pointers only, not modified * %r11 is the data offset value */ -.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \ +.macro GHASH_4_ENCRYPT_4_PARALLEL_dec TMP1 TMP2 TMP3 TMP4 TMP5 \ TMP6 XMM0 XMM1 XMM2 XMM3 
XMM4 XMM5 XMM6 XMM7 XMM8 operation movdqa \XMM1, \XMM5 -- GitLab From dc1446923ef058574e1a94523d197fd19170f5ff Mon Sep 17 00:00:00 2001 From: Evgeny Novikov Date: Tue, 30 Jun 2020 22:54:51 +0300 Subject: [PATCH 0164/1304] video: fbdev: neofb: fix memory leak in neo_scan_monitor() [ Upstream commit edcb3895a751c762a18d25c8d9846ce9759ed7e1 ] neofb_probe() calls neo_scan_monitor() that can successfully allocate a memory for info->monspecs.modedb and proceed to case 0x03. There it does not free the memory and returns -1. neofb_probe() goes to label err_scan_monitor, thus, it does not free this memory through calling fb_destroy_modedb() as well. We can not go to label err_init_hw since neo_scan_monitor() can fail during memory allocation. So, the patch frees the memory directly for case 0x03. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Evgeny Novikov Cc: Jani Nikula Cc: Mike Rapoport Cc: Daniel Vetter Cc: Andrew Morton Signed-off-by: Bartlomiej Zolnierkiewicz Link: https://patchwork.freedesktop.org/patch/msgid/20200630195451.18675-1-novikov@ispras.ru Signed-off-by: Sasha Levin --- drivers/video/fbdev/neofb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c index 5d3a444083f7..2018e1ca33eb 100644 --- a/drivers/video/fbdev/neofb.c +++ b/drivers/video/fbdev/neofb.c @@ -1820,6 +1820,7 @@ static int neo_scan_monitor(struct fb_info *info) #else printk(KERN_ERR "neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n"); + kfree(info->monspecs.modedb); return -1; #endif default: -- GitLab From 0bd6a2313852306374e423e9bb724850e13805ae Mon Sep 17 00:00:00 2001 From: Zhao Heming Date: Thu, 9 Jul 2020 11:29:29 +0800 Subject: [PATCH 0165/1304] md-cluster: fix wild pointer of unlock_all_bitmaps() [ Upstream commit 60f80d6f2d07a6d8aee485a1d1252327eeee0c81 ] reproduction steps: ``` node1 # mdadm -C /dev/md0 -b clustered -e 1.2 -n 2 -l mirror /dev/sda /dev/sdb node2 # 
mdadm -A /dev/md0 /dev/sda /dev/sdb node1 # mdadm -G /dev/md0 -b none mdadm: failed to remove clustered bitmap. node1 # mdadm -S --scan ^C <==== mdadm hung & kernel crash ``` kernel stack: ``` [ 335.230657] general protection fault: 0000 [#1] SMP NOPTI [...] [ 335.230848] Call Trace: [ 335.230873] ? unlock_all_bitmaps+0x5/0x70 [md_cluster] [ 335.230886] unlock_all_bitmaps+0x3d/0x70 [md_cluster] [ 335.230899] leave+0x10f/0x190 [md_cluster] [ 335.230932] ? md_super_wait+0x93/0xa0 [md_mod] [ 335.230947] ? leave+0x5/0x190 [md_cluster] [ 335.230973] md_cluster_stop+0x1a/0x30 [md_mod] [ 335.230999] md_bitmap_free+0x142/0x150 [md_mod] [ 335.231013] ? _cond_resched+0x15/0x40 [ 335.231025] ? mutex_lock+0xe/0x30 [ 335.231056] __md_stop+0x1c/0xa0 [md_mod] [ 335.231083] do_md_stop+0x160/0x580 [md_mod] [ 335.231119] ? 0xffffffffc05fb078 [ 335.231148] md_ioctl+0xa04/0x1930 [md_mod] [ 335.231165] ? filename_lookup+0xf2/0x190 [ 335.231179] blkdev_ioctl+0x93c/0xa10 [ 335.231205] ? _cond_resched+0x15/0x40 [ 335.231214] ? 
__check_object_size+0xd4/0x1a0 [ 335.231224] block_ioctl+0x39/0x40 [ 335.231243] do_vfs_ioctl+0xa0/0x680 [ 335.231253] ksys_ioctl+0x70/0x80 [ 335.231261] __x64_sys_ioctl+0x16/0x20 [ 335.231271] do_syscall_64+0x65/0x1f0 [ 335.231278] entry_SYSCALL_64_after_hwframe+0x44/0xa9 ``` Signed-off-by: Zhao Heming Signed-off-by: Song Liu Signed-off-by: Sasha Levin --- drivers/md/md-cluster.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 0b2af6e74fc3..4522e87d9d68 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -1443,6 +1443,7 @@ static void unlock_all_bitmaps(struct mddev *mddev) } } kfree(cinfo->other_bitmap_lockres); + cinfo->other_bitmap_lockres = NULL; } } -- GitLab From 6569387565961965d872bab558f3cf167e818689 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20Ca=C3=B1uelo?= Date: Mon, 1 Jun 2020 08:33:06 +0200 Subject: [PATCH 0166/1304] arm64: dts: hisilicon: hikey: fixes to comply with adi, adv7533 DT binding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit bbe28fc3cbabbef781bcdf847615d52ce2e26e42 ] hi3660-hikey960.dts: Define a 'ports' node for 'adv7533: adv7533@39' and the 'adi,dsi-lanes' property to make it compliant with the adi,adv7533 DT binding. This fills the requirements to meet the binding requirements, remote endpoints are not defined. hi6220-hikey.dts: Change property name s/pd-gpio/pd-gpios, gpio properties should be plural. This is just a cosmetic change. 
Signed-off-by: Ricardo Cañuelo Acked-by: Laurent Pinchart Signed-off-by: Wei Xu Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts | 11 +++++++++++ arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts index c98bcbc8dfba..53848e0e5e0c 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts +++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts @@ -530,6 +530,17 @@ status = "ok"; compatible = "adi,adv7533"; reg = <0x39>; + adi,dsi-lanes = <4>; + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + }; + port@1 { + reg = <1>; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index e80a792827ed..60568392d21e 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts @@ -515,7 +515,7 @@ reg = <0x39>; interrupt-parent = <&gpio1>; interrupts = <1 2>; - pd-gpio = <&gpio0 4 0>; + pd-gpios = <&gpio0 4 0>; adi,dsi-lanes = <4>; #sound-dai-cells = <0>; -- GitLab From 3d6955baf30486424af0a9ec46afc294d8c7e3b9 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Mon, 15 Jun 2020 01:12:20 -0500 Subject: [PATCH 0167/1304] drm/etnaviv: fix ref count leak via pm_runtime_get_sync [ Upstream commit c5d5a32ead1e3a61a07a1e59eb52a53e4a6b2a7f ] in etnaviv_gpu_submit, etnaviv_gpu_recover_hang, etnaviv_gpu_debugfs, and etnaviv_gpu_init the call to pm_runtime_get_sync increments the counter even in case of failure, leading to incorrect ref count. In case of failure, decrement the ref count before returning. 
Signed-off-by: Navid Emamdoost Signed-off-by: Lucas Stach Signed-off-by: Sasha Levin --- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 6a859e077ea0..f17fbe6ff7c7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -694,7 +694,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) ret = pm_runtime_get_sync(gpu->dev); if (ret < 0) { dev_err(gpu->dev, "Failed to enable GPU power domain\n"); - return ret; + goto pm_put; } etnaviv_hw_identify(gpu); @@ -808,6 +808,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) gpu->mmu = NULL; fail: pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); return ret; @@ -848,7 +849,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) ret = pm_runtime_get_sync(gpu->dev); if (ret < 0) - return ret; + goto pm_put; dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW); dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH); @@ -971,6 +972,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) ret = 0; pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); return ret; @@ -985,7 +987,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) dev_err(gpu->dev, "recover hung GPU!\n"); if (pm_runtime_get_sync(gpu->dev) < 0) - return; + goto pm_put; mutex_lock(&gpu->lock); @@ -1005,6 +1007,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) mutex_unlock(&gpu->lock); pm_runtime_mark_last_busy(gpu->dev); +pm_put: pm_runtime_put_autosuspend(gpu->dev); } @@ -1278,8 +1281,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) if (!submit->runtime_resumed) { ret = pm_runtime_get_sync(gpu->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(gpu->dev); return NULL; + } submit->runtime_resumed = true; } @@ -1296,6 +1301,7 @@ struct dma_fence 
*etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) ret = event_alloc(gpu, nr_events, event); if (ret) { DRM_ERROR("no free events\n"); + pm_runtime_put_noidle(gpu->dev); return NULL; } -- GitLab From afd847bb56cbf87c5e5b384d7c89d4661b6c3755 Mon Sep 17 00:00:00 2001 From: Aditya Pakki Date: Sat, 13 Jun 2020 20:41:56 -0500 Subject: [PATCH 0168/1304] drm/nouveau: fix multiple instances of reference count leaks [ Upstream commit 659fb5f154c3434c90a34586f3b7aa1c39cf6062 ] On calling pm_runtime_get_sync() the reference count of the device is incremented. In case of failure, decrement the ref count before returning the error. Signed-off-by: Aditya Pakki Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/nouveau_drm.c | 8 ++++++-- drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2b7a54cc3c9e..81999bed1e4a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -899,8 +899,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) /* need to bring up power immediately if opening device */ ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } get_task_comm(tmpname, current); snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); @@ -980,8 +982,10 @@ nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) long ret; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) { case DRM_NOUVEAU_NVIF: diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index b56524d343c3..791f970714ed 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c 
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -46,8 +46,10 @@ nouveau_gem_object_del(struct drm_gem_object *gem) int ret; ret = pm_runtime_get_sync(dev); - if (WARN_ON(ret < 0 && ret != -EACCES)) + if (WARN_ON(ret < 0 && ret != -EACCES)) { + pm_runtime_put_autosuspend(dev); return; + } if (gem->import_attach) drm_prime_gem_destroy(gem, nvbo->bo.sg); -- GitLab From 459863b756436b4f8f6bd806a38d09646e5b51f7 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Mon, 27 Jul 2020 15:14:59 +0800 Subject: [PATCH 0169/1304] usb: mtu3: clear dual mode of u3port when disable device [ Upstream commit f1e51e99ed498d4aa9ae5df28e43d558ea627781 ] If not clear u3port's dual mode when disable device, the IP will fail to enter sleep mode when suspend. Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1595834101-13094-10-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/mtu3/mtu3_core.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c index 860693520132..408e964522ab 100644 --- a/drivers/usb/mtu3/mtu3_core.c +++ b/drivers/usb/mtu3/mtu3_core.c @@ -128,8 +128,12 @@ static void mtu3_device_disable(struct mtu3 *mtu) mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN); - if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) + if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) { mtu3_clrbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL); + if (mtu->is_u3_ip) + mtu3_clrbits(ibase, SSUSB_U3_CTRL(0), + SSUSB_U3_PORT_DUAL_MODE); + } mtu3_setbits(ibase, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN); } -- GitLab From 1bed3b87449484134f45f4c263a5e61aa5b9653d Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Thu, 17 Aug 2017 12:43:07 +0200 Subject: [PATCH 0170/1304] drm/debugfs: fix plain echo to connector "force" attribute [ Upstream commit c704b17071c4dc571dca3af4e4151dac51de081a ] Using plain echo to set the "force" connector 
attribute fails with -EINVAL, because echo appends a newline to the output. Replace strcmp with sysfs_streq to also accept strings that end with a newline. v2: use sysfs_streq instead of stripping trailing whitespace Signed-off-by: Michael Tretter Reviewed-by: Jani Nikula Signed-off-by: Emil Velikov Link: https://patchwork.freedesktop.org/patch/msgid/20170817104307.17124-1-m.tretter@pengutronix.de Signed-off-by: Sasha Levin --- drivers/gpu/drm/drm_debugfs.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 373bd4c2b698..84b7b22a9590 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -265,13 +265,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf, buf[len] = '\0'; - if (!strcmp(buf, "on")) + if (sysfs_streq(buf, "on")) connector->force = DRM_FORCE_ON; - else if (!strcmp(buf, "digital")) + else if (sysfs_streq(buf, "digital")) connector->force = DRM_FORCE_ON_DIGITAL; - else if (!strcmp(buf, "off")) + else if (sysfs_streq(buf, "off")) connector->force = DRM_FORCE_OFF; - else if (!strcmp(buf, "unspecified")) + else if (sysfs_streq(buf, "unspecified")) connector->force = DRM_FORCE_UNSPECIFIED; else return -EINVAL; -- GitLab From 2b8064de21e82b2d643f55849ab903b8f252ea7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 12 May 2020 10:55:58 +0200 Subject: [PATCH 0171/1304] drm/radeon: disable AGP by default MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit ba806f98f868ce107aa9c453fef751de9980e4af ] Always use the PCI GART instead. We just have to many cases where AGP still causes problems. This means a performance regression for some GPUs, but also a bug fix for some others. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/radeon/radeon_drv.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 54729acd0d4a..0cd33289c2b6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -168,12 +168,7 @@ int radeon_no_wb; int radeon_modeset = -1; int radeon_dynclks = -1; int radeon_r4xx_atom = 0; -#ifdef __powerpc__ -/* Default to PCI on PowerPC (fdo #95017) */ int radeon_agpmode = -1; -#else -int radeon_agpmode = 0; -#endif int radeon_vram_limit = 0; int radeon_gart_size = -1; /* auto */ int radeon_benchmarking = 0; -- GitLab From 1016f98b586dce303561d4e32c1173e1cecd7131 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 15 Jun 2020 09:44:45 +0200 Subject: [PATCH 0172/1304] irqchip/irq-mtk-sysirq: Replace spinlock with raw_spinlock [ Upstream commit 6eeb997ab5075e770a002c51351fa4ec2c6b5c39 ] This driver may take a regular spinlock when a raw spinlock (irq_desc->lock) is already taken which results in the following lockdep splat: ============================= [ BUG: Invalid wait context ] 5.7.0-rc7 #1 Not tainted ----------------------------- swapper/0/0 is trying to lock: ffffff800303b798 (&chip_data->lock){....}-{3:3}, at: mtk_sysirq_set_type+0x48/0xc0 other info that might help us debug this: context-{5:5} 2 locks held by swapper/0/0: #0: ffffff800302ee68 (&desc->request_mutex){....}-{4:4}, at: __setup_irq+0xc4/0x8a0 #1: ffffff800302ecf0 (&irq_desc_lock_class){....}-{2:2}, at: __setup_irq+0xe4/0x8a0 stack backtrace: CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.7.0-rc7 #1 Hardware name: Pumpkin MT8516 (DT) Call trace: dump_backtrace+0x0/0x180 show_stack+0x14/0x20 dump_stack+0xd0/0x118 __lock_acquire+0x8c8/0x2270 lock_acquire+0xf8/0x470 _raw_spin_lock_irqsave+0x50/0x78 mtk_sysirq_set_type+0x48/0xc0 __irq_set_trigger+0x58/0x170 
__setup_irq+0x420/0x8a0 request_threaded_irq+0xd8/0x190 timer_of_init+0x1e8/0x2c4 mtk_gpt_init+0x5c/0x1dc timer_probe+0x74/0xf4 time_init+0x14/0x44 start_kernel+0x394/0x4f0 Replace the spinlock_t with raw_spinlock_t to avoid this warning. Signed-off-by: Bartosz Golaszewski Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200615074445.3579-1-brgl@bgdev.pl Signed-off-by: Sasha Levin --- drivers/irqchip/irq-mtk-sysirq.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c index 90aaf190157f..42455f31b061 100644 --- a/drivers/irqchip/irq-mtk-sysirq.c +++ b/drivers/irqchip/irq-mtk-sysirq.c @@ -23,7 +23,7 @@ #include struct mtk_sysirq_chip_data { - spinlock_t lock; + raw_spinlock_t lock; u32 nr_intpol_bases; void __iomem **intpol_bases; u32 *intpol_words; @@ -45,7 +45,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) reg_index = chip_data->which_word[hwirq]; offset = hwirq & 0x1f; - spin_lock_irqsave(&chip_data->lock, flags); + raw_spin_lock_irqsave(&chip_data->lock, flags); value = readl_relaxed(base + reg_index * 4); if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) { if (type == IRQ_TYPE_LEVEL_LOW) @@ -61,7 +61,7 @@ static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type) data = data->parent_data; ret = data->chip->irq_set_type(data, type); - spin_unlock_irqrestore(&chip_data->lock, flags); + raw_spin_unlock_irqrestore(&chip_data->lock, flags); return ret; } @@ -220,7 +220,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node, ret = -ENOMEM; goto out_free_which_word; } - spin_lock_init(&chip_data->lock); + raw_spin_lock_init(&chip_data->lock); return 0; -- GitLab From abfa9c47ece7c712d3fab494d1771c8f15bba8fa Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Thu, 16 Apr 2020 16:46:10 -0700 Subject: [PATCH 0173/1304] mm/mmap.c: Add cond_resched() for exit_mmap() CPU stalls [ Upstream commit 0a3b3c253a1eb2c7fe7f34086d46660c909abeb3 ] A large process running on a heavily loaded system can encounter the following RCU CPU stall warning: rcu: INFO: rcu_sched self-detected stall on CPU rcu: 3-....: (20998 ticks this GP) idle=4ea/1/0x4000000000000002 softirq=556558/556558 fqs=5190 (t=21013 jiffies g=1005461 q=132576) NMI backtrace for cpu 3 CPU: 3 PID: 501900 Comm: aio-free-ring-w Kdump: loaded Not tainted 5.2.9-108_fbk12_rc3_3858_gb83b75af7909 #1 Hardware name: Wiwynn HoneyBadger/PantherPlus, BIOS HBM6.71 02/03/2016 Call Trace: dump_stack+0x46/0x60 nmi_cpu_backtrace.cold.3+0x13/0x50 ? lapic_can_unplug_cpu.cold.27+0x34/0x34 nmi_trigger_cpumask_backtrace+0xba/0xca rcu_dump_cpu_stacks+0x99/0xc7 rcu_sched_clock_irq.cold.87+0x1aa/0x397 ? tick_sched_do_timer+0x60/0x60 update_process_times+0x28/0x60 tick_sched_timer+0x37/0x70 __hrtimer_run_queues+0xfe/0x270 hrtimer_interrupt+0xf4/0x210 smp_apic_timer_interrupt+0x5e/0x120 apic_timer_interrupt+0xf/0x20 RIP: 0010:kmem_cache_free+0x223/0x300 Code: 88 00 00 00 0f 85 ca 00 00 00 41 8b 55 18 31 f6 f7 da 41 f6 45 0a 02 40 0f 94 c6 83 c6 05 9c 41 5e fa e8 a0 a7 01 00 41 56 9d <49> 8b 47 08 a8 03 0f 85 87 00 00 00 65 48 ff 08 e9 3d fe ff ff 65 RSP: 0018:ffffc9000e8e3da8 EFLAGS: 00000206 ORIG_RAX: ffffffffffffff13 RAX: 0000000000020000 RBX: ffff88861b9de960 RCX: 0000000000000030 RDX: fffffffffffe41e8 RSI: 000060777fe3a100 RDI: 000000000001be18 RBP: ffffea00186e7780 R08: ffffffffffffffff R09: ffffffffffffffff R10: ffff88861b9dea28 R11: ffff88887ffde000 R12: ffffffff81230a1f R13: ffff888854684dc0 R14: 0000000000000206 R15: ffff8888547dbc00 ? remove_vma+0x4f/0x60 remove_vma+0x4f/0x60 exit_mmap+0xd6/0x160 mmput+0x4a/0x110 do_exit+0x278/0xae0 ? syscall_trace_enter+0x1d3/0x2b0 ? 
handle_mm_fault+0xaa/0x1c0 do_group_exit+0x3a/0xa0 __x64_sys_exit_group+0x14/0x20 do_syscall_64+0x42/0x100 entry_SYSCALL_64_after_hwframe+0x44/0xa9 And on a PREEMPT=n kernel, the "while (vma)" loop in exit_mmap() can run for a very long time given a large process. This commit therefore adds a cond_resched() to this loop, providing RCU any needed quiescent states. Cc: Andrew Morton Cc: Reviewed-by: Shakeel Butt Reviewed-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney Signed-off-by: Sasha Levin --- mm/mmap.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/mmap.c b/mm/mmap.c index a98f09b83019..e84fd3347a51 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3100,6 +3100,7 @@ void exit_mmap(struct mm_struct *mm) if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); vma = remove_vma(vma); + cond_resched(); } vm_unacct_memory(nr_accounted); } -- GitLab From f76dab806863d50597bc52d5553e10c54eb5f096 Mon Sep 17 00:00:00 2001 From: Wright Feng Date: Thu, 4 Jun 2020 02:18:33 -0500 Subject: [PATCH 0174/1304] brcmfmac: keep SDIO watchdog running when console_interval is non-zero [ Upstream commit eccbf46b15bb3e35d004148f7c3a8fa8e9b26c1e ] brcmfmac host driver makes SDIO bus sleep and stops SDIO watchdog if no pending event or data. As a result, host driver does not poll firmware console buffer before buffer overflow, which leads to missing firmware logs. We should not stop SDIO watchdog if console_interval is non-zero in debug build. 
Signed-off-by: Wright Feng Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604071835.3842-4-wright.feng@cypress.com Signed-off-by: Sasha Levin --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 96870d1b3b73..a5195bdb4d9b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3633,7 +3633,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) if (bus->idlecount > bus->idletime) { brcmf_dbg(SDIO, "idle\n"); sdio_claim_host(bus->sdiodev->func1); - brcmf_sdio_wd_timer(bus, false); +#ifdef DEBUG + if (!BRCMF_FWCON_ON() || + bus->console_interval == 0) +#endif + brcmf_sdio_wd_timer(bus, false); bus->idlecount = 0; brcmf_sdio_bus_sleep(bus, true, false); sdio_release_host(bus->sdiodev->func1); -- GitLab From a08c30d9ccf19146f8477feb21b1007acd149357 Mon Sep 17 00:00:00 2001 From: Prasanna Kerekoppa Date: Thu, 4 Jun 2020 02:18:35 -0500 Subject: [PATCH 0175/1304] brcmfmac: To fix Bss Info flag definition Bug [ Upstream commit fa3266541b13f390eb35bdbc38ff4a03368be004 ] Bss info flag definition need to be fixed from 0x2 to 0x4 This flag is for rssi info received on channel. All Firmware branches defined as 0x4 and this is bug in brcmfmac. 
Signed-off-by: Prasanna Kerekoppa Signed-off-by: Chi-hsien Lin Signed-off-by: Wright Feng Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604071835.3842-6-wright.feng@cypress.com Signed-off-by: Sasha Levin --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index d5bb81e88762..9d2367133c7c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -30,7 +30,7 @@ #define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008 #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ -#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002 +#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0004 #define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */ #define BRCMF_STA_WME 0x00000002 /* WMM association */ -- GitLab From 8cdea53c0c3e2101e44efdc14c8696578b1faccc Mon Sep 17 00:00:00 2001 From: Wright Feng Date: Wed, 24 Jun 2020 04:16:07 -0500 Subject: [PATCH 0176/1304] brcmfmac: set state of hanger slot to FREE when flushing PSQ [ Upstream commit fcdd7a875def793c38d7369633af3eba6c7cf089 ] When USB or SDIO device got abnormal bus disconnection, host driver tried to clean up the skbs in PSQ and TXQ (The skb's pointer in hanger slot linked to PSQ and TSQ), so we should set the state of skb hanger slot to BRCMF_FWS_HANGER_ITEM_STATE_FREE before freeing skb. In brcmf_fws_bus_txq_cleanup it already sets BRCMF_FWS_HANGER_ITEM_STATE_FREE before freeing skb, therefore we add the same thing in brcmf_fws_psq_flush to avoid following warning message. 
[ 1580.012880] ------------ [ cut here ]------------ [ 1580.017550] WARNING: CPU: 3 PID: 3065 at drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:49 brcmu_pkt_buf_free_skb+0x21/0x30 [brcmutil] [ 1580.184017] Call Trace: [ 1580.186514] brcmf_fws_cleanup+0x14e/0x190 [brcmfmac] [ 1580.191594] brcmf_fws_del_interface+0x70/0x90 [brcmfmac] [ 1580.197029] brcmf_proto_bcdc_del_if+0xe/0x10 [brcmfmac] [ 1580.202418] brcmf_remove_interface+0x69/0x190 [brcmfmac] [ 1580.207888] brcmf_detach+0x90/0xe0 [brcmfmac] [ 1580.212385] brcmf_usb_disconnect+0x76/0xb0 [brcmfmac] [ 1580.217557] usb_unbind_interface+0x72/0x260 [ 1580.221857] device_release_driver_internal+0x141/0x200 [ 1580.227152] device_release_driver+0x12/0x20 [ 1580.231460] bus_remove_device+0xfd/0x170 [ 1580.235504] device_del+0x1d9/0x300 [ 1580.239041] usb_disable_device+0x9e/0x270 [ 1580.243160] usb_disconnect+0x94/0x270 [ 1580.246980] hub_event+0x76d/0x13b0 [ 1580.250499] process_one_work+0x144/0x360 [ 1580.254564] worker_thread+0x4d/0x3c0 [ 1580.258247] kthread+0x109/0x140 [ 1580.261515] ? rescuer_thread+0x340/0x340 [ 1580.265543] ? kthread_park+0x60/0x60 [ 1580.269237] ? 
SyS_exit_group+0x14/0x20 [ 1580.273118] ret_from_fork+0x25/0x30 [ 1580.300446] ------------ [ cut here ]------------ Acked-by: Arend van Spriel Signed-off-by: Wright Feng Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200624091608.25154-2-wright.feng@cypress.com Signed-off-by: Sasha Levin --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 1de8497d92b8..dc7c970257d2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -653,6 +653,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, int ifidx) { + struct brcmf_fws_hanger_item *hi; bool (*matchfn)(struct sk_buff *, void *) = NULL; struct sk_buff *skb; int prec; @@ -664,6 +665,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); while (skb) { hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); + hi = &fws->hanger.items[hslot]; + WARN_ON(skb != hi->pkt); + hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true); brcmu_pkt_buf_free_skb(skb); -- GitLab From b1b3ee664a04196ca7a9d9c11bd933943915d524 Mon Sep 17 00:00:00 2001 From: Bolarinwa Olayemi Saheed Date: Mon, 13 Jul 2020 19:55:27 +0200 Subject: [PATCH 0177/1304] iwlegacy: Check the return value of pcie_capability_read_*() [ Upstream commit 9018fd7f2a73e9b290f48a56b421558fa31e8b75 ] On failure pcie_capability_read_dword() sets it's last parameter, val to 0. However, with Patch 14/14, it is possible that val is set to ~0 on failure. This would introduce a bug because (x & x) == (~0 & x). 
This bug can be avoided without changing the function's behaviour if the return value of pcie_capability_read_dword is checked to confirm success. Check the return value of pcie_capability_read_dword() to ensure success. Suggested-by: Bjorn Helgaas Signed-off-by: Bolarinwa Olayemi Saheed Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200713175529.29715-3-refactormyself@gmail.com Signed-off-by: Sasha Levin --- drivers/net/wireless/intel/iwlegacy/common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index e16f2597c219..c1c1cf330de7 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -4302,8 +4302,8 @@ il_apm_init(struct il_priv *il) * power savings, even without L1. */ if (il->cfg->set_l0s) { - pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { + ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); + if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) { /* L1-ASPM enabled; disable(!) L0S */ il_set_bit(il, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); -- GitLab From e68ee83f8b989c6f6e9009c0ba4eccd99d4d1791 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Mon, 29 Jun 2020 06:18:41 +0300 Subject: [PATCH 0178/1304] gpu: host1x: debug: Fix multiple channels emitting messages simultaneously [ Upstream commit 35681862808472a0a4b9a8817ae2789c0b5b3edc ] Once channel's job is hung, it dumps the channel's state into KMSG before tearing down the offending job. If multiple channels hang at once, then they dump messages simultaneously, making the debug info unreadable, and thus, useless. This patch adds mutex which allows only one channel to emit debug messages at a time. 
Signed-off-by: Dmitry Osipenko Signed-off-by: Thierry Reding Signed-off-by: Sasha Levin --- drivers/gpu/host1x/debug.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c index 329e4a3d8ae7..6c9ad4533999 100644 --- a/drivers/gpu/host1x/debug.c +++ b/drivers/gpu/host1x/debug.c @@ -25,6 +25,8 @@ #include "debug.h" #include "channel.h" +static DEFINE_MUTEX(debug_lock); + unsigned int host1x_debug_trace_cmdbuf; static pid_t host1x_debug_force_timeout_pid; @@ -61,12 +63,14 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo) struct output *o = data; mutex_lock(&ch->cdma.lock); + mutex_lock(&debug_lock); if (show_fifo) host1x_hw_show_channel_fifo(m, ch, o); host1x_hw_show_channel_cdma(m, ch, o); + mutex_unlock(&debug_lock); mutex_unlock(&ch->cdma.lock); return 0; -- GitLab From fec3ffe702a0a407586fdcccdb4bf3918cf18fb3 Mon Sep 17 00:00:00 2001 From: Evgeny Novikov Date: Tue, 21 Jul 2020 23:15:58 +0300 Subject: [PATCH 0179/1304] usb: gadget: net2280: fix memory leak on probe error handling paths [ Upstream commit 2468c877da428ebfd701142c4cdfefcfb7d4c00e ] Driver does not release memory for device on error handling paths in net2280_probe() when gadget_release() is not registered yet. The patch fixes the bug like in other similar drivers. Found by Linux Driver Verification project (linuxtesting.org). 
Signed-off-by: Evgeny Novikov Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin --- drivers/usb/gadget/udc/net2280.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index ee872cad5270..a87caad8d1c7 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -3782,8 +3782,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; done: - if (dev) + if (dev) { net2280_remove(pdev); + kfree(dev); + } return retval; } -- GitLab From 469ea9a2968899d3b1f634731b914fd7a5740efd Mon Sep 17 00:00:00 2001 From: Sasi Kumar Date: Wed, 22 Jul 2020 13:07:42 -0400 Subject: [PATCH 0180/1304] bdc: Fix bug causing crash after multiple disconnects [ Upstream commit a95bdfd22076497288868c028619bc5995f5cc7f ] Multiple connects/disconnects can cause a crash on the second disconnect. The driver had a problem where it would try to send endpoint commands after it was disconnected which is not allowed by the hardware. The fix is to only allow the endpoint commands when the endpoint is connected. This will also fix issues that showed up when using configfs to create gadgets. 
Signed-off-by: Sasi Kumar Signed-off-by: Al Cooper Acked-by: Florian Fainelli Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin --- drivers/usb/gadget/udc/bdc/bdc_core.c | 4 ++++ drivers/usb/gadget/udc/bdc/bdc_ep.c | 16 ++++++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index 01b44e159623..4c557f315446 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -283,6 +283,7 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit) * in that case reinit is passed as 1 */ if (reinit) { + int i; /* Enable interrupts */ temp = bdc_readl(bdc->regs, BDC_BDCSC); temp |= BDC_GIE; @@ -292,6 +293,9 @@ static void bdc_mem_init(struct bdc *bdc, bool reinit) /* Initialize SRR to 0 */ memset(bdc->srr.sr_bds, 0, NUM_SR_ENTRIES * sizeof(struct bdc_bd)); + /* clear ep flags to avoid post disconnect stops/deconfigs */ + for (i = 1; i < bdc->num_eps; ++i) + bdc->bdc_ep_array[i]->flags = 0; } else { /* One time initiaization only */ /* Enable status report function pointers */ diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d49c6dc1082d..9ddc0b4e92c9 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -615,7 +615,6 @@ int bdc_ep_enable(struct bdc_ep *ep) } bdc_dbg_bd_list(bdc, ep); /* only for ep0: config ep is called for ep0 from connect event */ - ep->flags |= BDC_EP_ENABLED; if (ep->ep_num == 1) return ret; @@ -759,10 +758,13 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req) __func__, ep->name, start_bdi, end_bdi); dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n", ep, (void *)ep->usb_ep.desc); - /* Stop the ep to see where the HW is ? */ - ret = bdc_stop_ep(bdc, ep->ep_num); - /* if there is an issue with stopping ep, then no need to go further */ - if (ret) + /* if still connected, stop the ep to see where the HW is ? 
*/ + if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) { + ret = bdc_stop_ep(bdc, ep->ep_num); + /* if there is an issue, then no need to go further */ + if (ret) + return 0; + } else return 0; /* @@ -1911,7 +1913,9 @@ static int bdc_gadget_ep_disable(struct usb_ep *_ep) __func__, ep->name, ep->flags); if (!(ep->flags & BDC_EP_ENABLED)) { - dev_warn(bdc->dev, "%s is already disabled\n", ep->name); + if (bdc->gadget.speed != USB_SPEED_UNKNOWN) + dev_warn(bdc->dev, "%s is already disabled\n", + ep->name); return 0; } spin_lock_irqsave(&bdc->lock, flags); -- GitLab From e1f67b26653c7a47745c6d3c524938a87fd93cdc Mon Sep 17 00:00:00 2001 From: Danesh Petigara Date: Wed, 22 Jul 2020 13:07:45 -0400 Subject: [PATCH 0181/1304] usb: bdc: Halt controller on suspend [ Upstream commit 5fc453d7de3d0c345812453823a3a56783c5f82c ] GISB bus error kernel panics have been observed during S2 transition tests on the 7271t platform. The errors are a result of the BDC interrupt handler trying to access BDC register space after the system's suspend callbacks have completed. Adding a suspend hook to the BDC driver that halts the controller before S2 entry thus preventing unwanted access to the BDC register space during this transition. 
Signed-off-by: Danesh Petigara Signed-off-by: Al Cooper Acked-by: Florian Fainelli Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin --- drivers/usb/gadget/udc/bdc/bdc_core.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index 4c557f315446..e174b1b889da 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -608,9 +608,14 @@ static int bdc_remove(struct platform_device *pdev) static int bdc_suspend(struct device *dev) { struct bdc *bdc = dev_get_drvdata(dev); + int ret; - clk_disable_unprepare(bdc->clk); - return 0; + /* Halt the controller */ + ret = bdc_stop(bdc); + if (!ret) + clk_disable_unprepare(bdc->clk); + + return ret; } static int bdc_resume(struct device *dev) -- GitLab From 26613103374bbf2675c9f02bc087da229c38337b Mon Sep 17 00:00:00 2001 From: Jim Cromie Date: Sun, 19 Jul 2020 17:10:47 -0600 Subject: [PATCH 0182/1304] dyndbg: fix a BUG_ON in ddebug_describe_flags [ Upstream commit f678ce8cc3cb2ad29df75d8824c74f36398ba871 ] ddebug_describe_flags() currently fills a caller provided string buffer, after testing its size (also passed) in a BUG_ON. Fix this by replacing them with a known-big-enough string buffer wrapped in a struct, and passing that instead. Also simplify ddebug_describe_flags() flags parameter from a struct to a member in that struct, and hoist the member deref up to the caller. This makes the function reusable (soon) where flags are unpacked. 
Acked-by: Signed-off-by: Jim Cromie Link: https://lore.kernel.org/r/20200719231058.1586423-8-jim.cromie@gmail.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- lib/dynamic_debug.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index dbf2b457e47e..9305ff43fc15 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -85,22 +85,22 @@ static struct { unsigned flag:8; char opt_char; } opt_array[] = { { _DPRINTK_FLAGS_NONE, '_' }, }; +struct flagsbuf { char buf[ARRAY_SIZE(opt_array)+1]; }; + /* format a string into buf[] which describes the _ddebug's flags */ -static char *ddebug_describe_flags(struct _ddebug *dp, char *buf, - size_t maxlen) +static char *ddebug_describe_flags(unsigned int flags, struct flagsbuf *fb) { - char *p = buf; + char *p = fb->buf; int i; - BUG_ON(maxlen < 6); for (i = 0; i < ARRAY_SIZE(opt_array); ++i) - if (dp->flags & opt_array[i].flag) + if (flags & opt_array[i].flag) *p++ = opt_array[i].opt_char; - if (p == buf) + if (p == fb->buf) *p++ = '_'; *p = '\0'; - return buf; + return fb->buf; } #define vpr_info(fmt, ...) 
\ @@ -142,7 +142,7 @@ static int ddebug_change(const struct ddebug_query *query, struct ddebug_table *dt; unsigned int newflags; unsigned int nfound = 0; - char flagbuf[10]; + struct flagsbuf fbuf; /* search for matching ddebugs */ mutex_lock(&ddebug_lock); @@ -199,8 +199,7 @@ static int ddebug_change(const struct ddebug_query *query, vpr_info("changed %s:%d [%s]%s =%s\n", trim_prefix(dp->filename), dp->lineno, dt->mod_name, dp->function, - ddebug_describe_flags(dp, flagbuf, - sizeof(flagbuf))); + ddebug_describe_flags(dp->flags, &fbuf)); } } mutex_unlock(&ddebug_lock); @@ -779,7 +778,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) { struct ddebug_iter *iter = m->private; struct _ddebug *dp = p; - char flagsbuf[10]; + struct flagsbuf flags; vpr_info("called m=%p p=%p\n", m, p); @@ -792,7 +791,7 @@ static int ddebug_proc_show(struct seq_file *m, void *p) seq_printf(m, "%s:%u [%s]%s =%s \"", trim_prefix(dp->filename), dp->lineno, iter->table->mod_name, dp->function, - ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf))); + ddebug_describe_flags(dp->flags, &flags)); seq_escape(m, dp->format, "\t\r\n\""); seq_puts(m, "\"\n"); -- GitLab From 8f2cb3d2e3bba8f61360b8144e0988499c540268 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 25 Jul 2020 20:00:26 +0800 Subject: [PATCH 0183/1304] bcache: fix super block seq numbers comparision in register_cache_set() [ Upstream commit 117f636ea695270fe492d0c0c9dfadc7a662af47 ] In register_cache_set(), c is pointer to struct cache_set, and ca is pointer to struct cache, if ca->sb.seq > c->sb.seq, it means this registering cache has up to date version and other members, the in- memory version and other members should be updated to the newer value. But current implementation makes a cache set only has a single cache device, so the above assumption works well except for a special case. 
The execption is when a cache device new created and both ca->sb.seq and c->sb.seq are 0, because the super block is never flushed out yet. In the location for the following if() check, 2156 if (ca->sb.seq > c->sb.seq) { 2157 c->sb.version = ca->sb.version; 2158 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); 2159 c->sb.flags = ca->sb.flags; 2160 c->sb.seq = ca->sb.seq; 2161 pr_debug("set version = %llu\n", c->sb.version); 2162 } c->sb.version is not initialized yet and valued 0. When ca->sb.seq is 0, the if() check will fail (because both values are 0), and the cache set version, set_uuid, flags and seq won't be updated. The above problem is hiden for current code, because the bucket size is compatible among different super block version. And the next time when running cache set again, ca->sb.seq will be larger than 0 and cache set super block version will be updated properly. But if the large bucket feature is enabled, sb->bucket_size is the low 16bits of the bucket size. For a power of 2 value, when the actual bucket size exceeds 16bit width, sb->bucket_size will always be 0. Then read_super_common() will fail because the if() check to is_power_of_2(sb->bucket_size) is false. This is how the long time hidden bug is triggered. This patch modifies the if() check to the following way, 2156 if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) { Then cache set's version, set_uuid, flags and seq will always be updated corectly including for a new created cache device. 
Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- drivers/md/bcache/super.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 68ebc2759c2e..46ad0bf18e1f 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2013,7 +2013,14 @@ static const char *register_cache_set(struct cache *ca) sysfs_create_link(&c->kobj, &ca->kobj, buf)) goto err; - if (ca->sb.seq > c->sb.seq) { + /* + * A special case is both ca->sb.seq and c->sb.seq are 0, + * such condition happens on a new created cache device whose + * super block is never flushed yet. In this case c->sb.version + * and other members should be updated too, otherwise we will + * have a mistaken super block version in cache set. + */ + if (ca->sb.seq > c->sb.seq || c->sb.seq == 0) { c->sb.version = ca->sb.version; memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16); c->sb.flags = ca->sb.flags; -- GitLab From 3ca48b8952b4d0aa4aa77770d98901db206ec190 Mon Sep 17 00:00:00 2001 From: Erik Kaneda Date: Mon, 20 Jul 2020 10:31:20 -0700 Subject: [PATCH 0184/1304] ACPICA: Do not increment operation_region reference counts for field units [ Upstream commit 6a54ebae6d047c988a31f5ac5a64ab5cf83797a2 ] ACPICA commit e17b28cfcc31918d0db9547b6b274b09c413eb70 Object reference counts are used as a part of ACPICA's garbage collection mechanism. This mechanism keeps track of references to heap-allocated structures such as the ACPI operand objects. Recent server firmware has revealed that this reference count can overflow on large servers that declare many field units under the same operation_region. This occurs because each field unit declaration will add a reference count to the source operation_region. This change solves the reference count overflow for operation_regions objects by preventing fieldunits from incrementing their operation_region's reference count. 
Each operation_region's reference count will not be changed by named objects declared under the Field operator. During namespace deletion, the operation_region namespace node will be deleted and each fieldunit will be deleted without touching the deleted operation_region object. Link: https://github.com/acpica/acpica/commit/e17b28cf Signed-off-by: Erik Kaneda Signed-off-by: Bob Moore Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin --- drivers/acpi/acpica/exprep.c | 4 ---- drivers/acpi/acpica/utdelete.c | 6 +----- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 738f3c732363..228feeea555f 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -473,10 +473,6 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) (u8)access_byte_width; } } - /* An additional reference for the container */ - - acpi_ut_add_reference(obj_desc->field.region_obj); - ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, "RegionField: BitOff %X, Off %X, Gran %X, Region %p\n", obj_desc->field.start_field_bit_offset, diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 8cc4392c61f3..0dc8dea81582 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -563,11 +563,6 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) next_object = object->buffer_field.buffer_obj; break; - case ACPI_TYPE_LOCAL_REGION_FIELD: - - next_object = object->field.region_obj; - break; - case ACPI_TYPE_LOCAL_BANK_FIELD: next_object = object->bank_field.bank_obj; @@ -608,6 +603,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } break; + case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_REGION: default: -- GitLab From dfa9bc1aaf0679e9d81ef80ba3a2390bf44f8a2a Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 1 Jul 2020 13:36:00 -0700 Subject: [PATCH 0185/1304] drm/msm: ratelimit crtc 
event overflow error [ Upstream commit 5e16372b5940b1fecc3cc887fc02a50ba148d373 ] This can happen a lot when things go pear shaped. Lets not flood dmesg when this happens. Signed-off-by: Rob Clark Reviewed-by: Abhinav Kumar Signed-off-by: Rob Clark Signed-off-by: Sasha Levin --- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 4752f08f0884..3c3b7f7013e8 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -659,7 +659,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event) spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags); if (!fevent) { - DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event); + DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event); return; } -- GitLab From ea01e491c3da0952f6b8bc84f62236a5e431e808 Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Fri, 22 May 2020 09:34:51 +0100 Subject: [PATCH 0186/1304] agp/intel: Fix a memory leak on module initialisation failure [ Upstream commit b975abbd382fe442713a4c233549abb90e57c22b ] In intel_gtt_setup_scratch_page(), pointer "page" is not released if pci_dma_mapping_error() return an error, leading to a memory leak on module initialisation failure. Simply fix this issue by freeing "page" before return. 
Fixes: 0e87d2b06cb46 ("intel-gtt: initialize our own scratch page") Signed-off-by: Qiushi Wu Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200522083451.7448-1-chris@chris-wilson.co.uk Signed-off-by: Sasha Levin --- drivers/char/agp/intel-gtt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index b161bdf60000..0941d38b2d32 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -304,8 +304,10 @@ static int intel_gtt_setup_scratch_page(void) if (intel_private.needs_dmar) { dma_addr = pci_map_page(intel_private.pcidev, page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) + if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) { + __free_page(page); return -EINVAL; + } intel_private.scratch_page_dma = dma_addr; } else -- GitLab From 573150b10c2e9f4d50aac3a2a9125c1b05b9a4d0 Mon Sep 17 00:00:00 2001 From: Dejin Zheng Date: Thu, 23 Apr 2020 00:07:19 +0800 Subject: [PATCH 0187/1304] video: fbdev: sm712fb: fix an issue about iounmap for a wrong address [ Upstream commit 98bd4f72988646c35569e1e838c0ab80d06c77f6 ] the sfb->fb->screen_base is not save the value get by iounmap() when the chip id is 0x720. so iounmap() for address sfb->fb->screen_base is not right. 
Fixes: 1461d6672864854 ("staging: sm7xxfb: merge sm712fb with fbdev") Cc: Andy Shevchenko Cc: Sudip Mukherjee Cc: Teddy Wang Cc: Greg Kroah-Hartman Signed-off-by: Dejin Zheng Signed-off-by: Bartlomiej Zolnierkiewicz Link: https://patchwork.freedesktop.org/patch/msgid/20200422160719.27763-1-zhengdejin5@gmail.com Signed-off-by: Sasha Levin --- drivers/video/fbdev/sm712fb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index f1dcc6766d1e..1781ca697f66 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -1429,6 +1429,8 @@ static int smtc_map_smem(struct smtcfb_info *sfb, static void smtc_unmap_smem(struct smtcfb_info *sfb) { if (sfb && sfb->fb->screen_base) { + if (sfb->chip_id == 0x720) + sfb->fb->screen_base -= 0x00200000; iounmap(sfb->fb->screen_base); sfb->fb->screen_base = NULL; } -- GitLab From fd6aeb2e69996b27e3cfc4579e4a28718e2cc99b Mon Sep 17 00:00:00 2001 From: Dejin Zheng Date: Fri, 24 Apr 2020 00:42:51 +0800 Subject: [PATCH 0188/1304] console: newport_con: fix an issue about leak related system resources [ Upstream commit fd4b8243877250c05bb24af7fea5567110c9720b ] A call of the function do_take_over_console() can fail here. The corresponding system resources were not released then. Thus add a call of iounmap() and release_mem_region() together with the check of a failure predicate. and also add release_mem_region() on device removal. 
Fixes: e86bb8acc0fdc ("[PATCH] VT binding: Make newport_con support binding") Suggested-by: Bartlomiej Zolnierkiewicz Signed-off-by: Dejin Zheng Reviewed-by: Andy Shevchenko Cc: Greg Kroah-Hartman cc: Thomas Gleixner Cc: Andrew Morton Signed-off-by: Bartlomiej Zolnierkiewicz Link: https://patchwork.freedesktop.org/patch/msgid/20200423164251.3349-1-zhengdejin5@gmail.com Signed-off-by: Sasha Levin --- drivers/video/console/newport_con.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index 7f2526b43b33..cc2fb5043184 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c @@ -31,6 +31,8 @@ #include #include +#define NEWPORT_LEN 0x10000 + #define FONT_DATA ((unsigned char *)font_vga_8x16.data) /* borrowed from fbcon.c */ @@ -42,6 +44,7 @@ static unsigned char *font_data[MAX_NR_CONSOLES]; static struct newport_regs *npregs; +static unsigned long newport_addr; static int logo_active; static int topscan; @@ -701,7 +704,6 @@ const struct consw newport_con = { static int newport_probe(struct gio_device *dev, const struct gio_device_id *id) { - unsigned long newport_addr; int err; if (!dev->resource.start) @@ -711,7 +713,7 @@ static int newport_probe(struct gio_device *dev, return -EBUSY; /* we only support one Newport as console */ newport_addr = dev->resource.start + 0xF0000; - if (!request_mem_region(newport_addr, 0x10000, "Newport")) + if (!request_mem_region(newport_addr, NEWPORT_LEN, "Newport")) return -ENODEV; npregs = (struct newport_regs *)/* ioremap cannot fail */ @@ -719,6 +721,11 @@ static int newport_probe(struct gio_device *dev, console_lock(); err = do_take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1); console_unlock(); + + if (err) { + iounmap((void *)npregs); + release_mem_region(newport_addr, NEWPORT_LEN); + } return err; } @@ -726,6 +733,7 @@ static void newport_remove(struct gio_device *dev) { 
give_up_console(&newport_con); iounmap((void *)npregs); + release_mem_region(newport_addr, NEWPORT_LEN); } static struct gio_device_id newport_ids[] = { -- GitLab From 73f971a790e3b975ee894f6f1f51f3eac03981f3 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 29 Apr 2020 10:45:05 +0200 Subject: [PATCH 0189/1304] video: pxafb: Fix the function used to balance a 'dma_alloc_coherent()' call [ Upstream commit 499a2c41b954518c372873202d5e7714e22010c4 ] 'dma_alloc_coherent()' must be balanced by a call to 'dma_free_coherent()' not 'dma_free_wc()'. The correct dma_free_ function is already used in the error handling path of the probe function. Fixes: 77e196752bdd ("[ARM] pxafb: allow video memory size to be configurable") Signed-off-by: Christophe JAILLET Cc: Sumit Semwal Cc: Rafael J. Wysocki Cc: Jonathan Corbet Cc: Viresh Kumar Cc: Jani Nikula cc: Mauro Carvalho Chehab Cc: Eric Miao Signed-off-by: Bartlomiej Zolnierkiewicz Link: https://patchwork.freedesktop.org/patch/msgid/20200429084505.108897-1-christophe.jaillet@wanadoo.fr Signed-off-by: Sasha Levin --- drivers/video/fbdev/pxafb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index d59c8a59f582..90dee3e6f8bc 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -2446,8 +2446,8 @@ static int pxafb_remove(struct platform_device *dev) free_pages_exact(fbi->video_mem, fbi->video_mem_size); - dma_free_wc(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, - fbi->dma_buff_phys); + dma_free_coherent(&dev->dev, fbi->dma_buff_size, fbi->dma_buff, + fbi->dma_buff_phys); return 0; } -- GitLab From 43bd11238da345ad4b09a585ae2a038c6f8b7113 Mon Sep 17 00:00:00 2001 From: Evan Green Date: Thu, 4 Jun 2020 10:59:11 -0700 Subject: [PATCH 0190/1304] ath10k: Acquire tx_lock in tx error paths [ Upstream commit a738e766e3ed92c4ee5ec967777276b5ce11dd2c ] ath10k_htt_tx_free_msdu_id() has a lockdep assertion that htt->tx_lock is 
held. Acquire the lock in a couple of error paths when calling that function to ensure this condition is met. Fixes: 6421969f248fd ("ath10k: refactor tx pending management") Fixes: e62ee5c381c59 ("ath10k: Add support for htt_data_tx_desc_64 descriptor") Signed-off-by: Evan Green Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604105901.1.I5b8b0c7ee0d3e51a73248975a9da61401b8f3900@changeid Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ath10k/htt_tx.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 7cff0d52338f..fd011bdabb96 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1329,7 +1329,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } @@ -1536,7 +1538,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } -- GitLab From 8b6e1f79d7ea283c80fb5521747a768ab1f58113 Mon Sep 17 00:00:00 2001 From: Tomasz Duszynski Date: Mon, 1 Jun 2020 18:15:52 +0200 Subject: [PATCH 0191/1304] iio: improve IIO_CONCENTRATION channel type description [ Upstream commit df16c33a4028159d1ba8a7061c9fa950b58d1a61 ] IIO_CONCENTRATION together with INFO_RAW specifier is used for reporting raw concentrations of pollutants. Raw value should be meaningless before being properly scaled. Because of that description shouldn't mention raw value unit whatsoever. Fix this by rephrasing existing description so it follows conventions used throughout IIO ABI docs. 
Fixes: 8ff6b3bc94930 ("iio: chemical: Add IIO_CONCENTRATION channel type") Signed-off-by: Tomasz Duszynski Acked-by: Matt Ranostay Signed-off-by: Jonathan Cameron Signed-off-by: Sasha Levin --- Documentation/ABI/testing/sysfs-bus-iio | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 8127a08e366d..d10bcca6c3fb 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -1559,7 +1559,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw KernelVersion: 4.3 Contact: linux-iio@vger.kernel.org Description: - Raw (unscaled no offset etc.) percentage reading of a substance. + Raw (unscaled no offset etc.) reading of a substance. Units + after application of scale and offset are percents. What: /sys/bus/iio/devices/iio:deviceX/in_resistance_raw What: /sys/bus/iio/devices/iio:deviceX/in_resistanceX_raw -- GitLab From 0e0d6222a489a15329aad30715d25d7f14cb6638 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Tue, 16 Jun 2020 23:21:24 +0200 Subject: [PATCH 0192/1304] drm/etnaviv: Fix error path on failure to enable bus clk [ Upstream commit f8794feaf65cdc97767604cf864775d20b97f397 ] Since commit 65f037e8e908 ("drm/etnaviv: add support for slave interface clock") the reg clock is enabled before the bus clock and we need to undo its enablement on error. 
Fixes: 65f037e8e908 ("drm/etnaviv: add support for slave interface clock") Signed-off-by: Lubomir Rintel Signed-off-by: Lucas Stach Signed-off-by: Sasha Levin --- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index f17fbe6ff7c7..37ae15dc4fc6 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1465,7 +1465,7 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu) if (gpu->clk_bus) { ret = clk_prepare_enable(gpu->clk_bus); if (ret) - return ret; + goto disable_clk_reg; } if (gpu->clk_core) { @@ -1488,6 +1488,9 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu) disable_clk_bus: if (gpu->clk_bus) clk_disable_unprepare(gpu->clk_bus); +disable_clk_reg: + if (gpu->clk_reg) + clk_disable_unprepare(gpu->clk_reg); return ret; } -- GitLab From 64918e3f716ec0b13417f4396118183889c80636 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 18 Jun 2020 11:04:00 +0100 Subject: [PATCH 0193/1304] drm/arm: fix unintentional integer overflow on left shift [ Upstream commit 5f368ddea6fec519bdb93b5368f6a844b6ea27a6 ] Shifting the integer value 1 is evaluated using 32-bit arithmetic and then used in an expression that expects a long value leads to a potential integer overflow. Fix this by using the BIT macro to perform the shift to avoid the overflow. 
Addresses-Coverity: ("Unintentional integer overflow") Fixes: ad49f8602fe8 ("drm/arm: Add support for Mali Display Processors") Signed-off-by: Colin Ian King Acked-by: Liviu Dudau Signed-off-by: Liviu Dudau Link: https://patchwork.freedesktop.org/patch/msgid/20200618100400.11464-1-colin.king@canonical.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/arm/malidp_planes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 29409a65d864..a347b27405d8 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -446,7 +446,7 @@ int malidp_de_planes_init(struct drm_device *drm) const struct malidp_hw_regmap *map = &malidp->dev->hw->map; struct malidp_plane *plane = NULL; enum drm_plane_type plane_type; - unsigned long crtcs = 1 << drm->mode_config.num_crtc; + unsigned long crtcs = BIT(drm->mode_config.num_crtc); unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; u32 *formats; -- GitLab From 125ec9008e56a201d2d845384872bf05110191d0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 5 May 2020 16:19:17 +0200 Subject: [PATCH 0194/1304] leds: lm355x: avoid enum conversion warning [ Upstream commit 985b1f596f9ed56f42b8c2280005f943e1434c06 ] clang points out that doing arithmetic between diffent enums is usually a mistake: drivers/leds/leds-lm355x.c:167:28: warning: bitwise operation between different enumeration types ('enum lm355x_tx2' and 'enum lm355x_ntc') [-Wenum-enum-conversion] reg_val = pdata->pin_tx2 | pdata->ntc_pin; ~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~ drivers/leds/leds-lm355x.c:178:28: warning: bitwise operation between different enumeration types ('enum lm355x_tx2' and 'enum lm355x_ntc') [-Wenum-enum-conversion] reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode; ~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~ In this driver, it is intentional, so 
add a cast to hide the false-positive warning. It appears to be the only instance of this warning at the moment. Fixes: b98d13c72592 ("leds: Add new LED driver for lm355x chips") Signed-off-by: Arnd Bergmann Signed-off-by: Pavel Machek Signed-off-by: Sasha Levin --- drivers/leds/leds-lm355x.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c index 6cb94f9a2f3f..b9c60dd2b132 100644 --- a/drivers/leds/leds-lm355x.c +++ b/drivers/leds/leds-lm355x.c @@ -168,18 +168,19 @@ static int lm355x_chip_init(struct lm355x_chip_data *chip) /* input and output pins configuration */ switch (chip->type) { case CHIP_LM3554: - reg_val = pdata->pin_tx2 | pdata->ntc_pin; + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin; ret = regmap_update_bits(chip->regmap, 0xE0, 0x28, reg_val); if (ret < 0) goto out; - reg_val = pdata->pass_mode; + reg_val = (u32)pdata->pass_mode; ret = regmap_update_bits(chip->regmap, 0xA0, 0x04, reg_val); if (ret < 0) goto out; break; case CHIP_LM3556: - reg_val = pdata->pin_tx2 | pdata->ntc_pin | pdata->pass_mode; + reg_val = (u32)pdata->pin_tx2 | (u32)pdata->ntc_pin | + (u32)pdata->pass_mode; ret = regmap_update_bits(chip->regmap, 0x0A, 0xC4, reg_val); if (ret < 0) goto out; -- GitLab From 7af83dfb6e30260f80ca844b070f5e4fd86adaed Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Wed, 3 Jun 2020 18:41:22 +0200 Subject: [PATCH 0195/1304] media: omap3isp: Add missed v4l2_ctrl_handler_free() for preview_init_entities() [ Upstream commit dc7690a73017e1236202022e26a6aa133f239c8c ] preview_init_entities() does not call v4l2_ctrl_handler_free() when it fails. Add the missed function to fix it. 
Fixes: de1135d44f4f ("[media] omap3isp: CCDC, preview engine and resizer") Signed-off-by: Chuhong Yuan Reviewed-by: Laurent Pinchart Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/platform/omap3isp/isppreview.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c index 591c6de498f8..20857ae42a77 100644 --- a/drivers/media/platform/omap3isp/isppreview.c +++ b/drivers/media/platform/omap3isp/isppreview.c @@ -2290,7 +2290,7 @@ static int preview_init_entities(struct isp_prev_device *prev) me->ops = &preview_media_ops; ret = media_entity_pads_init(me, PREV_PADS_NUM, pads); if (ret < 0) - return ret; + goto error_handler_free; preview_init_formats(sd, NULL); @@ -2323,6 +2323,8 @@ static int preview_init_entities(struct isp_prev_device *prev) omap3isp_video_cleanup(&prev->video_in); error_video_in: media_entity_cleanup(&prev->subdev.entity); +error_handler_free: + v4l2_ctrl_handler_free(&prev->ctrls); return ret; } -- GitLab From 18e8f1f6b535656c0f598277981659521486060c Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Thu, 25 Jun 2020 14:12:55 -0500 Subject: [PATCH 0196/1304] ASoC: Intel: bxt_rt298: add missing .owner field [ Upstream commit 88cee34b776f80d2da04afb990c2a28c36799c43 ] This field is required for ASoC cards. 
Not setting it will result in a module->name pointer being NULL and generate problems such as cat /proc/asound/modules 0 (efault) Fixes: 76016322ec56 ('ASoC: Intel: Add Broxton-P machine driver') Reported-by: Jaroslav Kysela Suggested-by: Takashi Iwai Signed-off-by: Pierre-Louis Bossart Reviewed-by: Kai Vehmanen Link: https://lore.kernel.org/r/20200625191308.3322-5-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/intel/boards/bxt_rt298.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c index 27308337ab12..ba76e37a4b09 100644 --- a/sound/soc/intel/boards/bxt_rt298.c +++ b/sound/soc/intel/boards/bxt_rt298.c @@ -544,6 +544,7 @@ static int bxt_card_late_probe(struct snd_soc_card *card) /* broxton audio machine driver for SPT + RT298S */ static struct snd_soc_card broxton_rt298 = { .name = "broxton-rt298", + .owner = THIS_MODULE, .dai_link = broxton_rt298_dais, .num_links = ARRAY_SIZE(broxton_rt298_dais), .controls = broxton_controls, @@ -559,6 +560,7 @@ static struct snd_soc_card broxton_rt298 = { static struct snd_soc_card geminilake_rt298 = { .name = "geminilake-rt298", + .owner = THIS_MODULE, .dai_link = broxton_rt298_dais, .num_links = ARRAY_SIZE(broxton_rt298_dais), .controls = broxton_controls, -- GitLab From fdc83318dd5c9de1c8b8fa3950c1f9736b12a767 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 25 Jun 2020 22:47:30 +0200 Subject: [PATCH 0197/1304] scsi: cumana_2: Fix different dev_id between request_irq() and free_irq() [ Upstream commit 040ab9c4fd0070cd5fa71ba3a7b95b8470db9b4d ] The dev_id used in request_irq() and free_irq() should match. Use 'info' in both cases. Link: https://lore.kernel.org/r/20200625204730.943520-1-christophe.jaillet@wanadoo.fr Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Acked-by: Russell King Signed-off-by: Christophe JAILLET Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/arm/cumana_2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c index edce5f3cfdba..93ba83e3148e 100644 --- a/drivers/scsi/arm/cumana_2.c +++ b/drivers/scsi/arm/cumana_2.c @@ -454,7 +454,7 @@ static int cumanascsi2_probe(struct expansion_card *ec, if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_release: fas216_release(host); -- GitLab From 1292ad224fa5c6e1e7b1cf2b24eb816685baed05 Mon Sep 17 00:00:00 2001 From: Emil Velikov Date: Tue, 5 May 2020 17:03:29 +0100 Subject: [PATCH 0198/1304] drm/mipi: use dcs write for mipi_dsi_dcs_set_tear_scanline [ Upstream commit 7a05c3b6d24b8460b3cec436cf1d33fac43c8450 ] The helper uses the MIPI_DCS_SET_TEAR_SCANLINE, although it's currently using the generic write. This does not look right. Perhaps some platforms don't distinguish between the two writers? 
Cc: Robert Chiras Cc: Vinay Simha BN Cc: Jani Nikula Cc: Thierry Reding Fixes: e83950816367 ("drm/dsi: Implement set tear scanline") Signed-off-by: Emil Velikov Reviewed-by: Thierry Reding Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200505160329.2976059-3-emil.l.velikov@gmail.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/drm_mipi_dsi.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 80b75501f5c6..7ed8e510565e 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -1034,11 +1034,11 @@ EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format); */ int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline) { - u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8, - scanline & 0xff }; + u8 payload[2] = { scanline >> 8, scanline & 0xff }; ssize_t err; - err = mipi_dsi_generic_write(dsi, payload, sizeof(payload)); + err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_SCANLINE, payload, + sizeof(payload)); if (err < 0) return err; -- GitLab From bf9efd4118d4341cb58582e8ac46525e343c7022 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Tue, 2 Jun 2020 20:07:33 +0800 Subject: [PATCH 0199/1304] cxl: Fix kobject memleak [ Upstream commit 85c5cbeba8f4fb28e6b9bfb3e467718385f78f76 ] Currently the error return path from kobject_init_and_add() is not followed by a call to kobject_put() - which means we are leaking the kobject. Fix it by adding a call to kobject_put() in the error path of kobject_init_and_add(). 
Fixes: b087e6190ddc ("cxl: Export optional AFU configuration record in sysfs") Reported-by: Hulk Robot Signed-off-by: Wang Hai Acked-by: Andrew Donnellan Acked-by: Frederic Barrat Link: https://lore.kernel.org/r/20200602120733.5943-1-wanghai38@huawei.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/misc/cxl/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c index 629e2e156412..0baa229d2b7d 100644 --- a/drivers/misc/cxl/sysfs.c +++ b/drivers/misc/cxl/sysfs.c @@ -628,7 +628,7 @@ static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int c rc = kobject_init_and_add(&cr->kobj, &afu_config_record_type, &afu->dev.kobj, "cr%i", cr->cr); if (rc) - goto err; + goto err1; rc = sysfs_create_bin_file(&cr->kobj, &cr->config_attr); if (rc) -- GitLab From 50037ae68bbd1d9f2c7daeb823dcb61dfd988a00 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 24 Jun 2020 13:07:10 +0100 Subject: [PATCH 0200/1304] drm/radeon: fix array out-of-bounds read and write issues [ Upstream commit 7ee78aff9de13d5dccba133f4a0de5367194b243 ] There is an off-by-one bounds check on the index into arrays table->mc_reg_address and table->mc_reg_table_entry[k].mc_data[j] that can lead to reads and writes outside of arrays. Fix the bound checking off-by-one error. 
Addresses-Coverity: ("Out-of-bounds read/write") Fixes: cc8dbbb4f62a ("drm/radeon: add dpm support for CI dGPUs (v2)") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/radeon/ci_dpm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 9e7d5e44a12f..90c1afe498be 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -4364,7 +4364,7 @@ static int ci_set_mc_special_registers(struct radeon_device *rdev, table->mc_reg_table_entry[k].mc_data[j] |= 0x100; } j++; - if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) + if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) return -EINVAL; if (!pi->mem_gddr5) { -- GitLab From 5afc55c836e980d3dc3f1dda82c195a8d8b27dd3 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 26 Jun 2020 05:59:48 +0200 Subject: [PATCH 0201/1304] scsi: powertec: Fix different dev_id between request_irq() and free_irq() [ Upstream commit d179f7c763241c1dc5077fca88ddc3c47d21b763 ] The dev_id used in request_irq() and free_irq() should match. Use 'info' in both cases. Link: https://lore.kernel.org/r/20200626035948.944148-1-christophe.jaillet@wanadoo.fr Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Christophe JAILLET Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/arm/powertec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c index 79aa88911b7f..b5e4a25ea1ef 100644 --- a/drivers/scsi/arm/powertec.c +++ b/drivers/scsi/arm/powertec.c @@ -382,7 +382,7 @@ static int powertecscsi_probe(struct expansion_card *ec, if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_release: fas216_release(host); -- GitLab From fdac85326f40c7ba6ae2b9e4a2c710f26b708ab8 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 26 Jun 2020 06:05:53 +0200 Subject: [PATCH 0202/1304] scsi: eesox: Fix different dev_id between request_irq() and free_irq() [ Upstream commit 86f2da1112ccf744ad9068b1d5d9843faf8ddee6 ] The dev_id used in request_irq() and free_irq() should match. Use 'info' in both cases. Link: https://lore.kernel.org/r/20200626040553.944352-1-christophe.jaillet@wanadoo.fr Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Christophe JAILLET Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/arm/eesox.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c index e93e047f4316..65bb34ce93b9 100644 --- a/drivers/scsi/arm/eesox.c +++ b/drivers/scsi/arm/eesox.c @@ -575,7 +575,7 @@ static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id) if (info->info.scsi.dma != NO_DMA) free_dma(info->info.scsi.dma); - free_irq(ec->irq, host); + free_irq(ec->irq, info); out_remove: fas216_remove(host); -- GitLab From 7106f943302247ed9fcde84afdca06cbe9e19dce Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Wed, 1 Jul 2020 18:17:19 +0300 Subject: [PATCH 0203/1304] ipvs: allow connection reuse for unconfirmed conntrack MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit f0a5e4d7a594e0fe237d3dfafb069bb82f80f42f ] YangYuxi is reporting that connection reuse is causing one-second delay when SYN hits existing connection in TIME_WAIT state. Such delay was added to give time to expire both the IPVS connection and the corresponding conntrack. This was considered a rare case at that time but it is causing problem for some environments such as Kubernetes. As nf_conntrack_tcp_packet() can decide to release the conntrack in TIME_WAIT state and to replace it with a fresh NEW conntrack, we can use this to allow rescheduling just by tuning our check: if the conntrack is confirmed we can not schedule it to different real server and the one-second delay still applies but if new conntrack was created, we are free to select new real server without any delays. 
YangYuxi lists some of the problem reports: - One second connection delay in masquerading mode: https://marc.info/?t=151683118100004&r=1&w=2 - IPVS low throughput #70747 https://github.com/kubernetes/kubernetes/issues/70747 - Apache Bench can fill up ipvs service proxy in seconds #544 https://github.com/cloudnativelabs/kube-router/issues/544 - Additional 1s latency in `host -> service IP -> pod` https://github.com/kubernetes/kubernetes/issues/90854 Fixes: f719e3754ee2 ("ipvs: drop first packet to redirect conntrack") Co-developed-by: YangYuxi Signed-off-by: YangYuxi Signed-off-by: Julian Anastasov Reviewed-by: Simon Horman Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- include/net/ip_vs.h | 10 ++++------ net/netfilter/ipvs/ip_vs_core.c | 12 +++++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index af0ede9ad4d0..c31e54a41b5c 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1614,18 +1614,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) } #endif /* CONFIG_IP_VS_NFCT */ -/* Really using conntrack? */ -static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, - struct sk_buff *skb) +/* Using old conntrack that can not be redirected to another real server? 
*/ +static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp, + struct sk_buff *skb) { #ifdef CONFIG_IP_VS_NFCT enum ip_conntrack_info ctinfo; struct nf_conn *ct; - if (!(cp->flags & IP_VS_CONN_F_NFCT)) - return false; ct = nf_ct_get(skb, &ctinfo); - if (ct) + if (ct && nf_ct_is_confirmed(ct)) return true; #endif return false; diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index a71f777d1353..d5e4329579e2 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1928,14 +1928,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) { - bool uses_ct = false, resched = false; + bool old_ct = false, resched = false; if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && unlikely(!atomic_read(&cp->dest->weight))) { resched = true; - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); } else if (is_new_conn_expected(cp, conn_reuse_mode)) { - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); if (!atomic_read(&cp->n_control)) { resched = true; } else { @@ -1943,15 +1943,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int * that uses conntrack while it is still * referenced by controlled connection(s). 
*/ - resched = !uses_ct; + resched = !old_ct; } } if (resched) { + if (!old_ct) + cp->flags &= ~IP_VS_CONN_F_NFCT; if (!atomic_read(&cp->n_control)) ip_vs_conn_expire_now(cp); __ip_vs_conn_put(cp); - if (uses_ct) + if (old_ct) return NF_DROP; cp = NULL; } -- GitLab From 6f88a685bdc140fd41491286a5bbe31ca45ac755 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 8 May 2020 16:40:22 +0200 Subject: [PATCH 0204/1304] media: firewire: Using uninitialized values in node_probe() [ Upstream commit 2505a210fc126599013aec2be741df20aaacc490 ] If fw_csr_string() returns -ENOENT, then "name" is uninitialized. So then the "strlen(model_names[i]) <= name_len" is true because strlen() is unsigned and -ENOENT is type promoted to a very high positive value. Then the "strncmp(name, model_names[i], name_len)" uses uninitialized data because "name" is uninitialized. Fixes: 92374e886c75 ("[media] firedtv: drop obsolete backend abstraction") Signed-off-by: Dan Carpenter Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/firewire/firedtv-fw.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/media/firewire/firedtv-fw.c b/drivers/media/firewire/firedtv-fw.c index 92f4112d2e37..eaf94b817dbc 100644 --- a/drivers/media/firewire/firedtv-fw.c +++ b/drivers/media/firewire/firedtv-fw.c @@ -271,6 +271,8 @@ static int node_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) name_len = fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)); + if (name_len < 0) + return name_len; for (i = ARRAY_SIZE(model_names); --i; ) if (strlen(model_names[i]) <= name_len && strncmp(name, model_names[i], name_len) == 0) -- GitLab From f6ed47df61f3c5dc21fa5b359d99ad01df8f803d Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Thu, 28 May 2020 08:41:47 +0200 Subject: [PATCH 0205/1304] media: exynos4-is: Add missed check for pinctrl_lookup_state() [ Upstream commit 18ffec750578f7447c288647d7282c7d12b1d969 ] 
fimc_md_get_pinctrl() misses a check for pinctrl_lookup_state(). Add the missed check to fix it. Fixes: 4163851f7b99 ("[media] s5p-fimc: Use pinctrl API for camera ports configuration]") Signed-off-by: Chuhong Yuan Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/platform/exynos4-is/media-dev.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index b5993532831d..2d25a197dc65 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -1259,6 +1259,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd) pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl, PINCTRL_STATE_IDLE); + if (IS_ERR(pctl->state_idle)) + return PTR_ERR(pctl->state_idle); + return 0; } -- GitLab From 17979d8c8b2a18e10c309860054bf522c980159e Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Mon, 29 Jun 2020 14:47:17 -0700 Subject: [PATCH 0206/1304] xfs: don't eat an EIO/ENOSPC writeback error when scrubbing data fork [ Upstream commit eb0efe5063bb10bcb653e4f8e92a74719c03a347 ] The data fork scrubber calls filemap_write_and_wait to flush dirty pages and delalloc reservations out to disk prior to checking the data fork's extent mappings. Unfortunately, this means that scrub can consume the EIO/ENOSPC errors that would otherwise have stayed around in the address space until (we hope) the writer application calls fsync to persist data and collect errors. The end result is that programs that wrote to a file might never see the error code and proceed as if nothing were wrong. xfs_scrub is not in a position to notify file writers about the writeback failure, and it's only here to check metadata, not file contents. Therefore, if writeback fails, we should stuff the error code back into the address space so that an fsync by the writer application can pick that up. 
Fixes: 99d9d8d05da2 ("xfs: scrub inode block mappings") Signed-off-by: Darrick J. Wong Reviewed-by: Brian Foster Reviewed-by: Dave Chinner Signed-off-by: Sasha Levin --- fs/xfs/scrub/bmap.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index e1d11f3223e3..f84a58e523bc 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -53,9 +53,27 @@ xchk_setup_inode_bmap( */ if (S_ISREG(VFS_I(sc->ip)->i_mode) && sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) { + struct address_space *mapping = VFS_I(sc->ip)->i_mapping; + inode_dio_wait(VFS_I(sc->ip)); - error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping); - if (error) + + /* + * Try to flush all incore state to disk before we examine the + * space mappings for the data fork. Leave accumulated errors + * in the mapping for the writer threads to consume. + * + * On ENOSPC or EIO writeback errors, we continue into the + * extent mapping checks because write failures do not + * necessarily imply anything about the correctness of the file + * metadata. The metadata and the file data could be on + * completely separate devices; a media failure might only + * affect a subset of the disk, etc. We can handle delalloc + * extents in the scrubber, so leaving them in memory is fine. + */ + error = filemap_fdatawrite(mapping); + if (!error) + error = filemap_fdatawait_keep_errors(mapping); + if (error && (error != -ENOSPC && error != -EIO)) goto out; } -- GitLab From 4c2c947f4888454a91f04a5df26cf8303453e089 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Mon, 29 Jun 2020 14:47:18 -0700 Subject: [PATCH 0207/1304] xfs: fix reflink quota reservation accounting error [ Upstream commit 83895227aba1ade33e81f586aa7b6b1e143096a5 ] Quota reservations are supposed to account for the blocks that might be allocated due to a bmap btree split. 
Reflink doesn't do this, so fix this to make the quota accounting more accurate before we start rearranging things. Fixes: 862bb360ef56 ("xfs: reflink extents from one file to another") Signed-off-by: Darrick J. Wong Reviewed-by: Brian Foster Signed-off-by: Sasha Levin --- fs/xfs/xfs_reflink.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 6622652a85a8..0b159a79a17c 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1010,6 +1010,7 @@ xfs_reflink_remap_extent( xfs_filblks_t rlen; xfs_filblks_t unmap_len; xfs_off_t newlen; + int64_t qres; int error; unmap_len = irec->br_startoff + irec->br_blockcount - destoff; @@ -1032,13 +1033,19 @@ xfs_reflink_remap_extent( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); - /* If we're not just clearing space, then do we have enough quota? */ - if (real_extent) { - error = xfs_trans_reserve_quota_nblks(tp, ip, - irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS); - if (error) - goto out_cancel; - } + /* + * Reserve quota for this operation. We don't know if the first unmap + * in the dest file will cause a bmap btree split, so we always reserve + * at least enough blocks for that split. If the extent being mapped + * in is written, we need to reserve quota for that too. + */ + qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); + if (real_extent) + qres += irec->br_blockcount; + error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0, + XFS_QMOPT_RES_REGBLKS); + if (error) + goto out_cancel; trace_xfs_reflink_remap(ip, irec->br_startoff, irec->br_blockcount, irec->br_startblock); -- GitLab From 13a8ac537f1d86c3a85f46f701083ce4460d2171 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Tue, 30 Jun 2020 15:36:05 +0300 Subject: [PATCH 0208/1304] RDMA/rxe: Skip dgid check in loopback mode [ Upstream commit 5c99274be8864519328aa74bc550ba410095bc1c ] In the loopback tests, the following call trace occurs. 
Call Trace: __rxe_do_task+0x1a/0x30 [rdma_rxe] rxe_qp_destroy+0x61/0xa0 [rdma_rxe] rxe_destroy_qp+0x20/0x60 [rdma_rxe] ib_destroy_qp_user+0xcc/0x220 [ib_core] uverbs_free_qp+0x3c/0xc0 [ib_uverbs] destroy_hw_idr_uobject+0x24/0x70 [ib_uverbs] uverbs_destroy_uobject+0x43/0x1b0 [ib_uverbs] uobj_destroy+0x41/0x70 [ib_uverbs] __uobj_get_destroy+0x39/0x70 [ib_uverbs] ib_uverbs_destroy_qp+0x88/0xc0 [ib_uverbs] ib_uverbs_handler_UVERBS_METHOD_INVOKE_WRITE+0xb9/0xf0 [ib_uverbs] ib_uverbs_cmd_verbs+0xb16/0xc30 [ib_uverbs] The root cause is that the actual RDMA connection is not created in the loopback tests and the rxe_match_dgid will fail randomly. To fix this call trace which appear in the loopback tests, skip check of the dgid. Fixes: 8700e3e7c485 ("Soft RoCE driver") Link: https://lore.kernel.org/r/20200630123605.446959-1-leon@kernel.org Signed-off-by: Zhu Yanjun Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/sw/rxe/rxe_recv.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c index 695a607e2d14..b8f3e65402d1 100644 --- a/drivers/infiniband/sw/rxe/rxe_recv.c +++ b/drivers/infiniband/sw/rxe/rxe_recv.c @@ -332,10 +332,14 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb) static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb) { + struct rxe_pkt_info *pkt = SKB_TO_PKT(skb); const struct ib_gid_attr *gid_attr; union ib_gid dgid; union ib_gid *pdgid; + if (pkt->mask & RXE_LOOPBACK_MASK) + return 0; + if (skb->protocol == htons(ETH_P_IP)) { ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr, (struct in6_addr *)&dgid); @@ -368,7 +372,7 @@ void rxe_rcv(struct sk_buff *skb) if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES)) goto drop; - if (unlikely(rxe_match_dgid(rxe, skb) < 0)) { + if (rxe_match_dgid(rxe, skb) < 0) { pr_warn_ratelimited("failed matching dgid\n"); goto drop; } -- GitLab From 
adf0cae3ea6c6236a8579a4a9f92da4021b6e293 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Thu, 25 Jun 2020 18:14:55 -0500 Subject: [PATCH 0209/1304] PCI: Fix pci_cfg_wait queue locking problem [ Upstream commit 2a7e32d0547f41c5ce244f84cf5d6ca7fccee7eb ] The pci_cfg_wait queue is used to prevent user-space config accesses to devices while they are recovering from reset. Previously we used these operations on pci_cfg_wait: __add_wait_queue(&pci_cfg_wait, ...) __remove_wait_queue(&pci_cfg_wait, ...) wake_up_all(&pci_cfg_wait) The wake_up acquires the wait queue lock, but the add and remove do not. Originally these were all protected by the pci_lock, but cdcb33f98244 ("PCI: Avoid possible deadlock on pci_lock and p->pi_lock"), moved wake_up_all() outside pci_lock, so it could race with add/remove operations, which caused occasional kernel panics, e.g., during vfio-pci hotplug/unplug testing: Unable to handle kernel read from unreadable memory at virtual address ffff802dac469000 Resolve this by using wait_event() instead of __add_wait_queue() and __remove_wait_queue(). The wait queue lock is held by both wait_event() and wake_up_all(), so it provides mutual exclusion. 
Fixes: cdcb33f98244 ("PCI: Avoid possible deadlock on pci_lock and p->pi_lock") Link: https://lore.kernel.org/linux-pci/79827f2f-9b43-4411-1376-b9063b67aee3@huawei.com/T/#u Based-on: https://lore.kernel.org/linux-pci/20191210031527.40136-1-zhengxiang9@huawei.com/ Based-on-patch-by: Xiang Zheng Signed-off-by: Bjorn Helgaas Tested-by: Xiang Zheng Cc: Heyi Guo Cc: Biaoxiang Ye Signed-off-by: Sasha Levin --- drivers/pci/access.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/pci/access.c b/drivers/pci/access.c index a3ad2fe185b9..3c8ffd62dc00 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -204,17 +204,13 @@ EXPORT_SYMBOL(pci_bus_set_ops); static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait); static noinline void pci_wait_cfg(struct pci_dev *dev) + __must_hold(&pci_lock) { - DECLARE_WAITQUEUE(wait, current); - - __add_wait_queue(&pci_cfg_wait, &wait); do { - set_current_state(TASK_UNINTERRUPTIBLE); raw_spin_unlock_irq(&pci_lock); - schedule(); + wait_event(pci_cfg_wait, !dev->block_cfg_access); raw_spin_lock_irq(&pci_lock); } while (dev->block_cfg_access); - __remove_wait_queue(&pci_cfg_wait, &wait); } /* Returns 0 on success, negative values indicate error. */ -- GitLab From 92887e8b7e81f52fd4155cf3452c7b6af236865e Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Thu, 2 Jul 2020 13:45:00 +0800 Subject: [PATCH 0210/1304] leds: core: Flush scheduled work for system suspend [ Upstream commit 302a085c20194bfa7df52e0fe684ee0c41da02e6 ] Sometimes LED won't be turned off by LED_CORE_SUSPENDRESUME flag upon system suspend. led_set_brightness_nopm() uses schedule_work() to set LED brightness. However, there's no guarantee that the scheduled work gets executed because no one flushes the work. So flush the scheduled work to make sure LED gets turned off. 
Signed-off-by: Kai-Heng Feng Acked-by: Jacek Anaszewski Fixes: 81fe8e5b73e3 ("leds: core: Add led_set_brightness_nosleep{nopm} functions") Signed-off-by: Pavel Machek Signed-off-by: Sasha Levin --- drivers/leds/led-class.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 3c7e3487b373..4e63dd2bfcf8 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -173,6 +173,7 @@ void led_classdev_suspend(struct led_classdev *led_cdev) { led_cdev->flags |= LED_SUSPENDED; led_set_brightness_nopm(led_cdev, 0); + flush_work(&led_cdev->set_brightness_work); } EXPORT_SYMBOL_GPL(led_classdev_suspend); -- GitLab From f275958ecef38b6c3d034a621a77ee00fa0b2ffb Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 12 Jul 2020 01:53:17 +0300 Subject: [PATCH 0211/1304] drm: panel: simple: Fix bpc for LG LB070WV8 panel [ Upstream commit a6ae2fe5c9f9fd355a48fb7d21c863e5b20d6c9c ] The LG LB070WV8 panel incorrectly reports a 16 bits per component value, while the panel uses 8 bits per component. Fix it. 
Fixes: dd0150026901 ("drm/panel: simple: Add support for LG LB070WV8 800x480 7" panel") Signed-off-by: Laurent Pinchart Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200711225317.28476-1-laurent.pinchart+renesas@ideasonboard.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/panel/panel-simple.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 654fea2b4312..8814aa38c5e7 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1503,7 +1503,7 @@ static const struct drm_display_mode lg_lb070wv8_mode = { static const struct panel_desc lg_lb070wv8 = { .modes = &lg_lb070wv8_mode, .num_modes = 1, - .bpc = 16, + .bpc = 8, .size = { .width = 151, .height = 91, -- GitLab From 84d2717b22603d86628dbca1d66a3bd55870d857 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Wed, 8 Jul 2020 15:38:00 +0200 Subject: [PATCH 0212/1304] phy: exynos5-usbdrd: Calibrating makes sense only for USB2.0 PHY [ Upstream commit dcbabfeb17c3c2fdb6bc92a3031ecd37df1834a8 ] PHY calibration is needed only for USB2.0 (UTMI) PHY, so skip calling calibration code when phy_calibrate() is called for USB3.0 (PIPE3) PHY. 
Fixes: d8c80bb3b55b ("phy: exynos5-usbdrd: Calibrate LOS levels for exynos5420/5800") Signed-off-by: Marek Szyprowski Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20200708133800.3336-1-m.szyprowski@samsung.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/phy/samsung/phy-exynos5-usbdrd.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index b8b226a20014..1feb1e1bf85e 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -717,7 +717,9 @@ static int exynos5_usbdrd_phy_calibrate(struct phy *phy) struct phy_usb_instance *inst = phy_get_drvdata(phy); struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); - return exynos5420_usbdrd_phy_calibrate(phy_drd); + if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) + return exynos5420_usbdrd_phy_calibrate(phy_drd); + return 0; } static const struct phy_ops exynos5_usbdrd_phy_ops = { -- GitLab From a135b366c37b942117d19e1bdbb955d81eadb222 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sun, 12 Jul 2020 08:24:53 -0700 Subject: [PATCH 0213/1304] drm/bridge: sil_sii8620: initialize return of sii8620_readb [ Upstream commit 02cd2d3144653e6e2a0c7ccaa73311e48e2dc686 ] clang static analysis flags this error sil-sii8620.c:184:2: warning: Undefined or garbage value returned to caller [core.uninitialized.UndefReturn] return ret; ^~~~~~~~~~ sii8620_readb calls sii8620_read_buf. sii8620_read_buf can return without setting its output pararmeter 'ret'. So initialize ret. 
Fixes: ce6e153f414a ("drm/bridge: add Silicon Image SiI8620 driver") Signed-off-by: Tom Rix Reviewed-by: Laurent Pinchart Reviewed-by: Andrzej Hajda Signed-off-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20200712152453.27510-1-trix@redhat.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/bridge/sil-sii8620.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index a6e8f4591e63..1ea2a1b0fe37 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -180,7 +180,7 @@ static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len) static u8 sii8620_readb(struct sii8620 *ctx, u16 addr) { - u8 ret; + u8 ret = 0; sii8620_read_buf(ctx, addr, &ret, 1); return ret; -- GitLab From 96867800fc148f854ec34641c976ff5bcf8c8dd7 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 9 Jul 2020 20:23:19 +0800 Subject: [PATCH 0214/1304] scsi: scsi_debug: Add check for sdebug_max_queue during module init [ Upstream commit c87bf24cfb60bce27b4d2c7e56ebfd86fb9d16bb ] sdebug_max_queue should not exceed SDEBUG_CANQUEUE, otherwise crashes like this can be triggered by passing an out-of-range value: Hardware name: Huawei D06 /D06, BIOS Hisilicon D06 UEFI RC0 - V1.16.01 03/15/2019 pstate: 20400009 (nzCv daif +PAN -UAO BTYPE=--) pc : schedule_resp+0x2a4/0xa70 [scsi_debug] lr : schedule_resp+0x52c/0xa70 [scsi_debug] sp : ffff800022ab36f0 x29: ffff800022ab36f0 x28: ffff0023a935a610 x27: ffff800008e0a648 x26: 0000000000000003 x25: ffff0023e84f3200 x24: 00000000003d0900 x23: 0000000000000000 x22: 0000000000000000 x21: ffff0023be60a320 x20: ffff0023be60b538 x19: ffff800008e13000 x18: 0000000000000000 x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000 x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000 x11: 0000000000000000 x10: 0000000000000000 x9 : 0000000000000001 x8 : 0000000000000000 x7 : 
0000000000000000 x6 : 00000000000000c1 x5 : 0000020000200000 x4 : dead0000000000ff x3 : 0000000000000200 x2 : 0000000000000200 x1 : ffff800008e13d88 x0 : 0000000000000000 Call trace: schedule_resp+0x2a4/0xa70 [scsi_debug] scsi_debug_queuecommand+0x2c4/0x9e0 [scsi_debug] scsi_queue_rq+0x698/0x840 __blk_mq_try_issue_directly+0x108/0x228 blk_mq_request_issue_directly+0x58/0x98 blk_mq_try_issue_list_directly+0x5c/0xf0 blk_mq_sched_insert_requests+0x18c/0x200 blk_mq_flush_plug_list+0x11c/0x190 blk_flush_plug_list+0xdc/0x110 blk_finish_plug+0x38/0x210 blkdev_direct_IO+0x450/0x4d8 generic_file_read_iter+0x84/0x180 blkdev_read_iter+0x3c/0x50 aio_read+0xc0/0x170 io_submit_one+0x5c8/0xc98 __arm64_sys_io_submit+0x1b0/0x258 el0_svc_common.constprop.3+0x68/0x170 do_el0_svc+0x24/0x90 el0_sync_handler+0x13c/0x1a8 el0_sync+0x158/0x180 Code: 528847e0 72a001e0 6b00003f 540018cd (3941c340) In addition, it should not be less than 1. So add checks for these, and fail the module init for those cases. [mkp: changed if condition to match error message] Link: https://lore.kernel.org/r/1594297400-24756-2-git-send-email-john.garry@huawei.com Fixes: c483739430f1 ("scsi_debug: add multiple queue support") Reviewed-by: Ming Lei Acked-by: Douglas Gilbert Signed-off-by: John Garry Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/scsi_debug.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index a1dbae806fde..d2b045eb7274 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5384,6 +5384,12 @@ static int __init scsi_debug_init(void) pr_err("submit_queues must be 1 or more\n"); return -EINVAL; } + + if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) { + pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE); + return -EINVAL; + } + sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue), GFP_KERNEL); if (sdebug_q_arr == NULL) -- GitLab From daa69a213f038f4dafb4feda19db9d135b5ce308 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 8 Jul 2020 14:58:57 +0300 Subject: [PATCH 0215/1304] mwifiex: Prevent memory corruption handling keys [ Upstream commit e18696786548244914f36ec3c46ac99c53df99c3 ] The length of the key comes from the network and it's a 16 bit number. It needs to be capped to prevent a buffer overflow. 
Fixes: 5e6e3a92b9a4 ("wireless: mwifiex: initial commit for Marvell mwifiex driver") Signed-off-by: Dan Carpenter Acked-by: Ganapathi Bhat Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200708115857.GA13729@mwanda Signed-off-by: Sasha Levin --- .../wireless/marvell/mwifiex/sta_cmdresp.c | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 69e3b624adbb..797c2e978394 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -581,6 +581,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, { struct host_cmd_ds_802_11_key_material *key = &resp->params.key_material; + int len; + + len = le16_to_cpu(key->key_param_set.key_len); + if (len > sizeof(key->key_param_set.key)) + return -EINVAL; if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) { @@ -594,9 +599,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, memset(priv->aes_key.key_param_set.key, 0, sizeof(key->key_param_set.key)); - priv->aes_key.key_param_set.key_len = key->key_param_set.key_len; - memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, - le16_to_cpu(priv->aes_key.key_param_set.key_len)); + priv->aes_key.key_param_set.key_len = cpu_to_le16(len); + memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len); return 0; } @@ -611,9 +615,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { struct host_cmd_ds_802_11_key_material_v2 *key_v2; - __le16 len; + int len; key_v2 = &resp->params.key_material_v2; + + len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len); + if (len > WLAN_KEY_LEN_CCMP) + return -EINVAL; + if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { if 
((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) { mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); @@ -629,10 +638,9 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0, WLAN_KEY_LEN_CCMP); priv->aes_key_v2.key_param_set.key_params.aes.key_len = - key_v2->key_param_set.key_params.aes.key_len; - len = priv->aes_key_v2.key_param_set.key_params.aes.key_len; + cpu_to_le16(len); memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key, - key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len)); + key_v2->key_param_set.key_params.aes.key, len); return 0; } -- GitLab From dbc374d039a159b7e7cc0d68530552c46a0db498 Mon Sep 17 00:00:00 2001 From: Milton Miller Date: Thu, 16 Jul 2020 09:37:04 +1000 Subject: [PATCH 0216/1304] powerpc/vdso: Fix vdso cpu truncation [ Upstream commit a9f675f950a07d5c1dbcbb97aabac56f5ed085e3 ] The code in vdso_cpu_init that exposes the cpu and numa node to userspace via SPRG_VDSO incorrctly masks the cpu to 12 bits. This means that any kernel running on a box with more than 4096 threads (NR_CPUS advertises a limit of of 8192 cpus) would expose userspace to two cpu contexts running at the same time with the same cpu number. Note: I'm not aware of any distro shipping a kernel with support for more than 4096 threads today, nor of any system image that currently exceeds 4096 threads. Found via code browsing. 
Fixes: 18ad51dd342a7eb09dbcd059d0b451b616d4dafc ("powerpc: Add VDSO version of getcpu") Signed-off-by: Milton Miller Signed-off-by: Anton Blanchard Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200715233704.1352257-1-anton@ozlabs.org Signed-off-by: Sasha Levin --- arch/powerpc/kernel/vdso.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 65b3bdb99f0b..31ab6eb61e26 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -705,7 +705,7 @@ int vdso_getcpu_init(void) node = cpu_to_node(cpu); WARN_ON_ONCE(node > 0xffff); - val = (cpu & 0xfff) | ((node & 0xffff) << 16); + val = (cpu & 0xffff) | ((node & 0xffff) << 16); mtspr(SPRN_SPRG_VDSO_WRITE, val); get_paca()->sprg_vdso = val; -- GitLab From 3383a99923a14c7b93e108731a4f4a1534b0f0a8 Mon Sep 17 00:00:00 2001 From: Yuval Basson Date: Wed, 8 Jul 2020 22:55:26 +0300 Subject: [PATCH 0217/1304] RDMA/qedr: SRQ's bug fixes [ Upstream commit acca72e2b031b9fbb4184511072bd246a0abcebc ] QP's with the same SRQ, working on different CQs and running in parallel on different CPUs could lead to a race when maintaining the SRQ consumer count, and leads to FW running out of SRQs. Update the consumer atomically. Make sure the wqe_prod is updated after the sge_prod due to FW requirements. 
Fixes: 3491c9e799fb ("qedr: Add support for kernel mode SRQ's") Link: https://lore.kernel.org/r/20200708195526.31040-1-ybason@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Yuval Basson Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/hw/qedr/qedr.h | 4 ++-- drivers/infiniband/hw/qedr/verbs.c | 22 ++++++++++------------ 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index a2d708dceb8d..cca12100c583 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -351,10 +351,10 @@ struct qedr_srq_hwq_info { u32 wqe_prod; u32 sge_prod; u32 wr_prod_cnt; - u32 wr_cons_cnt; + atomic_t wr_cons_cnt; u32 num_elems; - u32 *virt_prod_pair_addr; + struct rdma_srq_producers *virt_prod_pair_addr; dma_addr_t phy_prod_pair_addr; }; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 38fe2f741375..7b26afc7fef3 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3577,7 +3577,7 @@ static u32 qedr_srq_elem_left(struct qedr_srq_hwq_info *hw_srq) * count and consumer count and subtract it from max * work request supported so that we get elements left. 
*/ - used = hw_srq->wr_prod_cnt - hw_srq->wr_cons_cnt; + used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt); return hw_srq->max_wr - used; } @@ -3592,7 +3592,6 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, unsigned long flags; int status = 0; u32 num_sge; - u32 offset; spin_lock_irqsave(&srq->lock, flags); @@ -3605,7 +3604,8 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, if (!qedr_srq_elem_left(hw_srq) || wr->num_sge > srq->hw_srq.max_sges) { DP_ERR(dev, "Can't post WR (%d,%d) || (%d > %d)\n", - hw_srq->wr_prod_cnt, hw_srq->wr_cons_cnt, + hw_srq->wr_prod_cnt, + atomic_read(&hw_srq->wr_cons_cnt), wr->num_sge, srq->hw_srq.max_sges); status = -ENOMEM; *bad_wr = wr; @@ -3639,22 +3639,20 @@ int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, hw_srq->sge_prod++; } - /* Flush WQE and SGE information before + /* Update WQE and SGE information before * updating producer. */ - wmb(); + dma_wmb(); /* SRQ producer is 8 bytes. Need to update SGE producer index * in first 4 bytes and need to update WQE producer in * next 4 bytes. */ - *srq->hw_srq.virt_prod_pair_addr = hw_srq->sge_prod; - offset = offsetof(struct rdma_srq_producers, wqe_prod); - *((u8 *)srq->hw_srq.virt_prod_pair_addr + offset) = - hw_srq->wqe_prod; + srq->hw_srq.virt_prod_pair_addr->sge_prod = hw_srq->sge_prod; + /* Make sure sge producer is updated first */ + dma_wmb(); + srq->hw_srq.virt_prod_pair_addr->wqe_prod = hw_srq->wqe_prod; - /* Flush producer after updating it. 
*/ - wmb(); wr = wr->next; } @@ -4077,7 +4075,7 @@ static int process_resp_one_srq(struct qedr_dev *dev, struct qedr_qp *qp, } else { __process_resp_one(dev, qp, cq, wc, resp, wr_id); } - srq->hw_srq.wr_cons_cnt++; + atomic_inc(&srq->hw_srq.wr_cons_cnt); return 1; } -- GitLab From 7573ee6f763e8db3a104ef7dabc5859122e3132c Mon Sep 17 00:00:00 2001 From: Mikhail Malygin Date: Thu, 16 Jul 2020 22:03:41 +0300 Subject: [PATCH 0218/1304] RDMA/rxe: Prevent access to wr->next ptr afrer wr is posted to send queue [ Upstream commit 5f0b2a6093a4d9aab093964c65083fe801ef1e58 ] rxe_post_send_kernel() iterates over linked list of wr's, until the wr->next ptr is NULL. However if we've got an interrupt after last wr is posted, control may be returned to the code after send completion callback is executed and wr memory is freed. As a result, wr->next pointer may contain incorrect value leading to panic. Store the wr->next on the stack before posting it. Fixes: 8700e3e7c485 ("Soft RoCE driver") Link: https://lore.kernel.org/r/20200716190340.23453-1-m.malygin@yadro.com Signed-off-by: Mikhail Malygin Signed-off-by: Sergey Kojushev Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/sw/rxe/rxe_verbs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index f5b1e0ad6142..3a94eb5edcf9 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -733,6 +733,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, unsigned int mask; unsigned int length = 0; int i; + struct ib_send_wr *next; while (wr) { mask = wr_opcode_mask(wr->opcode, qp); @@ -749,6 +750,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, break; } + next = wr->next; + length = 0; for (i = 0; i < wr->num_sge; i++) length += wr->sg_list[i].length; @@ -759,7 +762,7 @@ static int 
rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr, *bad_wr = wr; break; } - wr = wr->next; + wr = next; } rxe_run_task(&qp->req.task, 1); -- GitLab From 8b26055ccdc02047e39caa128e65add116b29525 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 16 Jul 2020 16:47:20 +0100 Subject: [PATCH 0219/1304] staging: rtl8192u: fix a dubious looking mask before a shift [ Upstream commit c4283950a9a4d3bf4a3f362e406c80ab14f10714 ] Currently the masking of ret with 0xff and followed by a right shift of 8 bits always leaves a zero result. It appears the mask of 0xff is incorrect and should be 0xff00, but I don't have the hardware to test this. Fix this to mask the upper 8 bits before shifting. [ Not tested ] Addresses-Coverity: ("Operands don't affect result") Fixes: 8fc8598e61f6 ("Staging: Added Realtek rtl8192u driver to staging") Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20200716154720.1710252-1-colin.king@canonical.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/staging/rtl8192u/r8192U_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c index 2066a1d9bc84..87244a208976 100644 --- a/drivers/staging/rtl8192u/r8192U_core.c +++ b/drivers/staging/rtl8192u/r8192U_core.c @@ -2484,7 +2484,7 @@ static int rtl8192_read_eeprom_info(struct net_device *dev) ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1)); if (ret < 0) return ret; - priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff) >> 8; + priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8; } else priv->EEPROMTxPowerLevelCCK = 0x10; RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK); -- GitLab From 35903b8dbbddaca48df46cbb84639aa76c40eac8 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Fri, 17 Jul 2020 15:59:25 +0800 Subject: [PATCH 0220/1304] PCI/ASPM: Add missing newline in sysfs 'policy' [ Upstream commit 
3167e3d340c092fd47924bc4d23117a3074ef9a9 ] When I cat ASPM parameter 'policy' by sysfs, it displays as follows. Add a newline for easy reading. Other sysfs attributes already include a newline. [root@localhost ~]# cat /sys/module/pcie_aspm/parameters/policy [default] performance powersave powersupersave [root@localhost ~]# Fixes: 7d715a6c1ae5 ("PCI: add PCI Express ASPM support") Link: https://lore.kernel.org/r/1594972765-10404-1-git-send-email-wangxiongfeng2@huawei.com Signed-off-by: Xiongfeng Wang Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin --- drivers/pci/pcie/aspm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 6e50f84733b7..279f9f0197b0 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1164,6 +1164,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); else cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); + cnt += sprintf(buffer + cnt, "\n"); return cnt; } -- GitLab From 2cb6880f0e8cde9c4ee63dd19d1f3dd19e670829 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:24 +0530 Subject: [PATCH 0221/1304] powerpc/book3s64/pkeys: Use PVR check instead of cpu feature [ Upstream commit d79e7a5f26f1d179cbb915a8bf2469b6d7431c29 ] We are wrongly using CPU_FTRS_POWER8 to check for P8 support. Instead, we should use PVR value. Now considering we are using CPU_FTRS_POWER8, that implies we returned true for P9 with older firmware. Keep the same behavior by checking for P9 PVR value. 
Fixes: cf43d3b26452 ("powerpc: Enable pkey subsystem") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-2-aneesh.kumar@linux.ibm.com Signed-off-by: Sasha Levin --- arch/powerpc/mm/pkeys.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c index 7124af17da72..a587f9013988 100644 --- a/arch/powerpc/mm/pkeys.c +++ b/arch/powerpc/mm/pkeys.c @@ -81,13 +81,17 @@ int pkey_initialize(void) scan_pkey_feature(); /* - * Let's assume 32 pkeys on P8 bare metal, if its not defined by device - * tree. We make this exception since skiboot forgot to expose this - * property on power8. + * Let's assume 32 pkeys on P8/P9 bare metal, if its not defined by device + * tree. We make this exception since some version of skiboot forgot to + * expose this property on power8/9. */ - if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) && - cpu_has_feature(CPU_FTRS_POWER8)) - pkeys_total = 32; + if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR)) { + unsigned long pvr = mfspr(SPRN_PVR); + + if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E || + PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9) + pkeys_total = 32; + } /* * Adjust the upper limit, based on the number of bits supported by -- GitLab From 58a42b449d71ad9e46aea134e8a498006bbec274 Mon Sep 17 00:00:00 2001 From: Marco Felsch Date: Thu, 11 Jun 2020 14:43:32 +0200 Subject: [PATCH 0222/1304] drm/imx: tve: fix regulator_disable error path [ Upstream commit 7bb58b987fee26da2a1665c01033022624986b7c ] Add missing regulator_disable() as devm_action to avoid dedicated unbind() callback and fix the missing error handling. 
Fixes: fcbc51e54d2a ("staging: drm/imx: Add support for Television Encoder (TVEv2)") Signed-off-by: Marco Felsch Signed-off-by: Philipp Zabel Signed-off-by: Sasha Levin --- drivers/gpu/drm/imx/imx-tve.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index cffd3310240e..c19c1dfbfcdc 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -498,6 +498,13 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) return 0; } +static void imx_tve_disable_regulator(void *data) +{ + struct imx_tve *tve = data; + + regulator_disable(tve->dac_reg); +} + static bool imx_tve_readable_reg(struct device *dev, unsigned int reg) { return (reg % 4 == 0) && (reg <= 0xdc); @@ -622,6 +629,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) ret = regulator_enable(tve->dac_reg); if (ret) return ret; + ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator, tve); + if (ret) + return ret; } tve->clk = devm_clk_get(dev, "tve"); @@ -668,18 +678,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) return 0; } -static void imx_tve_unbind(struct device *dev, struct device *master, - void *data) -{ - struct imx_tve *tve = dev_get_drvdata(dev); - - if (!IS_ERR(tve->dac_reg)) - regulator_disable(tve->dac_reg); -} - static const struct component_ops imx_tve_ops = { .bind = imx_tve_bind, - .unbind = imx_tve_unbind, }; static int imx_tve_probe(struct platform_device *pdev) -- GitLab From ac780c229e0408f587194e307106da2b806071e0 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 16 Jul 2020 10:50:55 +0200 Subject: [PATCH 0223/1304] USB: serial: iuu_phoenix: fix led-activity helpers [ Upstream commit de37458f8c2bfc465500a1dd0d15dbe96d2a698c ] The set-led command is eight bytes long and starts with a command byte followed by six bytes of RGB data and ends with a byte encoding a 
frequency (see iuu_led() and iuu_rgbf_fill_buffer()). The led activity helpers had a few long-standing bugs which corrupted the command packets by inserting a second command byte and thereby offsetting the RGB data and dropping the frequency in non-xmas mode. In xmas mode, a related off-by-one error left the frequency field uninitialised. Fixes: 60a8fc017103 ("USB: add iuu_phoenix driver") Reported-by: George Spelvin Link: https://lore.kernel.org/r/20200716085056.31471-1-johan@kernel.org Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold Signed-off-by: Sasha Levin --- drivers/usb/serial/iuu_phoenix.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index e287fd52c575..734f18d0a7f7 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -353,10 +353,11 @@ static void iuu_led_activity_on(struct urb *urb) struct usb_serial_port *port = urb->context; int result; char *buf_ptr = port->write_urb->transfer_buffer; - *buf_ptr++ = IUU_SET_LED; + if (xmas) { - get_random_bytes(buf_ptr, 6); - *(buf_ptr+7) = 1; + buf_ptr[0] = IUU_SET_LED; + get_random_bytes(buf_ptr + 1, 6); + buf_ptr[7] = 1; } else { iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255); } @@ -374,13 +375,14 @@ static void iuu_led_activity_off(struct urb *urb) struct usb_serial_port *port = urb->context; int result; char *buf_ptr = port->write_urb->transfer_buffer; + if (xmas) { iuu_rxcmd(urb); return; - } else { - *buf_ptr++ = IUU_SET_LED; - iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255); } + + iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255); + usb_fill_bulk_urb(port->write_urb, port->serial->dev, usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress), -- GitLab From ebb23423609067b714e0e05358b0cf4bbd75d239 Mon Sep 17 00:00:00 2001 From: Kars Mulder Date: Tue, 7 Jul 2020 16:43:50 +0200 Subject: [PATCH 0224/1304] usb: core: fix 
quirks_param_set() writing to a const pointer [ Upstream commit b1b6bed3b5036509b449b5965285d5057ba42527 ] The function quirks_param_set() takes as argument a const char* pointer to the new value of the usbcore.quirks parameter. It then casts this pointer to a non-const char* pointer and passes it to the strsep() function, which overwrites the value. Fix this by creating a copy of the value using kstrdup() and letting that copy be written to by strsep(). Fixes: 027bd6cafd9a ("usb: core: Add "quirks" parameter for usbcore") Signed-off-by: Kars Mulder Link: https://lore.kernel.org/r/5ee2-5f048a00-21-618c5c00@230659773 Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/core/quirks.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index e0b77674869c..c96c50faccf7 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -25,17 +25,23 @@ static unsigned int quirk_count; static char quirks_param[128]; -static int quirks_param_set(const char *val, const struct kernel_param *kp) +static int quirks_param_set(const char *value, const struct kernel_param *kp) { - char *p, *field; + char *val, *p, *field; u16 vid, pid; u32 flags; size_t i; int err; + val = kstrdup(value, GFP_KERNEL); + if (!val) + return -ENOMEM; + err = param_set_copystring(val, kp); - if (err) + if (err) { + kfree(val); return err; + } mutex_lock(&quirk_mutex); @@ -60,10 +66,11 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) if (!quirk_list) { quirk_count = 0; mutex_unlock(&quirk_mutex); + kfree(val); return -ENOMEM; } - for (i = 0, p = (char *)val; p && *p;) { + for (i = 0, p = val; p && *p;) { /* Each entry consists of VID:PID:flags */ field = strsep(&p, ":"); if (!field) @@ -144,6 +151,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) unlock: mutex_unlock(&quirk_mutex); + kfree(val); return 0; } -- GitLab From 
34c4882cf00ff81685698336506fcc39f93e72b2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 16 Jun 2020 12:19:49 +0300 Subject: [PATCH 0225/1304] thermal: ti-soc-thermal: Fix reversed condition in ti_thermal_expose_sensor() [ Upstream commit 0f348db01fdf128813fdd659fcc339038fb421a4 ] This condition is reversed and will cause breakage. Fixes: 7440f518dad9 ("thermal/drivers/ti-soc-thermal: Avoid dereferencing ERR_PTR") Signed-off-by: Dan Carpenter Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200616091949.GA11940@mwanda Signed-off-by: Sasha Levin --- drivers/thermal/ti-soc-thermal/ti-thermal-common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index 452e034aedc1..343da0031299 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -183,7 +183,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, data = ti_bandgap_get_sensor_data(bgp, id); - if (!IS_ERR_OR_NULL(data)) + if (IS_ERR_OR_NULL(data)) data = ti_thermal_build_data(bgp, id); if (!data) -- GitLab From e13f98ef31b3c3fd8e4788f312576f9f414314c2 Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Thu, 16 Jul 2020 11:57:42 -0600 Subject: [PATCH 0226/1304] coresight: tmc: Fix TMC mode read in tmc_read_unprepare_etb() [ Upstream commit d021f5c5ff679432c5e9faee0fd7350db2efb97c ] Reading TMC mode register without proper coresight power management can lead to exceptions like the one in the call trace below in tmc_read_unprepare_etb() when the trace data is read after the sink is disabled. So fix this by having a check for coresight sysfs mode before reading TMC mode management register in tmc_read_unprepare_etb() similar to tmc_read_prepare_etb(). 
SError Interrupt on CPU6, code 0xbe000411 -- SError pstate: 80400089 (Nzcv daIf +PAN -UAO) pc : tmc_read_unprepare_etb+0x74/0x108 lr : tmc_read_unprepare_etb+0x54/0x108 sp : ffffff80d9507c30 x29: ffffff80d9507c30 x28: ffffff80b3569a0c x27: 0000000000000000 x26: 00000000000a0001 x25: ffffff80cbae9550 x24: 0000000000000010 x23: ffffffd07296b0f0 x22: ffffffd0109ee028 x21: 0000000000000000 x20: ffffff80d19e70e0 x19: ffffff80d19e7080 x18: 0000000000000000 x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000 x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000 x11: 0000000000000000 x10: dfffffd000000001 x9 : 0000000000000000 x8 : 0000000000000002 x7 : ffffffd071d0fe78 x6 : 0000000000000000 x5 : 0000000000000080 x4 : 0000000000000001 x3 : ffffffd071d0fe98 x2 : 0000000000000000 x1 : 0000000000000004 x0 : 0000000000000001 Kernel panic - not syncing: Asynchronous SError Interrupt Fixes: 4525412a5046 ("coresight: tmc: making prepare/unprepare functions generic") Reported-by: Mike Leach Signed-off-by: Sai Prakash Ranjan Tested-by: Mike Leach Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20200716175746.3338735-14-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/hwtracing/coresight/coresight-tmc-etf.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index e90af39283b1..29dc2eac5b06 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -583,15 +583,14 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata) spin_lock_irqsave(&drvdata->spinlock, flags); - /* There is no point in reading a TMC in HW FIFO mode */ - mode = readl_relaxed(drvdata->base + TMC_MODE); - if (mode != TMC_MODE_CIRCULAR_BUFFER) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); - return -EINVAL; - } - /* Re-enable the TMC 
if need be */ if (drvdata->mode == CS_MODE_SYSFS) { + /* There is no point in reading a TMC in HW FIFO mode */ + mode = readl_relaxed(drvdata->base + TMC_MODE); + if (mode != TMC_MODE_CIRCULAR_BUFFER) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + return -EINVAL; + } /* * The trace run will continue with the same allocated trace * buffer. As such zero-out the buffer so that we don't end -- GitLab From 1aadfae64db50bba5d81a9a6724a62ed4a60ddf1 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Tue, 21 Jul 2020 21:47:18 +0800 Subject: [PATCH 0227/1304] MIPS: OCTEON: add missing put_device() call in dwc3_octeon_device_init() [ Upstream commit e8b9fc10f2615b9a525fce56981e40b489528355 ] if of_find_device_by_node() succeed, dwc3_octeon_device_init() doesn't have a corresponding put_device(). Thus add put_device() to fix the exception handling for this function implementation. Fixes: 93e502b3c2d4 ("MIPS: OCTEON: Platform support for OCTEON III USB controller") Signed-off-by: Yu Kuai Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/cavium-octeon/octeon-usb.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c index bfdfaf32d2c4..75189ff2f3c7 100644 --- a/arch/mips/cavium-octeon/octeon-usb.c +++ b/arch/mips/cavium-octeon/octeon-usb.c @@ -517,6 +517,7 @@ static int __init dwc3_octeon_device_init(void) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { + put_device(&pdev->dev); dev_err(&pdev->dev, "No memory resources\n"); return -ENXIO; } @@ -528,8 +529,10 @@ static int __init dwc3_octeon_device_init(void) * know the difference. 
*/ base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) + if (IS_ERR(base)) { + put_device(&pdev->dev); return PTR_ERR(base); + } mutex_lock(&dwc3_octeon_clocks_mutex); dwc3_octeon_clocks_start(&pdev->dev, (u64)base); -- GitLab From fa7fd9ba18533e9aa5f718a06de3deb522a4b587 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 16 Jul 2020 14:09:48 +0200 Subject: [PATCH 0228/1304] usb: dwc2: Fix error path in gadget registration [ Upstream commit 33a06f1300a79cfd461cea0268f05e969d4f34ec ] When gadget registration fails, one should not call usb_del_gadget_udc(). Ensure this by setting gadget->udc to NULL. Also in case of a failure there is no need to disable low-level hardware, so return immiedetly instead of jumping to error_init label. This fixes the following kernel NULL ptr dereference on gadget failure (can be easily triggered with g_mass_storage without any module parameters): dwc2 12480000.hsotg: dwc2_check_params: Invalid parameter besl=1 dwc2 12480000.hsotg: dwc2_check_params: Invalid parameter g_np_tx_fifo_size=1024 dwc2 12480000.hsotg: EPs: 16, dedicated fifos, 7808 entries in SPRAM Mass Storage Function, version: 2009/09/11 LUN: removable file: (no medium) no file given for LUN0 g_mass_storage 12480000.hsotg: failed to start g_mass_storage: -22 8<--- cut here --- Unable to handle kernel NULL pointer dereference at virtual address 00000104 pgd = (ptrval) [00000104] *pgd=00000000 Internal error: Oops: 805 [#1] PREEMPT SMP ARM Modules linked in: CPU: 0 PID: 12 Comm: kworker/0:1 Not tainted 5.8.0-rc5 #3133 Hardware name: Samsung Exynos (Flattened Device Tree) Workqueue: events deferred_probe_work_func PC is at usb_del_gadget_udc+0x38/0xc4 LR is at __mutex_lock+0x31c/0xb18 ... Process kworker/0:1 (pid: 12, stack limit = 0x(ptrval)) Stack: (0xef121db0 to 0xef122000) ... 
[] (usb_del_gadget_udc) from [] (dwc2_hsotg_remove+0x10/0x20) [] (dwc2_hsotg_remove) from [] (dwc2_driver_probe+0x57c/0x69c) [] (dwc2_driver_probe) from [] (platform_drv_probe+0x6c/0xa4) [] (platform_drv_probe) from [] (really_probe+0x200/0x48c) [] (really_probe) from [] (driver_probe_device+0x78/0x1fc) [] (driver_probe_device) from [] (bus_for_each_drv+0x74/0xb8) [] (bus_for_each_drv) from [] (__device_attach+0xd4/0x16c) [] (__device_attach) from [] (bus_probe_device+0x88/0x90) [] (bus_probe_device) from [] (deferred_probe_work_func+0x3c/0xd0) [] (deferred_probe_work_func) from [] (process_one_work+0x234/0x7dc) [] (process_one_work) from [] (worker_thread+0x44/0x51c) [] (worker_thread) from [] (kthread+0x158/0x1a0) [] (kthread) from [] (ret_from_fork+0x14/0x20) Exception stack(0xef121fb0 to 0xef121ff8) ... ---[ end trace 9724c2fc7cc9c982 ]--- While fixing this also fix the double call to dwc2_lowlevel_hw_disable() if dr_mode is set to USB_DR_MODE_PERIPHERAL. In such case low-level hardware is already disabled before calling usb_add_gadget_udc(). That function correctly preserves low-level hardware state, there is no need for the second unconditional dwc2_lowlevel_hw_disable() call. 
Fixes: 207324a321a8 ("usb: dwc2: Postponed gadget registration to the udc class driver") Acked-by: Minas Harutyunyan Signed-off-by: Marek Szyprowski Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin --- drivers/usb/dwc2/platform.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index c35c93f16a49..a9e86f5e6eaa 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -499,6 +499,7 @@ static int dwc2_driver_probe(struct platform_device *dev) if (hsotg->gadget_enabled) { retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget); if (retval) { + hsotg->gadget.udc = NULL; dwc2_hsotg_remove(hsotg); goto error; } @@ -507,7 +508,8 @@ static int dwc2_driver_probe(struct platform_device *dev) return 0; error: - dwc2_lowlevel_hw_disable(hsotg); + if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) + dwc2_lowlevel_hw_disable(hsotg); return retval; } -- GitLab From 73add9e317c34d01e6d487256a9975633ecd8e9c Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Thu, 23 Jul 2020 09:25:51 +1000 Subject: [PATCH 0229/1304] scsi: mesh: Fix panic after host or bus reset [ Upstream commit edd7dd2292ab9c3628b65c4d04514c3068ad54f6 ] Booting Linux with a Conner CP3200 drive attached to the MESH SCSI bus results in EH measures and a panic: [ 25.499838] mesh: configured for synchronous 5 MB/s [ 25.787154] mesh: performing initial bus reset... [ 29.867115] scsi host0: MESH [ 29.929527] mesh: target 0 synchronous at 3.6 MB/s [ 29.998763] scsi 0:0:0:0: Direct-Access CONNER CP3200-200mb-3.5 4040 PQ: 0 ANSI: 1 CCS [ 31.989975] sd 0:0:0:0: [sda] 415872 512-byte logical blocks: (213 MB/203 MiB) [ 32.070975] sd 0:0:0:0: [sda] Write Protect is off [ 32.137197] sd 0:0:0:0: [sda] Mode Sense: 5b 00 00 08 [ 32.209661] sd 0:0:0:0: [sda] Write cache: enabled, read cache: enabled, doesn't support DPO or FUA [ 32.332708] sda: [mac] sda1 sda2 sda3 [ 32.417733] sd 0:0:0:0: [sda] Attached SCSI disk ... snip ... 
[ 76.687067] mesh_abort((ptrval)) [ 76.743606] mesh: state at (ptrval), regs at (ptrval), dma at (ptrval) [ 76.810798] ct=6000 seq=86 bs=4017 fc= 0 exc= 0 err= 0 im= 7 int= 0 sp=85 [ 76.880720] dma stat=84e0 cmdptr=1f73d000 [ 76.941387] phase=4 msgphase=0 conn_tgt=0 data_ptr=24576 [ 77.005567] dma_st=1 dma_ct=0 n_msgout=0 [ 77.065456] target 0: req=(ptrval) goes_out=0 saved_ptr=0 [ 77.130512] mesh_abort((ptrval)) [ 77.187670] mesh: state at (ptrval), regs at (ptrval), dma at (ptrval) [ 77.255594] ct=6000 seq=86 bs=4017 fc= 0 exc= 0 err= 0 im= 7 int= 0 sp=85 [ 77.325778] dma stat=84e0 cmdptr=1f73d000 [ 77.387239] phase=4 msgphase=0 conn_tgt=0 data_ptr=24576 [ 77.453665] dma_st=1 dma_ct=0 n_msgout=0 [ 77.515900] target 0: req=(ptrval) goes_out=0 saved_ptr=0 [ 77.582902] mesh_host_reset [ 88.187083] Kernel panic - not syncing: mesh: double DMA start ! [ 88.254510] CPU: 0 PID: 358 Comm: scsi_eh_0 Not tainted 5.6.13-pmac #1 [ 88.323302] Call Trace: [ 88.378854] [e16ddc58] [c0027080] panic+0x13c/0x308 (unreliable) [ 88.446221] [e16ddcb8] [c02b2478] mesh_start.part.12+0x130/0x414 [ 88.513298] [e16ddcf8] [c02b2fc8] mesh_queue+0x54/0x70 [ 88.577097] [e16ddd18] [c02a1848] scsi_send_eh_cmnd+0x374/0x384 [ 88.643476] [e16dddc8] [c02a1938] scsi_eh_tur+0x5c/0xb8 [ 88.707878] [e16dddf8] [c02a1ab8] scsi_eh_test_devices+0x124/0x178 [ 88.775663] [e16dde28] [c02a2094] scsi_eh_ready_devs+0x588/0x8a8 [ 88.843124] [e16dde98] [c02a31d8] scsi_error_handler+0x344/0x520 [ 88.910697] [e16ddf08] [c00409c8] kthread+0xe4/0xe8 [ 88.975166] [e16ddf38] [c000f234] ret_from_kernel_thread+0x14/0x1c [ 89.044112] Rebooting in 180 seconds.. In theory, a panic can happen after a bus or host reset with dma_started flag set. Fix this by halting the DMA before reinitializing the host. Don't assume that ms->current_req is set when halt_dma() is invoked as it may not hold for bus or host reset. BTW, this particular Conner drive can be made to work by inhibiting disconnect/reselect with 'mesh.resel_targets=0'. 
Link: https://lore.kernel.org/r/3952bc691e150a7128b29120999b6092071b039a.1595460351.git.fthain@telegraphics.com.au Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: Paul Mackerras Reported-and-tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/mesh.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 82e01dbe90af..7c0eaa9ea1ed 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1044,6 +1044,8 @@ static void handle_error(struct mesh_state *ms) while ((in_8(&mr->bus_status1) & BS1_RST) != 0) udelay(1); printk("done\n"); + if (ms->dma_started) + halt_dma(ms); handle_reset(ms); /* request_q is empty, no point in mesh_start() */ return; @@ -1356,7 +1358,8 @@ static void halt_dma(struct mesh_state *ms) ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd), ms->tgts[ms->conn_tgt].data_goes_out); } - scsi_dma_unmap(cmd); + if (cmd) + scsi_dma_unmap(cmd); ms->dma_started = 0; } @@ -1711,6 +1714,9 @@ static int mesh_host_reset(struct scsi_cmnd *cmd) spin_lock_irqsave(ms->host->host_lock, flags); + if (ms->dma_started) + halt_dma(ms); + /* Reset the controller & dbdma channel */ out_le32(&md->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* stop dma */ out_8(&mr->exception, 0xff); /* clear all exception bits */ -- GitLab From f5a77bafe02c6c31be5883458b0cbce4c43ea4cd Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Fri, 24 Jul 2020 11:21:20 +1200 Subject: [PATCH 0230/1304] net: dsa: mv88e6xxx: MV88E6097 does not support jumbo configuration [ Upstream commit 0f3c66a3c7b4e8b9f654b3c998e9674376a51b0f ] The MV88E6097 chip does not support configuring jumbo frames. Prior to commit 5f4366660d65 only the 6352, 6351, 6165 and 6320 chips configured jumbo mode. The refactor accidentally added the function for the 6097. Remove the erroneous function pointer assignment. 
Fixes: 5f4366660d65 ("net: dsa: mv88e6xxx: Refactor setting of jumbo frames") Signed-off-by: Chris Packham Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/dsa/mv88e6xxx/chip.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 43b00e8bcdcd..6fa8aa69b418 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2930,7 +2930,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, - .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, -- GitLab From e3e983b327e7f2456c6d72c7942dcbb1a91160af Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Wed, 22 Jul 2020 16:33:11 +0530 Subject: [PATCH 0231/1304] PCI: cadence: Fix updating Vendor ID and Subsystem Vendor ID register [ Upstream commit e3bca37d15dca118f2ef1f0a068bb6e07846ea20 ] Commit 1b79c5284439 ("PCI: cadence: Add host driver for Cadence PCIe controller") in order to update Vendor ID, directly wrote to PCI_VENDOR_ID register. However PCI_VENDOR_ID in root port configuration space is read-only register and writing to it will have no effect. Use local management register to configure Vendor ID and Subsystem Vendor ID. 
Link: https://lore.kernel.org/r/20200722110317.4744-10-kishon@ti.com Fixes: 1b79c5284439 ("PCI: cadence: Add host driver for Cadence PCIe controller") Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Lorenzo Pieralisi Reviewed-by: Rob Herring Signed-off-by: Sasha Levin --- drivers/pci/controller/pcie-cadence-host.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c index ec394f6a19c8..ae7affcb1a81 100644 --- a/drivers/pci/controller/pcie-cadence-host.c +++ b/drivers/pci/controller/pcie-cadence-host.c @@ -102,6 +102,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; u32 value, ctrl; + u32 id; /* * Set the root complex BAR configuration register: @@ -121,8 +122,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); /* Set root port configuration space */ - if (rc->vendor_id != 0xffff) - cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); + if (rc->vendor_id != 0xffff) { + id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) | + CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); + } + if (rc->device_id != 0xffff) cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); -- GitLab From b6be0b71eaaa2eb71d87d29f2775c74a36d320ad Mon Sep 17 00:00:00 2001 From: Li Heng Date: Sat, 25 Jul 2020 10:56:27 +0800 Subject: [PATCH 0232/1304] RDMA/core: Fix return error value in _ib_modify_qp() to negative [ Upstream commit 47fda651d5af2506deac57d54887cf55ce26e244 ] The error codes in _ib_modify_qp() are supposed to be negative errno. 
Fixes: 7a5c938b9ed0 ("IB/core: Check for rdma_protocol_ib only after validating port_num") Link: https://lore.kernel.org/r/1595645787-20375-1-git-send-email-liheng40@huawei.com Reported-by: Hulk Robot Signed-off-by: Li Heng Reviewed-by: Parav Pandit Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/core/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 82f309fb3ce5..e8432876cc86 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1617,7 +1617,7 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, if (!(rdma_protocol_ib(qp->device, attr->alt_ah_attr.port_num) && rdma_protocol_ib(qp->device, port))) { - ret = EINVAL; + ret = -EINVAL; goto out; } } -- GitLab From 820defebf4ead19c96ecc36a16dba85f315c0931 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 23 Jul 2020 18:22:19 +0300 Subject: [PATCH 0233/1304] Smack: fix another vsscanf out of bounds [ Upstream commit a6bd4f6d9b07452b0b19842044a6c3ea384b0b88 ] This is similar to commit 84e99e58e8d1 ("Smack: slab-out-of-bounds in vsscanf") where we added a bounds check on "rule". 
Reported-by: syzbot+a22c6092d003d6fe1122@syzkaller.appspotmail.com Fixes: f7112e6c9abf ("Smack: allow for significantly longer Smack labels v4") Signed-off-by: Dan Carpenter Signed-off-by: Casey Schaufler Signed-off-by: Sasha Levin --- security/smack/smackfs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index 10ee51d04492..981f582539ac 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -933,6 +933,10 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, for (i = 0; i < catlen; i++) { rule += SMK_DIGITLEN; + if (rule > data + count) { + rc = -EOVERFLOW; + goto out; + } ret = sscanf(rule, "%u", &cat); if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM) goto out; -- GitLab From adef6d2d512d140ac2b02776a3f266f60b8827d2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 23 Jul 2020 18:23:05 +0300 Subject: [PATCH 0234/1304] Smack: prevent underflow in smk_set_cipso() [ Upstream commit 42a2df3e829f3c5562090391b33714b2e2e5ad4a ] We have an upper bound on "maplevel" but forgot to check for negative values. 
Fixes: e114e473771c ("Smack: Simplified Mandatory Access Control Kernel") Signed-off-by: Dan Carpenter Signed-off-by: Casey Schaufler Signed-off-by: Sasha Levin --- security/smack/smackfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index 981f582539ac..accd3846f1e3 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -912,7 +912,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, } ret = sscanf(rule, "%d", &maplevel); - if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) + if (ret != 1 || maplevel < 0 || maplevel > SMACK_CIPSO_MAXLEVEL) goto out; rule += SMK_DIGITLEN; -- GitLab From 6b03a2a347b2ac8e11cf8107f08086d7ec468fc6 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sun, 12 Jul 2020 12:23:51 -0700 Subject: [PATCH 0235/1304] power: supply: check if calc_soc succeeded in pm860x_init_battery [ Upstream commit ccf193dee1f0fff55b556928591f7818bac1b3b1 ] clang static analysis flags this error 88pm860x_battery.c:522:19: warning: Assigned value is garbage or undefined [core.uninitialized.Assign] info->start_soc = soc; ^ ~~~ soc is set by calling calc_soc. But calc_soc can return without setting soc. So check the return status and bail similarly to other checks in pm860x_init_battery and initialize soc to silence the warning. 
Fixes: a830d28b48bf ("power_supply: Enable battery-charger for 88pm860x") Signed-off-by: Tom Rix Signed-off-by: Sebastian Reichel Signed-off-by: Sasha Levin --- drivers/power/supply/88pm860x_battery.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c index 63c57dc82ac1..4eda5065b5bb 100644 --- a/drivers/power/supply/88pm860x_battery.c +++ b/drivers/power/supply/88pm860x_battery.c @@ -436,7 +436,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) int ret; int data; int bat_remove; - int soc; + int soc = 0; /* measure enable on GPADC1 */ data = MEAS1_GP1; @@ -499,7 +499,9 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) } mutex_unlock(&info->lock); - calc_soc(info, OCV_MODE_ACTIVE, &soc); + ret = calc_soc(info, OCV_MODE_ACTIVE, &soc); + if (ret < 0) + goto out; data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG); bat_remove = data & BAT_WU_LOG; -- GitLab From a8acb2bd0c03a9f02806f82032cfc9e976cab5c4 Mon Sep 17 00:00:00 2001 From: Nicolas Boichat Date: Tue, 21 Jul 2020 10:37:15 +0800 Subject: [PATCH 0236/1304] Bluetooth: hci_h5: Set HCI_UART_RESET_ON_INIT to correct flags [ Upstream commit a7ad4b6119d740b1ec5788f1b98be0fd1c1b5a5a ] HCI_UART_RESET_ON_INIT belongs in hdev_flags, not flags. 
Fixes: ce945552fde4a09 ("Bluetooth: hci_h5: Add support for serdev enumerated devices") Signed-off-by: Nicolas Boichat Reviewed-by: Hans de Goede Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin --- drivers/bluetooth/hci_h5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 8eede1197cd2..5a68cd4dd71c 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -803,7 +803,7 @@ static int h5_serdev_probe(struct serdev_device *serdev) if (!h5) return -ENOMEM; - set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags); + set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags); h5->hu = &h5->serdev_hu; h5->serdev_hu.serdev = serdev; -- GitLab From d30d21110706a0a197ec4803f5af1fa364e49826 Mon Sep 17 00:00:00 2001 From: Nicolas Boichat Date: Tue, 21 Jul 2020 10:37:16 +0800 Subject: [PATCH 0237/1304] Bluetooth: hci_serdev: Only unregister device if it was registered [ Upstream commit 202798db9570104728dce8bb57dfeed47ce764bc ] We should not call hci_unregister_dev if the device was not successfully registered. 
Fixes: c34dc3bfa7642fd ("Bluetooth: hci_serdev: Introduce hci_uart_unregister_device()") Signed-off-by: Nicolas Boichat Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin --- drivers/bluetooth/hci_serdev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index 46e20444ba19..d3fb0d657fa5 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -369,7 +369,8 @@ void hci_uart_unregister_device(struct hci_uart *hu) struct hci_dev *hdev = hu->hdev; clear_bit(HCI_UART_PROTO_READY, &hu->flags); - hci_unregister_dev(hdev); + if (test_bit(HCI_UART_REGISTERED, &hu->flags)) + hci_unregister_dev(hdev); hci_free_dev(hdev); cancel_work_sync(&hu->write_work); -- GitLab From f212dc76bac1490c91c25760c146fc7c0f5c3390 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 27 Jul 2020 01:34:39 +0200 Subject: [PATCH 0238/1304] net: dsa: rtl8366: Fix VLAN semantics [ Upstream commit 15ab7906cc9290afb006df1bb1074907fbcc7061 ] The RTL8366 would not handle adding new members (ports) to a VLAN: the code assumed that ->port_vlan_add() was only called once for a single port. When intializing the switch with .configure_vlan_while_not_filtering set to true, the function is called numerous times for adding all ports to VLAN1, which was something the code could not handle. Alter rtl8366_set_vlan() to just |= new members and untagged flags to 4k and MC VLAN table entries alike. This makes it possible to just add new ports to a VLAN. Put in some helpful debug code that can be used to find any further bugs here. Cc: DENG Qingfang Cc: Mauri Sandberg Reviewed-by: Florian Fainelli Fixes: d8652956cf37 ("net: dsa: realtek-smi: Add Realtek SMI driver") Signed-off-by: Linus Walleij Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/dsa/rtl8366.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index c281c488a306..145f34de7b41 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -43,18 +43,26 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, int ret; int i; + dev_dbg(smi->dev, + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, member, untag); + /* Update the 4K table */ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); if (ret) return ret; - vlan4k.member = member; - vlan4k.untag = untag; + vlan4k.member |= member; + vlan4k.untag |= untag; vlan4k.fid = fid; ret = smi->ops->set_vlan_4k(smi, &vlan4k); if (ret) return ret; + dev_dbg(smi->dev, + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, vlan4k.member, vlan4k.untag); + /* Try to find an existing MC entry for this VID */ for (i = 0; i < smi->num_vlan_mc; i++) { struct rtl8366_vlan_mc vlanmc; @@ -65,11 +73,16 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, if (vid == vlanmc.vid) { /* update the MC entry */ - vlanmc.member = member; - vlanmc.untag = untag; + vlanmc.member |= member; + vlanmc.untag |= untag; vlanmc.fid = fid; ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + + dev_dbg(smi->dev, + "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n", + vid, vlanmc.member, vlanmc.untag); + break; } } -- GitLab From 79a10f5a0021735171488889a9100aacd6f5b68c Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 27 Jul 2020 01:34:40 +0200 Subject: [PATCH 0239/1304] net: dsa: rtl8366: Fix VLAN set-up [ Upstream commit 788abc6d9d278ed6fa1fa94db2098481a04152b7 ] Alter the rtl8366_vlan_add() to call rtl8366_set_vlan() inside the loop that goes over all VIDs since we now properly support calling that function more than once. Augment the loop to postincrement as this is more intuitive. 
The loop moved past the last VID but called rtl8366_set_vlan() with the port number instead of the VID, assuming a 1-to-1 correspondence between ports and VIDs. This was also a bug. Cc: DENG Qingfang Cc: Mauri Sandberg Reviewed-by: Florian Fainelli Fixes: d8652956cf37 ("net: dsa: realtek-smi: Add Realtek SMI driver") Signed-off-by: Linus Walleij Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/dsa/rtl8366.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 145f34de7b41..7e27c9aff9b7 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -397,7 +397,7 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) dev_err(smi->dev, "port is DSA or CPU port\n"); - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { int pvid_val = 0; dev_info(smi->dev, "add VLAN %04x\n", vid); @@ -420,13 +420,13 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (ret < 0) return; } - } - ret = rtl8366_set_vlan(smi, port, member, untag, 0); - if (ret) - dev_err(smi->dev, - "failed to set up VLAN %04x", - vid); + ret = rtl8366_set_vlan(smi, vid, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x", + vid); + } } EXPORT_SYMBOL_GPL(rtl8366_vlan_add); -- GitLab From fb17074f89eadd865d978547aafe503f1897f350 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 23:17:26 +1000 Subject: [PATCH 0240/1304] powerpc/boot: Fix CONFIG_PPC_MPC52XX references [ Upstream commit e5eff89657e72a9050d95fde146b54c7dc165981 ] Commit 866bfc75f40e ("powerpc: conditionally compile platform-specific serial drivers") made some code depend on CONFIG_PPC_MPC52XX, which doesn't exist. Fix it to use CONFIG_PPC_MPC52xx. 
Fixes: 866bfc75f40e ("powerpc: conditionally compile platform-specific serial drivers") Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724131728.1643966-7-mpe@ellerman.id.au Signed-off-by: Sasha Levin --- arch/powerpc/boot/Makefile | 2 +- arch/powerpc/boot/serial.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 7d5ddf53750c..7a83b5e136e0 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -122,7 +122,7 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \ elf_util.c $(zlib-y) devtree.c stdlib.c \ oflib.c ofconsole.c cuboot.c -src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c +src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c ifndef CONFIG_PPC64_BOOT_WRAPPER src-wlib-y += crtsavres.S diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c index 48e3743faedf..83c78427c20b 100644 --- a/arch/powerpc/boot/serial.c +++ b/arch/powerpc/boot/serial.c @@ -127,7 +127,7 @@ int serial_console_init(void) dt_is_compatible(devp, "fsl,cpm2-smc-uart")) rc = cpm_console_init(devp, &serial_cd); #endif -#ifdef CONFIG_PPC_MPC52XX +#ifdef CONFIG_PPC_MPC52xx else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart")) rc = mpc5200_psc_console_init(devp, &serial_cd); #endif -- GitLab From c2a811630880eec62025b9975d5ce09e891f152f Mon Sep 17 00:00:00 2001 From: Harish Date: Tue, 9 Jun 2020 13:44:23 +0530 Subject: [PATCH 0241/1304] selftests/powerpc: Fix CPU affinity for child process [ Upstream commit 854eb5022be04f81e318765f089f41a57c8e5d83 ] On systems with large number of cpus, test fails trying to set affinity by calling sched_setaffinity() with smaller size for affinity mask. This patch fixes it by making sure that the size of allocated affinity mask is dependent on the number of CPUs as reported by get_nprocs(). 
Fixes: 00b7ec5c9cf3 ("selftests/powerpc: Import Anton's context_switch2 benchmark") Reported-by: Shirisha Ganta Signed-off-by: Sandipan Das Signed-off-by: Harish Reviewed-by: Kamalesh Babulal Reviewed-by: Satheesh Rajendran Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200609081423.529664-1-harish@linux.ibm.com Signed-off-by: Sasha Levin --- .../powerpc/benchmarks/context_switch.c | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c index 87f1f0252299..9ec7674697b1 100644 --- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c +++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -108,8 +109,9 @@ static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu) static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) { - int pid; - cpu_set_t cpuset; + int pid, ncpus; + cpu_set_t *cpuset; + size_t size; pid = fork(); if (pid == -1) { @@ -120,14 +122,23 @@ static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu) if (pid) return; - CPU_ZERO(&cpuset); - CPU_SET(cpu, &cpuset); + ncpus = get_nprocs(); + size = CPU_ALLOC_SIZE(ncpus); + cpuset = CPU_ALLOC(ncpus); + if (!cpuset) { + perror("malloc"); + exit(1); + } + CPU_ZERO_S(size, cpuset); + CPU_SET_S(cpu, size, cpuset); - if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) { + if (sched_setaffinity(0, size, cpuset)) { perror("sched_setaffinity"); + CPU_FREE(cpuset); exit(1); } + CPU_FREE(cpuset); fn(arg); exit(0); -- GitLab From 88106a1039032405b738e8da72c4061cb79aa4e1 Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Wed, 22 Jul 2020 17:44:28 +0800 Subject: [PATCH 0242/1304] PCI: Release IVRS table in AMD ACS quirk [ Upstream commit 090688fa4e448284aaa16136372397d7d10814db ] The 
acpi_get_table() should be coupled with acpi_put_table() if the mapped table is not used at runtime to release the table mapping. In pci_quirk_amd_sb_acs(), IVRS table is just used for checking AMD IOMMU is supported, not used at runtime, so put the table after using it. Fixes: 15b100dfd1c9 ("PCI: Claim ACS support for AMD southbridge devices") Link: https://lore.kernel.org/r/1595411068-15440-1-git-send-email-guohanjun@huawei.com Signed-off-by: Hanjun Guo Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin --- drivers/pci/quirks.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 8f856657dac2..9129ccd593d1 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4334,6 +4334,8 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) if (ACPI_FAILURE(status)) return -ENODEV; + acpi_put_table(header); + /* Filter out flags not applicable to multifunction */ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); -- GitLab From 8615b9cacd6cf3dd3521fc080252940b723a0972 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 30 Jul 2020 10:38:46 +0530 Subject: [PATCH 0243/1304] selftests/powerpc: Fix online CPU selection [ Upstream commit dfa03fff86027e58c8dba5c03ae68150d4e513ad ] The size of the CPU affinity mask must be large enough for systems with a very large number of CPUs. Otherwise, tests which try to determine the first online CPU by calling sched_getaffinity() will fail. This makes sure that the size of the allocated affinity mask is dependent on the number of CPUs as reported by get_nprocs_conf(). 
Fixes: 3752e453f6ba ("selftests/powerpc: Add tests of PMU EBBs") Reported-by: Shirisha Ganta Signed-off-by: Sandipan Das Reviewed-by: Kamalesh Babulal Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/a408c4b8e9a23bb39b539417a21eb0ff47bb5127.1596084858.git.sandipan@linux.ibm.com Signed-off-by: Sasha Levin --- tools/testing/selftests/powerpc/utils.c | 37 +++++++++++++++++-------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c index aa8fc1e6365b..ba0959d454b3 100644 --- a/tools/testing/selftests/powerpc/utils.c +++ b/tools/testing/selftests/powerpc/utils.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -83,28 +84,40 @@ void *get_auxv_entry(int type) int pick_online_cpu(void) { - cpu_set_t mask; - int cpu; + int ncpus, cpu = -1; + cpu_set_t *mask; + size_t size; + + ncpus = get_nprocs_conf(); + size = CPU_ALLOC_SIZE(ncpus); + mask = CPU_ALLOC(ncpus); + if (!mask) { + perror("malloc"); + return -1; + } - CPU_ZERO(&mask); + CPU_ZERO_S(size, mask); - if (sched_getaffinity(0, sizeof(mask), &mask)) { + if (sched_getaffinity(0, size, mask)) { perror("sched_getaffinity"); - return -1; + goto done; } /* We prefer a primary thread, but skip 0 */ - for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = 8; cpu < ncpus; cpu += 8) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; /* Search for anything, but in reverse */ - for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) - if (CPU_ISSET(cpu, &mask)) - return cpu; + for (cpu = ncpus - 1; cpu >= 0; cpu--) + if (CPU_ISSET_S(cpu, size, mask)) + goto done; printf("No cpus in affinity mask?!\n"); - return -1; + +done: + CPU_FREE(mask); + return cpu; } bool is_ppc64le(void) -- GitLab From 62b15380d0908b71515be193bb7ae67d2e3afbcb Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Wed, 29 Jul 2020 17:44:53 +0200 Subject: [PATCH 0244/1304] ASoC: meson: 
axg-tdm-interface: fix link fmt setup [ Upstream commit 6878ba91ce84f7a07887a0615af70f969508839f ] The .set_fmt() callback of the axg tdm interface incorrectly test the content of SND_SOC_DAIFMT_MASTER_MASK as if it was a bitfield, which it is not. Implement the test correctly. Fixes: d60e4f1e4be5 ("ASoC: meson: add tdm interface driver") Signed-off-by: Jerome Brunet Link: https://lore.kernel.org/r/20200729154456.1983396-2-jbrunet@baylibre.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/meson/axg-tdm-interface.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c index 7b8baf46d968..5c055d8de8c7 100644 --- a/sound/soc/meson/axg-tdm-interface.c +++ b/sound/soc/meson/axg-tdm-interface.c @@ -111,18 +111,25 @@ static int axg_tdm_iface_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai); - /* These modes are not supported */ - if (fmt & (SND_SOC_DAIFMT_CBS_CFM | SND_SOC_DAIFMT_CBM_CFS)) { + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBS_CFS: + if (!iface->mclk) { + dev_err(dai->dev, "cpu clock master: mclk missing\n"); + return -ENODEV; + } + break; + + case SND_SOC_DAIFMT_CBM_CFM: + break; + + case SND_SOC_DAIFMT_CBS_CFM: + case SND_SOC_DAIFMT_CBM_CFS: dev_err(dai->dev, "only CBS_CFS and CBM_CFM are supported\n"); + /* Fall-through */ + default: return -EINVAL; } - /* If the TDM interface is the clock master, it requires mclk */ - if (!iface->mclk && (fmt & SND_SOC_DAIFMT_CBS_CFS)) { - dev_err(dai->dev, "cpu clock master: mclk missing\n"); - return -ENODEV; - } - iface->fmt = fmt; return 0; } @@ -311,7 +318,8 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream, if (ret) return ret; - if (iface->fmt & SND_SOC_DAIFMT_CBS_CFS) { + if ((iface->fmt & SND_SOC_DAIFMT_MASTER_MASK) == + SND_SOC_DAIFMT_CBS_CFS) { ret = 
axg_tdm_iface_set_sclk(dai, params); if (ret) return ret; -- GitLab From dea13b385f6a2459b05062803ab1ed8a840ac982 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 30 Jul 2020 17:01:20 +0200 Subject: [PATCH 0245/1304] s390/qeth: don't process empty bridge port events [ Upstream commit 02472e28b9a45471c6d8729ff2c7422baa9be46a ] Discard events that don't contain any entries. This shouldn't happen, but subsequent code relies on being able to use entry 0. So better be safe than accessing garbage. Fixes: b4d72c08b358 ("qeth: bridgeport support - basic control") Signed-off-by: Julian Wiedmann Reviewed-by: Alexandra Winter Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/s390/net/qeth_l2_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index eb917e93fa72..8d30f9ac3e9d 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1463,6 +1463,10 @@ static void qeth_bridge_state_change(struct qeth_card *card, int extrasize; QETH_CARD_TEXT(card, 2, "brstchng"); + if (qports->num_entries == 0) { + QETH_CARD_TEXT(card, 2, "BPempty"); + return; + } if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); return; -- GitLab From 89d733f13a9f3797ef893ce6779050bb33aaad12 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Thu, 30 Jul 2020 15:39:39 +0800 Subject: [PATCH 0246/1304] wl1251: fix always return 0 error [ Upstream commit 20e6421344b5bc2f97b8e2db47b6994368417904 ] wl1251_event_ps_report() should not always return 0 because wl1251_ps_set_mode() may fail. Change it to return 'ret'. 
Fixes: f7ad1eed4d4b ("wl1251: retry power save entry") Reported-by: Hulk Robot Signed-off-by: Wang Hai Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200730073939.33704-1-wanghai38@huawei.com Signed-off-by: Sasha Levin --- drivers/net/wireless/ti/wl1251/event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index f5acd24d0e2b..988abb49771f 100644 --- a/drivers/net/wireless/ti/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c @@ -84,7 +84,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl, break; } - return 0; + return ret; } static void wl1251_event_mbox_dump(struct event_mailbox *mbox) -- GitLab From 5c928cc547d89150702a82d09fb4ab54f941bed0 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 30 Jul 2020 19:42:44 -0700 Subject: [PATCH 0247/1304] tools, build: Propagate build failures from tools/build/Makefile.build [ Upstream commit a278f3d8191228212c553a5d4303fa603214b717 ] The '&&' command seems to have a bad effect when $(cmd_$(1)) exits with non-zero effect: the command failure is masked (despite `set -e`) and all but the first command of $(dep-cmd) is executed (successfully, as they are mostly printfs), thus overall returning 0 in the end. This means in practice that despite compilation errors, tools's build Makefile will return success. We see this very reliably with libbpf's Makefile, which doesn't get compilation error propagated properly. This in turns causes issues with selftests build, as well as bpftool and other projects that rely on building libbpf. The fix is simple: don't use &&. Given `set -e`, we don't need to chain commands with &&. The shell will exit on first failure, giving desired behavior and propagating error properly. 
Fixes: 275e2d95591e ("tools build: Move dependency copy into function") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20200731024244.872574-1-andriin@fb.com Signed-off-by: Sasha Levin --- tools/build/Build.include | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/build/Build.include b/tools/build/Build.include index 9ec01f4454f9..585486e40995 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -74,7 +74,8 @@ dep-cmd = $(if $(wildcard $(fixdep)), # dependencies in the cmd file if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \ @set -e; \ - $(echo-cmd) $(cmd_$(1)) && $(dep-cmd)) + $(echo-cmd) $(cmd_$(1)); \ + $(dep-cmd)) # if_changed - execute command if any prerequisite is newer than # target, or command line has changed -- GitLab From 79508a540f7de2e81d4d7dadcded834ede24eeb9 Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Sun, 2 Aug 2020 19:15:37 +0800 Subject: [PATCH 0248/1304] net: ethernet: aquantia: Fix wrong return value [ Upstream commit 0470a48880f8bc42ce26962b79c7b802c5a695ec ] In function hw_atl_a0_hw_multicast_list_set(), when an invalid request is encountered, a negative error code should be returned. Fixes: bab6de8fd180b ("net: ethernet: aquantia: Atlantic A0 and B0 specific functions") Cc: David VomLehn Signed-off-by: Tianjia Zhang Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index dab5891b9714..d48595470ec8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -774,7 +774,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, int err = 0; if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { - err = EBADRQC; + err = -EBADRQC; goto err_exit; } for (self->aq_nic_cfg->mc_list_count = 0U; -- GitLab From 78b41e3b0e29ff3aecd9f365c07138e3956247aa Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Sun, 2 Aug 2020 19:15:44 +0800 Subject: [PATCH 0249/1304] liquidio: Fix wrong return value in cn23xx_get_pf_num() [ Upstream commit aa027850a292ea65524b8fab83eb91a124ad362c ] On an error exit path, a negative error code should be returned instead of a positive return value. Fixes: 0c45d7fe12c7e ("liquidio: fix use of pf in pass-through mode in a virtual machine") Cc: Rick Farrington Signed-off-by: Tianjia Zhang Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 9f4f3c1d5043..55fe80ca10d3 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct) oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & CN23XX_PCIE_SRIOV_FDL_MASK); } else { - ret = EINVAL; + ret = -EINVAL; /* Under some virtual environments, extended PCI regs are * inaccessible, in which case the above read will have failed. -- GitLab From 9cd863d4e114de28a1558d12dad9c353af6a2f12 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 2 Aug 2020 15:53:33 +0200 Subject: [PATCH 0250/1304] net: spider_net: Fix the size used in a 'dma_free_coherent()' call [ Upstream commit 36f28f7687a9ce665479cce5d64ce7afaa9e77ae ] Update the size used in 'dma_free_coherent()' in order to match the one used in the corresponding 'dma_alloc_coherent()', in 'spider_net_init_chain()'. Fixes: d4ed8f8d1fb7 ("Spidernet DMA coalescing") Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/toshiba/spider_net.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 23417266b7ec..e66014e0427f 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -296,8 +296,8 @@ spider_net_free_chain(struct spider_net_card *card, descr = descr->next; } while (descr != chain->ring); - dma_free_coherent(&card->pdev->dev, chain->num_desc, - chain->hwring, chain->dma_addr); + dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr), + chain->hwring, chain->dma_addr); } /** -- GitLab From b39993b26c6d78ec74952849310a72aa6a7a2fb7 Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:30 +0300 Subject: [PATCH 0251/1304] fsl/fman: use 32-bit unsigned integer [ Upstream commit 99f47abd9f7bf6e365820d355dc98f6955a562df ] Potentially overflowing expression (ts_freq << 16 and intgr << 16) declared as type u32 (32-bit unsigned) is evaluated using 32-bit arithmetic and then used in a context that expects an expression of type u64 (64-bit unsigned) which ultimately is used as 16-bit unsigned by typecasting to u16. Fixed by using an unsigned 32-bit integer since the value is truncated anyway in the end. Fixes: 414fd46e7762 ("fsl/fman: Add FMan support") Signed-off-by: Florinel Iordache Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/freescale/fman/fman.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 21d8023535ae..eba7e54ecf85 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1396,8 +1396,7 @@ static void enable_time_stamp(struct fman *fman) { struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; u16 fm_clk_freq = fman->state->fm_clk_freq; - u32 tmp, intgr, ts_freq; - u64 frac; + u32 tmp, intgr, ts_freq, frac; ts_freq = (u32)(1 << fman->state->count1_micro_bit); /* configure timestamp so that bit 8 will count 1 microsecond -- GitLab From 9d29b75478079474cc94c1d568c847f36b52e659 Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:31 +0300 Subject: [PATCH 0252/1304] fsl/fman: fix dereference null return value [ Upstream commit 0572054617f32670abab4b4e89a876954d54b704 ] Check before using returned value to avoid dereferencing null pointer. Fixes: 18a6c85fcc78 ("fsl/fman: Add FMan Port Support") Signed-off-by: Florinel Iordache Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/freescale/fman/fman_port.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index ee82ee1384eb..47f6fee1f396 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1756,6 +1756,7 @@ static int fman_port_probe(struct platform_device *of_dev) struct fman_port *port; struct fman *fman; struct device_node *fm_node, *port_node; + struct platform_device *fm_pdev; struct resource res; struct resource *dev_res; u32 val; @@ -1780,8 +1781,14 @@ static int fman_port_probe(struct platform_device *of_dev) goto return_err; } - fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); + fm_pdev = of_find_device_by_node(fm_node); of_node_put(fm_node); + if (!fm_pdev) { + err = -EINVAL; + goto return_err; + } + + fman = dev_get_drvdata(&fm_pdev->dev); if (!fman) { err = -EINVAL; goto return_err; -- GitLab From 48320371e70d98f2d6a3d59fcb8aeb864ff784b1 Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:32 +0300 Subject: [PATCH 0253/1304] fsl/fman: fix unreachable code [ Upstream commit cc79fd8f557767de90ff199d3b6fb911df43160a ] The parameter 'priority' is incorrectly forced to zero which ultimately induces logically dead code in the subsequent lines. Fixes: 57ba4c9b56d8 ("fsl/fman: Add FMan MAC support") Signed-off-by: Florinel Iordache Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/freescale/fman/fman_memac.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index e1901874c19f..08f8b36779ea 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -856,7 +856,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, tmp = ioread32be(®s->command_config); tmp &= ~CMD_CFG_PFC_MODE; - priority = 0; iowrite32be(tmp, ®s->command_config); -- GitLab From 623b1bba4cbd8d3551c3d07cb4f541f507d6eff2 Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:33 +0300 Subject: [PATCH 0254/1304] fsl/fman: check dereferencing null pointer [ Upstream commit cc5d229a122106733a85c279d89d7703f21e4d4f ] Add a safe check to avoid dereferencing null pointer Fixes: 57ba4c9b56d8 ("fsl/fman: Add FMan MAC support") Signed-off-by: Florinel Iordache Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 4 ++-- drivers/net/ethernet/freescale/fman/fman_memac.c | 2 +- drivers/net/ethernet/freescale/fman/fman_tgec.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 1ca543ac8f2c..d2de9ea80c43 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1205,7 +1205,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->multicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; @@ -1218,7 +1218,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->unicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 08f8b36779ea..9088b4f4b4b8 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -985,7 +985,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index f75b9c11b2d2..ac5a281e0ec3 100644 --- 
a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -630,7 +630,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; -- GitLab From 2986dda799e6d7d2becc24ed3db11610a49ad911 Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:34 +0300 Subject: [PATCH 0255/1304] fsl/fman: fix eth hash table allocation [ Upstream commit 3207f715c34317d08e798e11a10ce816feb53c0f ] Fix memory allocation for ethernet address hash table. The code was wrongly allocating an array for eth hash table which is incorrect because this is the main structure for eth hash table (struct eth_hash_t) that contains inside a number of elements. Fixes: 57ba4c9b56d8 ("fsl/fman: Add FMan MAC support") Signed-off-by: Florinel Iordache Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/freescale/fman/fman_mac.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h index dd6d0526f6c1..19f327efdaff 100644 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size) struct eth_hash_t *hash; /* Allocate address hash table */ - hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); + hash = kmalloc(sizeof(*hash), GFP_KERNEL); if (!hash) return NULL; -- GitLab From 6a2db034f9b3d811b4afd0aaafa54cfec1c4030a Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Mon, 15 Jun 2020 11:25:33 +0800 Subject: [PATCH 0256/1304] dlm: Fix kobject memleak [ Upstream commit 0ffddafc3a3970ef7013696e7f36b3d378bc4c16 ] Currently the error return path from kobject_init_and_add() is not followed by a call to kobject_put() - which means we are leaking the kobject. Set do_unreg = 1 before kobject_init_and_add() to ensure that kobject_put() can be called in its error patch. 
Fixes: 901195ed7f4b ("Kobject: change GFS2 to use kobject_init_and_add") Reported-by: Hulk Robot Signed-off-by: Wang Hai Signed-off-by: David Teigland Signed-off-by: Sasha Levin --- fs/dlm/lockspace.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index f1261fa0af8a..244b87e4dfe7 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -633,6 +633,9 @@ static int new_lockspace(const char *name, const char *cluster, wait_event(ls->ls_recover_lock_wait, test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); + /* let kobject handle freeing of ls if there's an error */ + do_unreg = 1; + ls->ls_kobj.kset = dlm_kset; error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, "%s", ls->ls_name); @@ -640,9 +643,6 @@ static int new_lockspace(const char *name, const char *cluster, goto out_recoverd; kobject_uevent(&ls->ls_kobj, KOBJ_ADD); - /* let kobject handle freeing of ls if there's an error */ - do_unreg = 1; - /* This uevent triggers dlm_controld in userspace to add us to the group of nodes that are members of this lockspace (managed by the cluster infrastructure.) Once it's done that, it tells us who the -- GitLab From fc1ffe508c0ad59291b84a5dc78c5880efeb9e1b Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Thu, 6 Aug 2020 23:18:09 -0700 Subject: [PATCH 0257/1304] ocfs2: fix unbalanced locking [ Upstream commit 57c720d4144a9c2b88105c3e8f7b0e97e4b5cc93 ] Based on what fails, function can return with nfs_sync_rwlock either locked or unlocked. That can not be right. Always return with lock unlocked on error. 
Fixes: 4cd9973f9ff6 ("ocfs2: avoid inode removal while nfsd is accessing it") Signed-off-by: Pavel Machek (CIP) Signed-off-by: Andrew Morton Reviewed-by: Joseph Qi Reviewed-by: Andrew Morton Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Changwei Ge Cc: Gang He Cc: Jun Piao Link: http://lkml.kernel.org/r/20200724124443.GA28164@duo.ucw.cz Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- fs/ocfs2/dlmglue.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index c141b06811a6..8149fb6f1f0d 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2867,9 +2867,15 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex) status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE, 0, 0); - if (status < 0) + if (status < 0) { mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status); + if (ex) + up_write(&osb->nfs_sync_rwlock); + else + up_read(&osb->nfs_sync_rwlock); + } + return status; } -- GitLab From 8bf3b460bf2fa14bb3e78b46594d0d52de366da9 Mon Sep 17 00:00:00 2001 From: Drew Fustini Date: Mon, 8 Jun 2020 14:51:43 +0200 Subject: [PATCH 0258/1304] pinctrl-single: fix pcs_parse_pinconf() return value [ Upstream commit f46fe79ff1b65692a65266a5bec6dbe2bf7fc70f ] This patch causes pcs_parse_pinconf() to return -ENOTSUPP when no pinctrl_map is added. The current behavior is to return 0 when !PCS_HAS_PINCONF or !nconfs. Thus pcs_parse_one_pinctrl_entry() incorrectly assumes that a map was added and sets num_maps = 2. Analysis: ========= The function pcs_parse_one_pinctrl_entry() calls pcs_parse_pinconf() if PCS_HAS_PINCONF is enabled. 
The function pcs_parse_pinconf() returns 0 to indicate there was no error and num_maps is then set to 2: 980 static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, 981 struct device_node *np, 982 struct pinctrl_map **map, 983 unsigned *num_maps, 984 const char **pgnames) 985 { 1053 (*map)->type = PIN_MAP_TYPE_MUX_GROUP; 1054 (*map)->data.mux.group = np->name; 1055 (*map)->data.mux.function = np->name; 1056 1057 if (PCS_HAS_PINCONF && function) { 1058 res = pcs_parse_pinconf(pcs, np, function, map); 1059 if (res) 1060 goto free_pingroups; 1061 *num_maps = 2; 1062 } else { 1063 *num_maps = 1; 1064 } However, pcs_parse_pinconf() will also return 0 if !PCS_HAS_PINCONF or !nconfs. I believe these conditions should indicate that no map was added by returning -ENOTSUPP. Otherwise pcs_parse_one_pinctrl_entry() will set num_maps = 2 even though no maps were successfully added, as it does not reach "m++" on line 940: 895 static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, 896 struct pcs_function *func, 897 struct pinctrl_map **map) 898 899 { 900 struct pinctrl_map *m = *map; 917 /* If pinconf isn't supported, don't parse properties in below. */ 918 if (!PCS_HAS_PINCONF) 919 return 0; 920 921 /* cacluate how much properties are supported in current node */ 922 for (i = 0; i < ARRAY_SIZE(prop2); i++) { 923 if (of_find_property(np, prop2[i].name, NULL)) 924 nconfs++; 925 } 926 for (i = 0; i < ARRAY_SIZE(prop4); i++) { 927 if (of_find_property(np, prop4[i].name, NULL)) 928 nconfs++; 929 } 930 if (!nconfs) 919 return 0; 932 933 func->conf = devm_kcalloc(pcs->dev, 934 nconfs, sizeof(struct pcs_conf_vals), 935 GFP_KERNEL); 936 if (!func->conf) 937 return -ENOMEM; 938 func->nconfs = nconfs; 939 conf = &(func->conf[0]); 940 m++; This situtation will cause a boot failure [0] on the BeagleBone Black (AM3358) when am33xx_pinmux node in arch/arm/boot/dts/am33xx-l4.dtsi has compatible = "pinconf-single" instead of "pinctrl-single". 
The patch fixes this issue by returning -ENOSUPP when !PCS_HAS_PINCONF or !nconfs, so that pcs_parse_one_pinctrl_entry() will know that no map was added. Logic is also added to pcs_parse_one_pinctrl_entry() to distinguish between -ENOSUPP and other errors. In the case of -ENOSUPP, num_maps is set to 1 as it is valid for pinconf to be enabled and a given pin group to not any pinconf properties. [0] https://lore.kernel.org/linux-omap/20200529175544.GA3766151@x1/ Fixes: 9dddb4df90d1 ("pinctrl: single: support generic pinconf") Signed-off-by: Drew Fustini Acked-by: Tony Lindgren Link: https://lore.kernel.org/r/20200608125143.GA2789203@x1 Signed-off-by: Linus Walleij Signed-off-by: Sasha Levin --- drivers/pinctrl/pinctrl-single.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 7ec72ff2419a..04a4e761e9a9 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -916,7 +916,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, /* If pinconf isn't supported, don't parse properties in below. 
*/ if (!PCS_HAS_PINCONF) - return 0; + return -ENOTSUPP; /* cacluate how much properties are supported in current node */ for (i = 0; i < ARRAY_SIZE(prop2); i++) { @@ -928,7 +928,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, nconfs++; } if (!nconfs) - return 0; + return -ENOTSUPP; func->conf = devm_kcalloc(pcs->dev, nconfs, sizeof(struct pcs_conf_vals), @@ -1056,9 +1056,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, if (PCS_HAS_PINCONF && function) { res = pcs_parse_pinconf(pcs, np, function, map); - if (res) + if (res == 0) + *num_maps = 2; + else if (res == -ENOTSUPP) + *num_maps = 1; + else goto free_pingroups; - *num_maps = 2; } else { *num_maps = 1; } -- GitLab From fc14f613ca02398a25d9a54ce4f877f9213cd656 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 Jun 2020 12:44:56 -0400 Subject: [PATCH 0259/1304] svcrdma: Fix page leak in svc_rdma_recv_read_chunk() [ Upstream commit e814eecbe3bbeaa8b004d25a4b8974d232b765a9 ] Commit 07d0ff3b0cd2 ("svcrdma: Clean up Read chunk path") moved the page saver logic so that it gets executed event when an error occurs. In that case, the I/O is never posted, and those pages are then leaked. Errors in this path, however, are quite rare. 
Fixes: 07d0ff3b0cd2 ("svcrdma: Clean up Read chunk path") Signed-off-by: Chuck Lever Signed-off-by: Sasha Levin --- net/sunrpc/xprtrdma/svc_rdma_rw.c | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 4fc0ce127089..22f135263815 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -679,7 +679,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, struct svc_rdma_read_info *info, __be32 *p) { - unsigned int i; int ret; ret = -EINVAL; @@ -702,12 +701,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp, info->ri_chunklen += rs_length; } - /* Pages under I/O have been copied to head->rc_pages. - * Prevent their premature release by svc_xprt_release() . - */ - for (i = 0; i < info->ri_readctxt->rc_page_count; i++) - rqstp->rq_pages[i] = NULL; - return ret; } @@ -802,6 +795,26 @@ static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp, return ret; } +/* Pages under I/O have been copied to head->rc_pages. Ensure they + * are not released by svc_xprt_release() until the I/O is complete. + * + * This has to be done after all Read WRs are constructed to properly + * handle a page that is part of I/O on behalf of two different RDMA + * segments. + * + * Do this only if I/O has been posted. Otherwise, we do indeed want + * svc_xprt_release() to clean things up properly. 
+ */ +static void svc_rdma_save_io_pages(struct svc_rqst *rqstp, + const unsigned int start, + const unsigned int num_pages) +{ + unsigned int i; + + for (i = start; i < num_pages + start; i++) + rqstp->rq_pages[i] = NULL; +} + /** * svc_rdma_recv_read_chunk - Pull a Read chunk from the client * @rdma: controlling RDMA transport @@ -855,6 +868,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp, ret = svc_rdma_post_chunk_ctxt(&info->ri_cc); if (ret < 0) goto out_err; + svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count); return 0; out_err: -- GitLab From 4331212e4a6329470dc480bd15ae5cd20a6f1093 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 14 Aug 2020 11:16:17 -0700 Subject: [PATCH 0260/1304] x86/fsgsbase/64: Fix NULL deref in 86_fsgsbase_read_task [ Upstream commit 8ab49526b53d3172d1d8dd03a75c7d1f5bd21239 ] syzbot found its way in 86_fsgsbase_read_task() and triggered this oops: KASAN: null-ptr-deref in range [0x0000000000000008-0x000000000000000f] CPU: 0 PID: 6866 Comm: syz-executor262 Not tainted 5.8.0-syzkaller #0 RIP: 0010:x86_fsgsbase_read_task+0x16d/0x310 arch/x86/kernel/process_64.c:393 Call Trace: putreg32+0x3ab/0x530 arch/x86/kernel/ptrace.c:876 genregs32_set arch/x86/kernel/ptrace.c:1026 [inline] genregs32_set+0xa4/0x100 arch/x86/kernel/ptrace.c:1006 copy_regset_from_user include/linux/regset.h:326 [inline] ia32_arch_ptrace arch/x86/kernel/ptrace.c:1061 [inline] compat_arch_ptrace+0x36c/0xd90 arch/x86/kernel/ptrace.c:1198 __do_compat_sys_ptrace kernel/ptrace.c:1420 [inline] __se_compat_sys_ptrace kernel/ptrace.c:1389 [inline] __ia32_compat_sys_ptrace+0x220/0x2f0 kernel/ptrace.c:1389 do_syscall_32_irqs_on arch/x86/entry/common.c:84 [inline] __do_fast_syscall_32+0x57/0x80 arch/x86/entry/common.c:126 do_fast_syscall_32+0x2f/0x70 arch/x86/entry/common.c:149 entry_SYSENTER_compat_after_hwframe+0x4d/0x5c This can happen if ptrace() or sigreturn() pokes an LDT selector into FS or GS for a task with no LDT and something 
tries to read the base before a return to usermode notices the bad selector and fixes it. The fix is to make sure ldt pointer is not NULL. Fixes: 07e1d88adaae ("x86/fsgsbase/64: Fix ptrace() to read the FS/GS base accurately") Co-developed-by: Jann Horn Signed-off-by: Eric Dumazet Reported-by: syzbot Acked-by: Andy Lutomirski Cc: Chang S. Bae Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Markus T Metzger Cc: Peter Zijlstra Cc: Ravi Shankar Cc: Rik van Riel Cc: Thomas Gleixner Cc: Ingo Molnar Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- arch/x86/kernel/ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 8d4d50645310..1401f86e4007 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -374,7 +374,7 @@ static unsigned long task_seg_base(struct task_struct *task, */ mutex_lock(&task->mm->context.lock); ldt = task->mm->context.ldt; - if (unlikely(idx >= ldt->nr_entries)) + if (unlikely(!ldt || idx >= ldt->nr_entries)) base = 0; else base = get_desc_base(ldt->entries + idx); -- GitLab From 7ea71a7be5e19b3b08cc263d68852af277ec93cb Mon Sep 17 00:00:00 2001 From: Jian Cai Date: Mon, 22 Jun 2020 16:24:33 -0700 Subject: [PATCH 0261/1304] crypto: aesni - add compatibility with IAS [ Upstream commit 44069737ac9625a0f02f0f7f5ab96aae4cd819bc ] Clang's integrated assembler complains "invalid reassignment of non-absolute variable 'var_ddq_add'" while assembling arch/x86/crypto/aes_ctrby8_avx-x86_64.S. It was because var_ddq_add was reassigned with non-absolute values several times, which IAS did not support. We can avoid the reassignment by replacing the uses of var_ddq_add with its definitions accordingly to have compatilibility with IAS. 
Link: https://github.com/ClangBuiltLinux/linux/issues/1008 Reported-by: Sedat Dilek Reported-by: Fangrui Song Tested-by: Sedat Dilek # build+boot Linux v5.7.5; clang v11.0.0-git Signed-off-by: Jian Cai Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 5f6a5af9c489..77043a82da51 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -127,10 +127,6 @@ ddq_add_8: /* generate a unique variable for ddq_add_x */ -.macro setddq n - var_ddq_add = ddq_add_\n -.endm - /* generate a unique variable for xmm register */ .macro setxdata n var_xdata = %xmm\n @@ -140,9 +136,7 @@ ddq_add_8: .macro club name, id .altmacro - .if \name == DDQ_DATA - setddq %\id - .elseif \name == XDATA + .if \name == XDATA setxdata %\id .endif .noaltmacro @@ -165,9 +159,8 @@ ddq_add_8: .set i, 1 .rept (by - 1) - club DDQ_DATA, i club XDATA, i - vpaddq var_ddq_add(%rip), xcounter, var_xdata + vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata vptest ddq_low_msk(%rip), var_xdata jnz 1f vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata @@ -180,8 +173,7 @@ ddq_add_8: vmovdqa 1*16(p_keys), xkeyA vpxor xkey0, xdata0, xdata0 - club DDQ_DATA, by - vpaddq var_ddq_add(%rip), xcounter, xcounter + vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter vptest ddq_low_msk(%rip), xcounter jnz 1f vpaddq ddq_high_add_1(%rip), xcounter, xcounter -- GitLab From ff00ef9aac0a8e17f52169c25f70e5c7e88c87c9 Mon Sep 17 00:00:00 2001 From: John Ogness Date: Thu, 13 Aug 2020 21:45:25 +0206 Subject: [PATCH 0262/1304] af_packet: TPACKET_V3: fix fill status rwlock imbalance [ Upstream commit 88fd1cb80daa20af063bce81e1fad14e945a8dc4 ] After @blk_fill_in_prog_lock is acquired there is an early out vnet situation that can occur. 
In that case, the rwlock needs to be released. Also, since @blk_fill_in_prog_lock is only acquired when @tp_version is exactly TPACKET_V3, only release it on that exact condition as well. And finally, add sparse annotation so that it is clearer that prb_fill_curr_block() and prb_clear_blk_fill_status() are acquiring and releasing @blk_fill_in_prog_lock, respectively. sparse is still unable to understand the balance, but the warnings are now on a higher level that make more sense. Fixes: 632ca50f2cbd ("af_packet: TPACKET_V3: replace busy-wait loop") Signed-off-by: John Ogness Reported-by: kernel test robot Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/packet/af_packet.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 0e029aefa707..b3caf1eac6af 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -949,6 +949,7 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) } static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) + __releases(&pkc->blk_fill_in_prog_lock) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); atomic_dec(&pkc->blk_fill_in_prog); @@ -996,6 +997,7 @@ static void prb_fill_curr_block(char *curr, struct tpacket_kbdq_core *pkc, struct tpacket_block_desc *pbd, unsigned int len) + __acquires(&pkc->blk_fill_in_prog_lock) { struct tpacket3_hdr *ppd; @@ -2272,8 +2274,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, if (do_vnet && virtio_net_hdr_from_skb(skb, h.raw + macoff - sizeof(struct virtio_net_hdr), - vio_le(), true, 0)) + vio_le(), true, 0)) { + if (po->tp_version == TPACKET_V3) + prb_clear_blk_fill_status(&po->rx_ring); goto drop_n_account; + } if (po->tp_version <= TPACKET_V2) { packet_increment_rx_head(po, &po->rx_ring); @@ -2379,7 +2384,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, __clear_bit(slot_id, po->rx_ring.rx_owner_map); 
spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk); - } else { + } else if (po->tp_version == TPACKET_V3) { prb_clear_blk_fill_status(&po->rx_ring); } -- GitLab From 597d2df941303316651fc147457bf85aced2f959 Mon Sep 17 00:00:00 2001 From: Xie He Date: Wed, 5 Aug 2020 18:50:40 -0700 Subject: [PATCH 0263/1304] drivers/net/wan/lapbether: Added needed_headroom and a skb->len check [ Upstream commit c7ca03c216acb14466a713fedf1b9f2c24994ef2 ] 1. Added a skb->len check This driver expects upper layers to include a pseudo header of 1 byte when passing down a skb for transmission. This driver will read this 1-byte header. This patch added a skb->len check before reading the header to make sure the header exists. 2. Changed to use needed_headroom instead of hard_header_len to request necessary headroom to be allocated In net/packet/af_packet.c, the function packet_snd first reserves a headroom of length (dev->hard_header_len + dev->needed_headroom). Then if the socket is a SOCK_DGRAM socket, it calls dev_hard_header, which calls dev->header_ops->create, to create the link layer header. If the socket is a SOCK_RAW socket, it "un-reserves" a headroom of length (dev->hard_header_len), and assumes the user to provide the appropriate link layer header. So according to the logic of af_packet.c, dev->hard_header_len should be the length of the header that would be created by dev->header_ops->create. However, this driver doesn't provide dev->header_ops, so logically dev->hard_header_len should be 0. So we should use dev->needed_headroom instead of dev->hard_header_len to request necessary headroom to be allocated. This change fixes kernel panic when this driver is used with AF_PACKET SOCK_RAW sockets. Call stack when panic: [ 168.399197] skbuff: skb_under_panic: text:ffffffff819d95fb len:20 put:14 head:ffff8882704c0a00 data:ffff8882704c09fd tail:0x11 end:0xc0 dev:veth0 ... 
[ 168.399255] Call Trace: [ 168.399259] skb_push.cold+0x14/0x24 [ 168.399262] eth_header+0x2b/0xc0 [ 168.399267] lapbeth_data_transmit+0x9a/0xb0 [lapbether] [ 168.399275] lapb_data_transmit+0x22/0x2c [lapb] [ 168.399277] lapb_transmit_buffer+0x71/0xb0 [lapb] [ 168.399279] lapb_kick+0xe3/0x1c0 [lapb] [ 168.399281] lapb_data_request+0x76/0xc0 [lapb] [ 168.399283] lapbeth_xmit+0x56/0x90 [lapbether] [ 168.399286] dev_hard_start_xmit+0x91/0x1f0 [ 168.399289] ? irq_init_percpu_irqstack+0xc0/0x100 [ 168.399291] __dev_queue_xmit+0x721/0x8e0 [ 168.399295] ? packet_parse_headers.isra.0+0xd2/0x110 [ 168.399297] dev_queue_xmit+0x10/0x20 [ 168.399298] packet_sendmsg+0xbf0/0x19b0 ...... Cc: Willem de Bruijn Cc: Martin Schiller Cc: Brian Norris Signed-off-by: Xie He Acked-by: Willem de Bruijn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/wan/lapbether.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index ac34257e9f20..c94dfa70f2a3 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -160,6 +160,12 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, if (!netif_running(dev)) goto drop; + /* There should be a pseudo header of 1 byte added by upper layers. + * Check to make sure it is there before reading it. + */ + if (skb->len < 1) + goto drop; + switch (skb->data[0]) { case X25_IFACE_DATA: break; @@ -308,6 +314,7 @@ static void lapbeth_setup(struct net_device *dev) dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; + dev->hard_header_len = 0; dev->mtu = 1000; dev->addr_len = 0; } @@ -334,7 +341,8 @@ static int lapbeth_new_device(struct net_device *dev) * then this driver prepends a length field of 2 bytes, * then the underlying Ethernet device prepends its own header. 
*/ - ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len; + ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + + dev->needed_headroom; lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; -- GitLab From b200620cada4eaa63108be32e040ed557a1965fd Mon Sep 17 00:00:00 2001 From: Qingyu Li Date: Mon, 10 Aug 2020 09:51:00 +0800 Subject: [PATCH 0264/1304] net/nfc/rawsock.c: add CAP_NET_RAW check. [ Upstream commit 26896f01467a28651f7a536143fe5ac8449d4041 ] When creating a raw AF_NFC socket, CAP_NET_RAW needs to be checked first. Signed-off-by: Qingyu Li Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/nfc/rawsock.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index e2188deb08dc..b927730d9ab0 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -344,10 +344,13 @@ static int rawsock_create(struct net *net, struct socket *sock, if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; - if (sock->type == SOCK_RAW) + if (sock->type == SOCK_RAW) { + if (!capable(CAP_NET_RAW)) + return -EPERM; sock->ops = &rawsock_raw_ops; - else + } else { sock->ops = &rawsock_ops; + } sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) -- GitLab From 520f0e37ba9ea2ca70135d99924652a106f85072 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Thu, 6 Aug 2020 19:53:16 +0800 Subject: [PATCH 0265/1304] net: Set fput_needed iff FDPUT_FPUT is set [ Upstream commit ce787a5a074a86f76f5d3fd804fa78e01bfb9e89 ] We should fput() file iff FDPUT_FPUT is set. So we should set fput_needed accordingly. Fixes: 00e188ef6a7e ("sockfd_lookup_light(): switch to fdget^W^Waway from fget_light") Signed-off-by: Miaohe Lin Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/socket.c b/net/socket.c index 1030a612423b..29169045dcfe 100644 --- a/net/socket.c +++ b/net/socket.c @@ -474,7 +474,7 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) if (f.file) { sock = sock_from_file(f.file, err); if (likely(sock)) { - *fput_needed = f.flags; + *fput_needed = f.flags & FDPUT_FPUT; return sock; } fdput(f); -- GitLab From bc8ce7de36b00ad3d416a03555cb4f4bdd4519ef Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Mon, 10 Aug 2020 17:02:58 -0700 Subject: [PATCH 0266/1304] net/tls: Fix kmap usage [ Upstream commit b06c19d9f827f6743122795570bfc0c72db482b0 ] When MSG_OOB is specified to tls_device_sendpage() the mapped page is never unmapped. Hold off mapping the page until after the flags are checked and the page is actually needed. Fixes: e8f69799810c ("net/tls: Add generic NIC offload infrastructure") Signed-off-by: Ira Weiny Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/tls/tls_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 8f40bbfd60ea..575d62130578 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -476,7 +476,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct iov_iter msg_iter; - char *kaddr = kmap(page); + char *kaddr; struct kvec iov; int rc; @@ -490,6 +490,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page, goto out; } + kaddr = kmap(page); iov.iov_base = kaddr + offset; iov.iov_len = size; iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size); -- GitLab From 42f4480a37d682da5d144488f4c7443fd41d5067 Mon Sep 17 00:00:00 2001 From: Tim Froidcoeur Date: Tue, 11 Aug 2020 20:33:23 +0200 Subject: [PATCH 0267/1304] net: refactor bind_bucket fastreuse into helper [ Upstream commit 62ffc589abb176821662efc4525ee4ac0b9c3894 ] Refactor the fastreuse update code in inet_csk_get_port into a small helper function that can be called from other places. Acked-by: Matthieu Baerts Signed-off-by: Tim Froidcoeur Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- include/net/inet_connection_sock.h | 4 ++ net/ipv4/inet_connection_sock.c | 93 ++++++++++++++++-------------- 2 files changed, 55 insertions(+), 42 deletions(-) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 371b3b45fd5c..2d5220ab0600 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -313,5 +313,9 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); +/* update the fast reuse flag when adding a socket */ +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk); + struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); #endif /* _INET_CONNECTION_SOCK_H */ diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index ddbe58f0d597..534e2598981d 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -285,51 +285,12 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb, ipv6_only_sock(sk), true, false); } -/* Obtain a reference to a local port for the given sock, - * if snum is zero it means select any available local port. 
- * We try to allocate an odd port (and leave even ports for connect()) - */ -int inet_csk_get_port(struct sock *sk, unsigned short snum) +void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, + struct sock *sk) { - bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; - struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; - int ret = 1, port = snum; - struct inet_bind_hashbucket *head; - struct net *net = sock_net(sk); - struct inet_bind_bucket *tb = NULL; kuid_t uid = sock_i_uid(sk); + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; - if (!port) { - head = inet_csk_find_open_port(sk, &tb, &port); - if (!head) - return ret; - if (!tb) - goto tb_not_found; - goto success; - } - head = &hinfo->bhash[inet_bhashfn(net, port, - hinfo->bhash_size)]; - spin_lock_bh(&head->lock); - inet_bind_bucket_for_each(tb, &head->chain) - if (net_eq(ib_net(tb), net) && tb->port == port) - goto tb_found; -tb_not_found: - tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, - net, head, port); - if (!tb) - goto fail_unlock; -tb_found: - if (!hlist_empty(&tb->owners)) { - if (sk->sk_reuse == SK_FORCE_REUSE) - goto success; - - if ((tb->fastreuse > 0 && reuse) || - sk_reuseport_match(tb, sk)) - goto success; - if (inet_csk_bind_conflict(sk, tb, true, true)) - goto fail_unlock; - } -success: if (hlist_empty(&tb->owners)) { tb->fastreuse = reuse; if (sk->sk_reuseport) { @@ -373,6 +334,54 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) tb->fastreuseport = 0; } } +} + +/* Obtain a reference to a local port for the given sock, + * if snum is zero it means select any available local port. 
+ * We try to allocate an odd port (and leave even ports for connect()) + */ +int inet_csk_get_port(struct sock *sk, unsigned short snum) +{ + bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN; + struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo; + int ret = 1, port = snum; + struct inet_bind_hashbucket *head; + struct net *net = sock_net(sk); + struct inet_bind_bucket *tb = NULL; + + if (!port) { + head = inet_csk_find_open_port(sk, &tb, &port); + if (!head) + return ret; + if (!tb) + goto tb_not_found; + goto success; + } + head = &hinfo->bhash[inet_bhashfn(net, port, + hinfo->bhash_size)]; + spin_lock_bh(&head->lock); + inet_bind_bucket_for_each(tb, &head->chain) + if (net_eq(ib_net(tb), net) && tb->port == port) + goto tb_found; +tb_not_found: + tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, + net, head, port); + if (!tb) + goto fail_unlock; +tb_found: + if (!hlist_empty(&tb->owners)) { + if (sk->sk_reuse == SK_FORCE_REUSE) + goto success; + + if ((tb->fastreuse > 0 && reuse) || + sk_reuseport_match(tb, sk)) + goto success; + if (inet_csk_bind_conflict(sk, tb, true, true)) + goto fail_unlock; + } +success: + inet_csk_update_fastreuse(tb, sk); + if (!inet_csk(sk)->icsk_bind_hash) inet_bind_hash(sk, tb, port); WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); -- GitLab From 0e866a435676b6c6fa546bdc1f6709be6367685e Mon Sep 17 00:00:00 2001 From: Tim Froidcoeur Date: Tue, 11 Aug 2020 20:33:24 +0200 Subject: [PATCH 0268/1304] net: initialize fastreuse on inet_inherit_port [ Upstream commit d76f3351cea2d927fdf70dd7c06898235035e84e ] In the case of TPROXY, bind_conflict optimizations for SO_REUSEADDR or SO_REUSEPORT are broken, possibly resulting in O(n) instead of O(1) bind behaviour or in the incorrect reuse of a bind. the kernel keeps track for each bind_bucket if all sockets in the bind_bucket support SO_REUSEADDR or SO_REUSEPORT in two fastreuse flags. 
These flags allow skipping the costly bind_conflict check when possible (meaning when all sockets have the proper SO_REUSE option). For every socket added to a bind_bucket, these flags need to be updated. As soon as a socket that does not support reuse is added, the flag is set to false and will never go back to true, unless the bind_bucket is deleted. Note that there is no mechanism to re-evaluate these flags when a socket is removed (this might make sense when removing a socket that would not allow reuse; this leaves room for a future patch). For this optimization to work, it is mandatory that these flags are properly initialized and updated. When a child socket is created from a listen socket in __inet_inherit_port, the TPROXY case could create a new bind bucket without properly initializing these flags, thus preventing the optimization to work. Alternatively, a socket not allowing reuse could be added to an existing bind bucket without updating the flags, causing bind_conflict to never be called as it should. Call inet_csk_update_fastreuse when __inet_inherit_port decides to create a new bind_bucket or use a different bind_bucket than the one of the listen socket. Fixes: 093d282321da ("tproxy: fix hash locking issue when using port redirection in __inet_inherit_port()") Acked-by: Matthieu Baerts Signed-off-by: Tim Froidcoeur Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/inet_hashtables.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index b53da2691adb..3a5f12f011cb 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -161,6 +161,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) return -ENOMEM; } } + inet_csk_update_fastreuse(tb, child); } inet_bind_hash(child, tb, port); spin_unlock(&head->lock); -- GitLab From 05c411cafd1c5e01760fb800b5dc6c0445521573 Mon Sep 17 00:00:00 2001 From: Brant Merryman Date: Fri, 26 Jun 2020 04:24:20 +0000 Subject: [PATCH 0269/1304] USB: serial: cp210x: re-enable auto-RTS on open commit c7614ff9b73a1e6fb2b1b51396da132ed22fecdb upstream. CP210x hardware disables auto-RTS but leaves auto-CTS when in hardware flow control mode and UART on cp210x hardware is disabled. When re-opening the port, if auto-CTS is enabled on the cp210x, then auto-RTS must be re-enabled in the driver. 
Signed-off-by: Brant Merryman Co-developed-by: Phu Luu Signed-off-by: Phu Luu Link: https://lore.kernel.org/r/ECCF8E73-91F3-4080-BE17-1714BC8818FB@silabs.com [ johan: fix up tags and problem description ] Fixes: 39a66b8d22a3 ("[PATCH] USB: CP2101 Add support for flow control") Cc: stable # 2.6.12 Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/cp210x.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 7ae121567098..69220f13c864 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -893,6 +893,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, u32 baud; u16 bits; u32 ctl_hs; + u32 flow_repl; cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud); @@ -993,6 +994,22 @@ static void cp210x_get_termios_port(struct usb_serial_port *port, ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake); if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) { dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__); + /* + * When the port is closed, the CP210x hardware disables + * auto-RTS and RTS is deasserted but it leaves auto-CTS when + * in hardware flow control mode. When re-opening the port, if + * auto-CTS is enabled on the cp210x, then auto-RTS must be + * re-enabled in the driver. 
+ */ + flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace); + flow_repl &= ~CP210X_SERIAL_RTS_MASK; + flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL); + flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl); + cp210x_write_reg_block(port, + CP210X_SET_FLOW, + &flow_ctl, + sizeof(flow_ctl)); + cflag |= CRTSCTS; } else { dev_dbg(dev, "%s - flow control = NONE\n", __func__); -- GitLab From 6614bb86cec4d4e1dac5119199fe622efcdbd738 Mon Sep 17 00:00:00 2001 From: Brant Merryman Date: Fri, 26 Jun 2020 04:22:58 +0000 Subject: [PATCH 0270/1304] USB: serial: cp210x: enable usb generic throttle/unthrottle commit 4387b3dbb079d482d3c2b43a703ceed4dd27ed28 upstream. Assign the .throttle and .unthrottle functions to be generic function in the driver structure to prevent data loss that can otherwise occur if the host does not enable USB throttling. Signed-off-by: Brant Merryman Co-developed-by: Phu Luu Signed-off-by: Phu Luu Link: https://lore.kernel.org/r/57401AF3-9961-461F-95E1-F8AFC2105F5E@silabs.com [ johan: fix up tags ] Fixes: 39a66b8d22a3 ("[PATCH] USB: CP2101 Add support for flow control") Cc: stable # 2.6.12 Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/cp210x.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 69220f13c864..46ec30a2c516 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -271,6 +271,8 @@ static struct usb_serial_driver cp210x_device = { .break_ctl = cp210x_break_ctl, .set_termios = cp210x_set_termios, .tx_empty = cp210x_tx_empty, + .throttle = usb_serial_generic_throttle, + .unthrottle = usb_serial_generic_unthrottle, .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, .attach = cp210x_attach, -- GitLab From db0d4c7a97d8848f89ebff4074ff9c6d12f8b987 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Mon, 10 Aug 2020 10:16:59 +0800 Subject: [PATCH 0271/1304] ALSA: hda - fix the micmute led status for Lenovo ThinkCentre 
AIO commit 386a6539992b82fe9ac4f9dc3f548956fd894d8c upstream. After installing the Ubuntu Linux, the micmute led status is not correct. Users expect that the led is on if the capture is disabled, but with the current kernel, the led is off with the capture disabled. We tried the old linux kernel like linux-4.15, there is no this issue. It looks like we introduced this issue when switching to the led_cdev. Cc: Signed-off-by: Hui Wang Link: https://lore.kernel.org/r/20200810021659.7429-1-hui.wang@canonical.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index fed9df479ef8..9c5b3d19bfa7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4072,6 +4072,7 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec, { struct alc_spec *spec = codec->spec; + spec->micmute_led_polarity = 1; alc_fixup_hp_gpio_led(codec, action, 0, 0x04); if (action == HDA_FIXUP_ACT_PRE_PROBE) { spec->init_amp = ALC_INIT_DEFAULT; -- GitLab From ce46fc10b1ca91b2c472e0c0b20a88838d9080c0 Mon Sep 17 00:00:00 2001 From: Mirko Dietrich Date: Thu, 6 Aug 2020 14:48:50 +0200 Subject: [PATCH 0272/1304] ALSA: usb-audio: Creative USB X-Fi Pro SB1095 volume knob support commit fec9008828cde0076aae595ac031bfcf49d335a4 upstream. Adds an entry for Creative USB X-Fi to the rc_config array in mixer_quirks.c to allow use of volume knob on the device. Adds support for newer X-Fi Pro card, known as "Model No. 
SB1095" with USB ID "041e:3263" Signed-off-by: Mirko Dietrich Cc: Link: https://lore.kernel.org/r/20200806124850.20334-1-buzz@l4m1.de Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/mixer_quirks.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 21c1135bb89b..d1328abd1bc4 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -195,6 +195,7 @@ static const struct rc_config { { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */ { USB_ID(0x041e, 0x30df), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3237), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ + { USB_ID(0x041e, 0x3263), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 Pro */ { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ }; -- GitLab From 7f496a7224b1a70a07ed7ed0d5b329980730816e Mon Sep 17 00:00:00 2001 From: Hector Martin Date: Mon, 10 Aug 2020 13:53:19 +0900 Subject: [PATCH 0273/1304] ALSA: usb-audio: fix overeager device match for MacroSilicon MS2109 commit 14a720dc1f5332f3bdf30a23a3bc549e81be974c upstream. Matching by device matches all interfaces, which breaks the video/HID portions of the device depending on module load order. Fixes: e337bf19f6af ("ALSA: usb-audio: add quirk for MacroSilicon MS2109") Cc: stable@vger.kernel.org Signed-off-by: Hector Martin Link: https://lore.kernel.org/r/20200810045319.128745-1-marcan@marcan.st Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/quirks-table.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 41a5e38b7870..106dd8529bad 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3472,7 +3472,13 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */ * with. 
*/ { - USB_DEVICE(0x534d, 0x2109), + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | + USB_DEVICE_ID_MATCH_INT_CLASS | + USB_DEVICE_ID_MATCH_INT_SUBCLASS, + .idVendor = 0x534d, + .idProduct = 0x2109, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { .vendor_name = "MacroSilicon", .product_name = "MS2109", -- GitLab From 820ec1efe430395f6a699fa920c516bf0a70aa1c Mon Sep 17 00:00:00 2001 From: Hector Martin Date: Mon, 10 Aug 2020 17:24:00 +0900 Subject: [PATCH 0274/1304] ALSA: usb-audio: work around streaming quirk for MacroSilicon MS2109 commit 1b7ecc241a67ad6b584e071bd791a54e0cd5f097 upstream. Further investigation of the L-R swap problem on the MS2109 reveals that the problem isn't that the channels are swapped, but rather that they are swapped and also out of phase by one sample. In other words, the issue is actually that the very first frame that comes from the hardware is a half-frame containing only the right channel, and after that everything becomes offset. So introduce a new quirk field to drop the very first 2 bytes that come in after the format is configured and a capture stream starts. This puts the channels in phase and in the correct order. 
Cc: stable@vger.kernel.org Signed-off-by: Hector Martin Link: https://lore.kernel.org/r/20200810082400.225858-1-marcan@marcan.st Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/card.h | 1 + sound/usb/pcm.c | 6 ++++++ sound/usb/quirks.c | 3 +++ sound/usb/stream.c | 1 + 4 files changed, 11 insertions(+) diff --git a/sound/usb/card.h b/sound/usb/card.h index 7f11655bde50..f30fec68e2ca 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -129,6 +129,7 @@ struct snd_usb_substream { unsigned int tx_length_quirk:1; /* add length specifier to transfers */ unsigned int fmt_type; /* USB audio format type (1-3) */ unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */ + unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */ unsigned int running: 1; /* running status */ diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 8b91be394407..7743e7bc6bf2 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -1387,6 +1387,12 @@ static void retire_capture_urb(struct snd_usb_substream *subs, // continue; } bytes = urb->iso_frame_desc[i].actual_length; + if (subs->stream_offset_adj > 0) { + unsigned int adj = min(subs->stream_offset_adj, bytes); + cp += adj; + bytes -= adj; + subs->stream_offset_adj -= adj; + } frames = bytes / stride; if (!subs->txfr_quirk) bytes = frames * stride; diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index e9ec6166acc6..8d9117312e30 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1166,6 +1166,9 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */ set_format_emu_quirk(subs, fmt); break; + case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ + subs->stream_offset_adj = 2; + break; } } diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 9d020bd0de17..ff5d803cfaf0 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -99,6 +99,7 @@ static 
void snd_usb_init_substream(struct snd_usb_stream *as, subs->tx_length_quirk = as->chip->tx_length_quirk; subs->speed = snd_usb_get_speed(subs->dev); subs->pkt_offset_adj = 0; + subs->stream_offset_adj = 0; snd_usb_set_pcm_ops(as->pcm, stream); -- GitLab From 3b71aed505934d9fe4d30c07e7a2d55d9b8291b2 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Mon, 6 Jul 2020 19:37:36 -0700 Subject: [PATCH 0275/1304] pstore: Fix linking when crypto API disabled commit fd49e03280e596e54edb93a91bc96170f8e97e4a upstream. When building a kernel with CONFIG_PSTORE=y and CONFIG_CRYPTO not set, a build error happens: ld: fs/pstore/platform.o: in function `pstore_dump': platform.c:(.text+0x3f9): undefined reference to `crypto_comp_compress' ld: fs/pstore/platform.o: in function `pstore_get_backend_records': platform.c:(.text+0x784): undefined reference to `crypto_comp_decompress' This because some pstore code uses crypto_comp_(de)compress regardless of the CONFIG_CRYPTO status. Fix it by wrapping the (de)compress usage by IS_ENABLED(CONFIG_PSTORE_COMPRESS) Signed-off-by: Matteo Croce Link: https://lore.kernel.org/lkml/20200706234045.9516-1-mcroce@linux.microsoft.com Fixes: cb3bee0369bc ("pstore: Use crypto compress API") Cc: stable@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: Greg Kroah-Hartman --- fs/pstore/platform.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index dcd9c3163587..2197bf68f278 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -250,6 +250,9 @@ static int pstore_compress(const void *in, void *out, { int ret; + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION)) + return -EINVAL; + ret = crypto_comp_compress(tfm, in, inlen, out, &outlen); if (ret) { pr_err("crypto_comp_compress failed, ret = %d!\n", ret); @@ -647,7 +650,7 @@ static void decompress_record(struct pstore_record *record) int unzipped_len; char *decompressed; - if (!record->compressed) + if 
(!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed) return; /* Only PSTORE_TYPE_DMESG support compression. */ -- GitLab From 92d211b1217cc1bffaa1c8a7a968104522ffbcb4 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 17 Jun 2020 09:49:52 -0400 Subject: [PATCH 0276/1304] crypto: hisilicon - don't sleep of CRYPTO_TFM_REQ_MAY_SLEEP was not specified commit 5ead051780404b5cb22147170acadd1994dc3236 upstream. There is this call chain: sec_alg_skcipher_encrypt -> sec_alg_skcipher_crypto -> sec_alg_alloc_and_calc_split_sizes -> kcalloc where we call sleeping allocator function even if CRYPTO_TFM_REQ_MAY_SLEEP was not specified. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # v4.19+ Fixes: 915e4e8413da ("crypto: hisilicon - SEC security accelerator driver") Acked-by: Jonathan Cameron Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/hisilicon/sec/sec_algs.c | 34 +++++++++++++------------ 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index bf9658800bda..3e3cc28d5cfe 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, dma_addr_t *psec_sgl, struct scatterlist *sgl, int count, - struct sec_dev_info *info) + struct sec_dev_info *info, + gfp_t gfp) { struct sec_hw_sgl *sgl_current = NULL; struct sec_hw_sgl *sgl_next; @@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, sge_index = i % SEC_MAX_SGE_NUM; if (sge_index == 0) { sgl_next = dma_pool_zalloc(info->hw_sgl_pool, - GFP_KERNEL, &sgl_next_dma); + gfp, &sgl_next_dma); if (!sgl_next) { ret = -ENOMEM; goto err_free_hw_sgls; @@ -553,14 +554,14 @@ void sec_alg_callback(struct sec_bd_info *resp, void *shadow) } static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes, - int *steps) + int 
*steps, gfp_t gfp) { size_t *sizes; int i; /* Split into suitable sized blocks */ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT; - sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL); + sizes = kcalloc(*steps, sizeof(*sizes), gfp); if (!sizes) return -ENOMEM; @@ -576,7 +577,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, int steps, struct scatterlist ***splits, int **splits_nents, int sgl_len_in, - struct device *dev) + struct device *dev, gfp_t gfp) { int ret, count; @@ -584,12 +585,12 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, if (!count) return -EINVAL; - *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL); + *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp); if (!*splits) { ret = -ENOMEM; goto err_unmap_sg; } - *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL); + *splits_nents = kcalloc(steps, sizeof(int), gfp); if (!*splits_nents) { ret = -ENOMEM; goto err_free_splits; @@ -597,7 +598,7 @@ static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, /* output the scatter list before and after this */ ret = sg_split(sgl, count, 0, steps, split_sizes, - *splits, *splits_nents, GFP_KERNEL); + *splits, *splits_nents, gfp); if (ret) { ret = -ENOMEM; goto err_free_splits_nents; @@ -638,13 +639,13 @@ static struct sec_request_el int el_size, bool different_dest, struct scatterlist *sgl_in, int n_ents_in, struct scatterlist *sgl_out, int n_ents_out, - struct sec_dev_info *info) + struct sec_dev_info *info, gfp_t gfp) { struct sec_request_el *el; struct sec_bd_info *req; int ret; - el = kzalloc(sizeof(*el), GFP_KERNEL); + el = kzalloc(sizeof(*el), gfp); if (!el) return ERR_PTR(-ENOMEM); el->el_length = el_size; @@ -676,7 +677,7 @@ static struct sec_request_el el->sgl_in = sgl_in; ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in, - n_ents_in, info); + n_ents_in, info, gfp); if (ret) goto err_free_el; @@ -687,7 +688,7 @@ static 
struct sec_request_el el->sgl_out = sgl_out; ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out, el->sgl_out, - n_ents_out, info); + n_ents_out, info, gfp); if (ret) goto err_free_hw_sgl_in; @@ -728,6 +729,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, int *splits_out_nents = NULL; struct sec_request_el *el, *temp; bool split = skreq->src != skreq->dst; + gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; @@ -736,13 +738,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_in = sg_nents(skreq->src); ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes, - &steps); + &steps, gfp); if (ret) return ret; sec_req->num_elements = steps; ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in, &splits_in_nents, sec_req->len_in, - info->dev); + info->dev, gfp); if (ret) goto err_free_split_sizes; @@ -750,7 +752,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, - sec_req->len_out, info->dev); + sec_req->len_out, info->dev, gfp); if (ret) goto err_unmap_in_sg; } @@ -783,7 +785,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, splits_in[i], splits_in_nents[i], split ? splits_out[i] : NULL, split ? splits_out_nents[i] : 0, - info); + info, gfp); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; -- GitLab From ef1b91644f50ca4c6aaeb30bcfe188f13c099660 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 13 Jul 2020 07:06:34 -0700 Subject: [PATCH 0277/1304] crypto: qat - fix double free in qat_uclo_create_batch_init_list commit c06c76602e03bde24ee69a2022a829127e504202 upstream. 
clang static analysis flags this error qat_uclo.c:297:3: warning: Attempt to free released memory [unix.Malloc] kfree(*init_tab_base); ^~~~~~~~~~~~~~~~~~~~~ When input *init_tab_base is null, the function allocates memory for the head of the list. When there is a problem allocating other list elements the list is unwound and freed. Then a check is made if the list head was allocated and is also freed. Keeping track of what may need to be freed is the variable 'tail_old'. The unwinding/freeing block is while (tail_old) { mem_init = tail_old->next; kfree(tail_old); tail_old = mem_init; } The problem is that the first element of tail_old is also what was allocated for the list head init_header = kzalloc(sizeof(*init_header), GFP_KERNEL); ... *init_tab_base = init_header; flag = 1; } tail_old = init_header; So *init_tab_base/init_header are freed twice. There is another problem. When the input *init_tab_base is non null, tail_old is calculated by traveling down the list to the first non null entry. tail_old = init_header; while (tail_old->next) tail_old = tail_old->next; When the unwinding free happens, the last entry of the input list will be freed. So the freeing needs a general change. If locally allocated, the first element of tail_old is freed, else it is skipped. As a bit of cleanup, reset *init_tab_base if it came in as null. 
Fixes: b4b7e67c917f ("crypto: qat - Intel(R) QAT ucode part of fw loader") Cc: Signed-off-by: Tom Rix Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/qat/qat_common/qat_uclo.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 6bd8f6a2a24f..aeb03081415c 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle } return 0; out_err: + /* Do not free the list head unless we allocated it. */ + tail_old = tail_old->next; + if (flag) { + kfree(*init_tab_base); + *init_tab_base = NULL; + } + while (tail_old) { mem_init = tail_old->next; kfree(tail_old); tail_old = mem_init; } - if (flag) - kfree(*init_tab_base); return -ENOMEM; } -- GitLab From aabc11c145177650ec2cd7f6a496aa4c4a77a64f Mon Sep 17 00:00:00 2001 From: John Allen Date: Mon, 22 Jun 2020 15:24:02 -0500 Subject: [PATCH 0278/1304] crypto: ccp - Fix use of merged scatterlists commit 8a302808c60d441d9884cb00ea7f2b534f2e3ca5 upstream. Running the crypto manager self tests with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS may result in several types of errors when using the ccp-crypto driver: alg: skcipher: cbc-des3-ccp encryption failed on test vector 0; expected_error=0, actual_error=-5 ... alg: skcipher: ctr-aes-ccp decryption overran dst buffer on test vector 0 ... alg: ahash: sha224-ccp test failed (wrong result) on test vector ... These errors are the result of improper processing of scatterlists mapped for DMA. Given a scatterlist in which entries are merged as part of mapping the scatterlist for DMA, the DMA length of a merged entry will reflect the combined length of the entries that were merged. 
The subsequent scatterlist entry will contain DMA information for the scatterlist entry after the last merged entry, but the non-DMA information will be that of the first merged entry. The ccp driver does not take this scatterlist merging into account. To address this, add a second scatterlist pointer to track the current position in the DMA mapped representation of the scatterlist. Both the DMA representation and the original representation of the scatterlist must be tracked as while most of the driver can use just the DMA representation, scatterlist_map_and_copy() must use the original representation and expects the scatterlist pointer to be accurate to the original representation. In order to properly walk the original scatterlist, the scatterlist must be walked until the combined lengths of the entries seen is equal to the DMA length of the current entry being processed in the DMA mapped representation. Fixes: 63b945091a070 ("crypto: ccp - CCP device driver and interface support") Signed-off-by: John Allen Cc: stable@vger.kernel.org Acked-by: Tom Lendacky Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/ccp/ccp-dev.h | 1 + drivers/crypto/ccp/ccp-ops.c | 37 +++++++++++++++++++++++++----------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 7442b0422f8a..bd43b5c1450c 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -471,6 +471,7 @@ struct ccp_sg_workarea { unsigned int sg_used; struct scatterlist *dma_sg; + struct scatterlist *dma_sg_head; struct device *dma_dev; unsigned int dma_count; enum dma_data_direction dma_dir; diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 43b74cf0787e..626b643d610e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -67,7 +67,7 @@ static u32 ccp_gen_jobid(struct ccp_device *ccp) static void ccp_sg_free(struct ccp_sg_workarea *wa) { if 
(wa->dma_count) - dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir); + dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); wa->dma_count = 0; } @@ -96,6 +96,7 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, return 0; wa->dma_sg = sg; + wa->dma_sg_head = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); @@ -108,14 +109,28 @@ static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); + unsigned int sg_combined_len = 0; if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; - if (wa->sg_used == wa->sg->length) { - wa->sg = sg_next(wa->sg); + if (wa->sg_used == sg_dma_len(wa->dma_sg)) { + /* Advance to the next DMA scatterlist entry */ + wa->dma_sg = sg_next(wa->dma_sg); + + /* In the case that the DMA mapped scatterlist has entries + * that have been merged, the non-DMA mapped scatterlist + * must be advanced multiple times for each merged entry. + * This ensures that the current non-DMA mapped entry + * corresponds to the current DMA mapped entry. + */ + do { + sg_combined_len += wa->sg->length; + wa->sg = sg_next(wa->sg); + } while (wa->sg_used > sg_combined_len); + wa->sg_used = 0; } } @@ -304,7 +319,7 @@ static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { - nbytes = min(sg_wa->sg->length - sg_wa->sg_used, + nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); @@ -336,11 +351,11 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, * and destination. 
The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. */ - sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; + sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); if (dst) { - sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; + sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); op_len = min(sg_src_len, sg_dst_len); } else { @@ -370,7 +385,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough data in the sg element, but we need to * adjust for any previously copied data */ - op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); + op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); op->src.u.dma.offset = src->sg_wa.sg_used; op->src.u.dma.length = op_len & ~(block_size - 1); @@ -391,7 +406,7 @@ static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, /* Enough room in the sg element, but we need to * adjust for any previously used area */ - op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); + op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); op->dst.u.dma.offset = dst->sg_wa.sg_used; op->dst.u.dma.length = op->src.u.dma.length; } @@ -2034,7 +2049,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) dst.sg_wa.sg_used = 0; for (i = 1; i <= src.sg_wa.dma_count; i++) { if (!dst.sg_wa.sg || - (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { + (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) { ret = -EINVAL; goto e_dst; } @@ -2060,8 +2075,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) goto e_dst; } - dst.sg_wa.sg_used += src.sg_wa.sg->length; - if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { + dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg); + if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) { dst.sg_wa.sg = sg_next(dst.sg_wa.sg); 
dst.sg_wa.sg_used = 0; } -- GitLab From c7590efac26aa5f4ed2487f5d8fbfb1ccc7cfacc Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 17 Jun 2020 09:48:56 -0400 Subject: [PATCH 0279/1304] crypto: cpt - don't sleep of CRYPTO_TFM_REQ_MAY_SLEEP was not specified commit 9e27c99104707f083dccd3b4d79762859b5a0614 upstream. There is this call chain: cvm_encrypt -> cvm_enc_dec -> cptvf_do_request -> process_request -> kzalloc where we call sleeping allocator function even if CRYPTO_TFM_REQ_MAY_SLEEP was not specified. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # v4.11+ Fixes: c694b233295b ("crypto: cavium - Add the Virtual Function driver for CPT") Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/cavium/cpt/cptvf_algs.c | 1 + drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 12 ++++++------ drivers/crypto/cavium/cpt/request_manager.h | 2 ++ 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 600336d169a9..cd4d60d318ba 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -205,6 +205,7 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc) int status; memset(req_info, 0, sizeof(struct cpt_request_info)); + req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0; memset(fctx, 0, sizeof(struct fc_context)); create_input_list(req, enc, enc_iv_len); create_output_list(req, enc_iv_len); diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c index b0ba4331944b..43fe69d0981a 100644 --- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c +++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c @@ -136,7 +136,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup gather (input) components */ g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component); - info->gather_components = kzalloc(g_sz_bytes, 
GFP_KERNEL); + info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->gather_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -153,7 +153,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Setup scatter (output) components */ s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component); - info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL); + info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->scatter_components) { ret = -ENOMEM; goto scatter_gather_clean; @@ -170,7 +170,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, /* Create and initialize DPTR */ info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; - info->in_buffer = kzalloc(info->dlen, GFP_KERNEL); + info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->in_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -198,7 +198,7 @@ static inline int setup_sgio_list(struct cpt_vf *cptvf, } /* Create and initialize RPTR */ - info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL); + info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->out_buffer) { ret = -ENOMEM; goto scatter_gather_clean; @@ -434,7 +434,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) struct cpt_vq_command vq_cmd; union cpt_inst_s cptinst; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info)) { dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n"); return -ENOMEM; @@ -456,7 +456,7 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) * Get buffer for union cpt_res_s response * structure and its physical address */ - info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL); + info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? 
GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info->completion_addr)) { dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); ret = -ENOMEM; diff --git a/drivers/crypto/cavium/cpt/request_manager.h b/drivers/crypto/cavium/cpt/request_manager.h index 80ee074c6e0c..09930d95ad24 100644 --- a/drivers/crypto/cavium/cpt/request_manager.h +++ b/drivers/crypto/cavium/cpt/request_manager.h @@ -65,6 +65,8 @@ struct cpt_request_info { union ctrl_info ctrl; /* User control information */ struct cptvf_request req; /* Request Information (Core specific) */ + bool may_sleep; + struct buf_ptr in[MAX_BUF_CNT]; struct buf_ptr out[MAX_BUF_CNT]; -- GitLab From 2824988a204adf11c1d646a8ef4d192a2d30cff2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 10 Aug 2020 11:21:11 -0700 Subject: [PATCH 0280/1304] bitfield.h: don't compile-time validate _val in FIELD_FIT commit 444da3f52407d74c9aa12187ac6b01f76ee47d62 upstream. When ur_load_imm_any() is inlined into jeq_imm(), it's possible for the compiler to deduce a case where _val can only have the value of -1 at compile time. Specifically, /* struct bpf_insn: _s32 imm */ u64 imm = insn->imm; /* sign extend */ if (imm >> 32) { /* non-zero only if insn->imm is negative */ /* inlined from ur_load_imm_any */ u32 __imm = imm >> 32; /* therefore, always 0xffffffff */ if (__builtin_constant_p(__imm) && __imm > 255) compiletime_assert_XXX() This can result in tripping a BUILD_BUG_ON() in __BF_FIELD_CHECK() that checks that a given value is representable in one byte (interpreted as unsigned). FIELD_FIT() should return true or false at runtime for whether a value can fit for not. Don't break the build over a value that's too large for the mask. We'd prefer to keep the inlining and compiler optimizations though we know this case will always return false. 
Cc: stable@vger.kernel.org Fixes: 1697599ee301a ("bitfield.h: add FIELD_FIT() helper") Link: https://lore.kernel.org/kernel-hardening/CAK7LNASvb0UDJ0U5wkYYRzTAdnEs64HjXpEUL7d=V0CXiAXcNw@mail.gmail.com/ Reported-by: Masahiro Yamada Debugged-by: Sami Tolvanen Signed-off-by: Jakub Kicinski Signed-off-by: Nick Desaulniers Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/linux/bitfield.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 3f1ef4450a7c..775cd10c04b0 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -72,7 +72,7 @@ */ #define FIELD_FIT(_mask, _val) \ ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) -- GitLab From b846b77fba7b62246a6538618498bf13f5c148a7 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 11 Aug 2020 18:35:24 -0700 Subject: [PATCH 0281/1304] fs/minix: check return value of sb_getblk() commit da27e0a0e5f655f0d58d4e153c3182bb2b290f64 upstream. Patch series "fs/minix: fix syzbot bugs and set s_maxbytes". This series fixes all syzbot bugs in the minix filesystem: KASAN: null-ptr-deref Write in get_block KASAN: use-after-free Write in get_block KASAN: use-after-free Read in get_block WARNING in inc_nlink KMSAN: uninit-value in get_block WARNING in drop_nlink It also fixes the minix filesystem to set s_maxbytes correctly, so that userspace sees the correct behavior when exceeding the max file size. This patch (of 6): sb_getblk() can fail, so check its return value. This fixes a NULL pointer dereference. Originally from Qiujun Huang. 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzbot+4a88b2b9dc280f47baf4@syzkaller.appspotmail.com Signed-off-by: Eric Biggers Signed-off-by: Andrew Morton Cc: Qiujun Huang Cc: Alexander Viro Cc: Link: http://lkml.kernel.org/r/20200628060846.682158-1-ebiggers@kernel.org Link: http://lkml.kernel.org/r/20200628060846.682158-2-ebiggers@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/minix/itree_common.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c index 043c3fdbc8e7..446148792f41 100644 --- a/fs/minix/itree_common.c +++ b/fs/minix/itree_common.c @@ -75,6 +75,7 @@ static int alloc_branch(struct inode *inode, int n = 0; int i; int parent = minix_new_block(inode); + int err = -ENOSPC; branch[0].key = cpu_to_block(parent); if (parent) for (n = 1; n < num; n++) { @@ -85,6 +86,11 @@ static int alloc_branch(struct inode *inode, break; branch[n].key = cpu_to_block(nr); bh = sb_getblk(inode->i_sb, parent); + if (!bh) { + minix_free_block(inode, nr); + err = -ENOMEM; + break; + } lock_buffer(bh); memset(bh->b_data, 0, bh->b_size); branch[n].bh = bh; @@ -103,7 +109,7 @@ static int alloc_branch(struct inode *inode, bforget(branch[i].bh); for (i = 0; i < n; i++) minix_free_block(inode, block_to_cpu(branch[i].key)); - return -ENOSPC; + return err; } static inline int splice_branch(struct inode *inode, -- GitLab From 169f7f37bd6b0bb91242099cc261219791067d5c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 11 Aug 2020 18:35:27 -0700 Subject: [PATCH 0282/1304] fs/minix: don't allow getting deleted inodes commit facb03dddec04e4aac1bb2139accdceb04deb1f3 upstream. If an inode has no links, we need to mark it bad rather than allowing it to be accessed. This avoids WARNINGs in inc_nlink() and drop_nlink() when doing directory operations on a fuzzed filesystem. 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzbot+a9ac3de1b5de5fb10efc@syzkaller.appspotmail.com Reported-by: syzbot+df958cf5688a96ad3287@syzkaller.appspotmail.com Signed-off-by: Eric Biggers Signed-off-by: Andrew Morton Cc: Alexander Viro Cc: Qiujun Huang Cc: Link: http://lkml.kernel.org/r/20200628060846.682158-3-ebiggers@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/minix/inode.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 72e308c3e66b..69b33684a08c 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -471,6 +471,13 @@ static struct inode *V1_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); @@ -504,6 +511,13 @@ static struct inode *V2_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); -- GitLab From 954fc7da99a9513d5e6b3ccf38f6f7c9af5a276d Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 11 Aug 2020 18:35:30 -0700 Subject: [PATCH 0283/1304] fs/minix: reject too-large maximum file size commit 270ef41094e9fa95273f288d7d785313ceab2ff3 upstream. If the minix filesystem tries to map a very large logical block number to its on-disk location, block_to_path() can return offsets that are too large, causing out-of-bounds memory accesses when accessing indirect index blocks. 
This should be prevented by the check against the maximum file size, but this doesn't work because the maximum file size is read directly from the on-disk superblock and isn't validated itself. Fix this by validating the maximum file size at mount time. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzbot+c7d9ec7a1a7272dd71b3@syzkaller.appspotmail.com Reported-by: syzbot+3b7b03a0c28948054fb5@syzkaller.appspotmail.com Reported-by: syzbot+6e056ee473568865f3e6@syzkaller.appspotmail.com Signed-off-by: Eric Biggers Signed-off-by: Andrew Morton Cc: Alexander Viro Cc: Qiujun Huang Cc: Link: http://lkml.kernel.org/r/20200628060846.682158-4-ebiggers@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/minix/inode.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 69b33684a08c..4f994de46e6b 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -155,6 +155,23 @@ static int minix_remount (struct super_block * sb, int * flags, char * data) return 0; } +static bool minix_check_superblock(struct minix_sb_info *sbi) +{ + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) + return false; + + /* + * s_max_size must not exceed the block mapping limitation. This check + * is only needed for V1 filesystems, since V2/V3 support an extra level + * of indirect blocks which places the limit well above U32_MAX. + */ + if (sbi->s_version == MINIX_V1 && + sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE) + return false; + + return true; +} + static int minix_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh; @@ -233,11 +250,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) } else goto out_no_fs; + if (!minix_check_superblock(sbi)) + goto out_illegal_sb; + /* * Allocate the buffer map to keep the superblock small. 
*/ - if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) - goto out_illegal_sb; i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kzalloc(i, GFP_KERNEL); if (!map) -- GitLab From d22c224704b720887e3fad683281a2cf97b679ea Mon Sep 17 00:00:00 2001 From: Hector Martin Date: Mon, 10 Aug 2020 17:25:02 +0900 Subject: [PATCH 0284/1304] ALSA: usb-audio: add quirk for Pioneer DDJ-RB commit 6e8596172ee1cd46ec0bfd5adcf4ff86371478b6 upstream. This is just another Pioneer device with fixed endpoints. Input is dummy but used as feedback (it always returns silence). Cc: stable@vger.kernel.org Signed-off-by: Hector Martin Link: https://lore.kernel.org/r/20200810082502.225979-1-marcan@marcan.st Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/quirks-table.h | 56 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 106dd8529bad..89b70308b551 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3419,6 +3419,62 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), .type = QUIRK_SETUP_FMT_AFTER_RESUME } }, +{ + /* + * PIONEER DJ DDJ-RB + * PCM is 4 channels out, 2 dummy channels in @ 44.1 fixed + * The feedback for the output is the dummy input. 
+ */ + USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 4, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x01, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC, + .rates = SNDRV_PCM_RATE_44100, + .rate_min = 44100, + .rate_max = 44100, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 44100 } + } + }, + { + .ifnum = 0, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S24_3LE, + .channels = 2, + .iface = 0, + .altsetting = 1, + .altset_idx = 1, + .endpoint = 0x82, + .ep_attr = USB_ENDPOINT_XFER_ISOC| + USB_ENDPOINT_SYNC_ASYNC| + USB_ENDPOINT_USAGE_IMPLICIT_FB, + .rates = SNDRV_PCM_RATE_44100, + .rate_min = 44100, + .rate_max = 44100, + .nr_rates = 1, + .rate_table = (unsigned int[]) { 44100 } + } + }, + { + .ifnum = -1 + } + } + } +}, #define ALC1220_VB_DESKTOP(vend, prod) { \ USB_DEVICE(vend, prod), \ -- GitLab From ec41ee06e9e0c9a6dbc2cf420f199fc2a522aec8 Mon Sep 17 00:00:00 2001 From: Zheng Bin Date: Mon, 15 Jun 2020 09:21:53 +0800 Subject: [PATCH 0285/1304] 9p: Fix memory leak in v9fs_mount commit cb0aae0e31c632c407a2cab4307be85a001d4d98 upstream. v9fs_mount v9fs_session_init v9fs_cache_session_get_cookie v9fs_random_cachetag -->alloc cachetag v9ses->fscache = fscache_acquire_cookie -->maybe NULL sb = sget -->fail, goto clunk clunk_fid: v9fs_session_close if (v9ses->fscache) -->NULL kfree(v9ses->cachetag) Thus memleak happens. 
Link: http://lkml.kernel.org/r/20200615012153.89538-1-zhengbin13@huawei.com Fixes: 60e78d2c993e ("9p: Add fscache support to 9p") Cc: # v2.6.32+ Signed-off-by: Zheng Bin Signed-off-by: Dominique Martinet Signed-off-by: Greg Kroah-Hartman --- fs/9p/v9fs.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 619128b55837..c579966a0e5c 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -515,10 +515,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses) } #ifdef CONFIG_9P_FSCACHE - if (v9ses->fscache) { + if (v9ses->fscache) v9fs_cache_session_put_cookie(v9ses); - kfree(v9ses->cachetag); - } + kfree(v9ses->cachetag); #endif kfree(v9ses->uname); kfree(v9ses->aname); -- GitLab From 10c8a526b2db1fcdf9e2d59d4885377b91939c55 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 28 Jul 2020 14:17:36 +1000 Subject: [PATCH 0286/1304] drm/ttm/nouveau: don't call tt destroy callback on alloc failure. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5de5b6ecf97a021f29403aa272cb4e03318ef586 upstream. This is confusing, and from my reading of all the drivers only nouveau got this right. Just make the API act under driver control of it's own allocation failing, and don't call destroy, if the page table fails to create there is nothing to cleanup here. (I'm willing to believe I've missed something here, so please review deeply). 
Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200728041736.20689-1-airlied@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 9 +++------ drivers/gpu/drm/ttm/ttm_tt.c | 3 --- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 8ebdc74cc0ad..326948b65542 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) else nvbe->ttm.ttm.func = &nv50_sgdma_backend; - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) - /* - * A failing ttm_dma_tt_init() will call ttm_tt_destroy() - * and thus our nouveau_sgdma_destroy() hook, so we don't need - * to free nvbe here. - */ + if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) { + kfree(nvbe); return NULL; + } return &nvbe->ttm.ttm; } diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index e3a0691582ff..68cfa25674e5 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -241,7 +241,6 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, ttm_tt_init_fields(ttm, bo, page_flags); if (ttm_tt_alloc_page_directory(ttm)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -265,7 +264,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, INIT_LIST_HEAD(&ttm_dma->pages_list); if (ttm_dma_tt_alloc_page_directory(ttm_dma)) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -287,7 +285,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, else ret = ttm_dma_tt_alloc_page_directory(ttm_dma); if (ret) { - ttm_tt_destroy(ttm); pr_err("Failed allocating page table\n"); return -ENOMEM; } -- GitLab From 
ceefd5f9a60e25a89f6acd8f00bd4d1ecc229b00 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 4 Aug 2020 16:30:30 -0400 Subject: [PATCH 0287/1304] NFS: Don't move layouts to plh_return_segs list while in use commit ff041727e9e029845857cac41aae118ead5e261b upstream. If the layout segment is still in use for a read or a write, we should not move it to the layout plh_return_segs list. If we do, we can end up returning the layout while I/O is still in progress. Fixes: e0b7d420f72a ("pNFS: Don't discard layout segments that are marked for return") Cc: stable@vger.kernel.org # v4.19+ Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/pnfs.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 66f699e18755..9709425de3f1 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -2291,16 +2291,6 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) return ERR_PTR(-EAGAIN); } -static int -mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg, - struct list_head *tmp_list) -{ - if (!mark_lseg_invalid(lseg, tmp_list)) - return 0; - pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg); - return 1; -} - /** * pnfs_mark_matching_lsegs_return - Free or return matching layout segments * @lo: pointer to layout header @@ -2337,7 +2327,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, lseg, lseg->pls_range.iomode, lseg->pls_range.offset, lseg->pls_range.length); - if (mark_lseg_invalid_or_return(lseg, tmp_list)) + if (mark_lseg_invalid(lseg, tmp_list)) continue; remaining++; set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); -- GitLab From 5052b997592af482f29c5441b8bc39831015818c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 5 Aug 2020 09:03:56 -0400 Subject: [PATCH 0288/1304] NFS: Don't return layout segments that are in use commit d474f96104bd4377573526ebae2ee212205a6839 upstream. 
If the NFS_LAYOUT_RETURN_REQUESTED flag is set, we want to return the layout as soon as possible, meaning that the affected layout segments should be marked as invalid, and should no longer be in use for I/O. Fixes: f0b429819b5f ("pNFS: Ignore non-recalled layouts in pnfs_layout_need_return()") Cc: stable@vger.kernel.org # v4.19+ Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/pnfs.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 9709425de3f1..2b9e139a2997 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1181,31 +1181,27 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid, return status; } +static bool +pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo, + enum pnfs_iomode iomode, + u32 seq) +{ + struct pnfs_layout_range recall_range = { + .length = NFS4_MAX_UINT64, + .iomode = iomode, + }; + return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, + &recall_range, seq) != -EBUSY; +} + /* Return true if layoutreturn is needed */ static bool pnfs_layout_need_return(struct pnfs_layout_hdr *lo) { - struct pnfs_layout_segment *s; - enum pnfs_iomode iomode; - u32 seq; - if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) return false; - - seq = lo->plh_return_seq; - iomode = lo->plh_return_iomode; - - /* Defer layoutreturn until all recalled lsegs are done */ - list_for_each_entry(s, &lo->plh_segs, pls_list) { - if (seq && pnfs_seqid_is_newer(s->pls_seq, seq)) - continue; - if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode) - continue; - if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) - return false; - } - - return true; + return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode, + lo->plh_return_seq); } static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo) -- GitLab From 93a64a8d27e8285144b82897f5f930a8c7d34019 Mon Sep 17 00:00:00 2001 From: Ivan 
Kokshaysky Date: Sat, 20 Jun 2020 17:44:49 +0100 Subject: [PATCH 0289/1304] cpufreq: dt: fix oops on armada37xx commit 10470dec3decaf5ed3c596f85debd7c42777ae12 upstream. Commit 0c868627e617e43a295d8 (cpufreq: dt: Allow platform specific intermediate callbacks) added two function pointers to the struct cpufreq_dt_platform_data. However, armada37xx_cpufreq_driver_init() has this struct (pdata) located on the stack and uses only "suspend" and "resume" fields. So these newly added "get_intermediate" and "target_intermediate" pointers are uninitialized and contain arbitrary non-null values, causing all kinds of trouble. For instance, here is an oops on espressobin after an attempt to change the cpefreq governor: [ 29.174554] Unable to handle kernel execute from non-executable memory at virtual address ffff00003f87bdc0 ... [ 29.269373] pc : 0xffff00003f87bdc0 [ 29.272957] lr : __cpufreq_driver_target+0x138/0x580 ... Fixed by zeroing out pdata before use. Cc: # v5.7+ Signed-off-by: Ivan Kokshaysky Reviewed-by: Andrew Lunn Signed-off-by: Viresh Kumar Signed-off-by: Greg Kroah-Hartman --- drivers/cpufreq/armada-37xx-cpufreq.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c index 0df16eb1eb3c..c5f98cafc25c 100644 --- a/drivers/cpufreq/armada-37xx-cpufreq.c +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -458,6 +458,7 @@ static int __init armada37xx_cpufreq_driver_init(void) /* Now that everything is setup, enable the DVFS at hardware level */ armada37xx_cpufreq_enable_dvfs(nb_pm_base); + memset(&pdata, 0, sizeof(pdata)); pdata.suspend = armada37xx_cpufreq_suspend; pdata.resume = armada37xx_cpufreq_resume; -- GitLab From 5de7ab80c866b4e31907109cb1993ac7422e09ae Mon Sep 17 00:00:00 2001 From: Romain Naour Date: Fri, 14 Aug 2020 17:31:57 -0700 Subject: [PATCH 0290/1304] include/asm-generic/vmlinux.lds.h: align ro_after_init commit 7f897acbe5d57995438c831670b7c400e9c0dc00 upstream. 
Since the patch [1], building the kernel using a toolchain built with binutils 2.33.1 prevents booting a sh4 system under Qemu. Apply the patch provided by Alan Modra [2] that fix alignment of rodata. [1] https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=ebd2263ba9a9124d93bbc0ece63d7e0fae89b40e [2] https://www.sourceware.org/ml/binutils/2019-12/msg00112.html Signed-off-by: Romain Naour Signed-off-by: Andrew Morton Cc: Alan Modra Cc: Bin Meng Cc: Chen Zhou Cc: Geert Uytterhoeven Cc: John Paul Adrian Glaubitz Cc: Krzysztof Kozlowski Cc: Kuninori Morimoto Cc: Rich Felker Cc: Sam Ravnborg Cc: Yoshinori Sato Cc: Arnd Bergmann Cc: Link: https://marc.info/?l=linux-sh&m=158429470221261 Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/asm-generic/vmlinux.lds.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 95479f35e239..4976f4d30f55 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -307,6 +307,7 @@ */ #ifndef RO_AFTER_INIT_DATA #define RO_AFTER_INIT_DATA \ + . = ALIGN(8); \ __start_ro_after_init = .; \ *(.data..ro_after_init) \ __end_ro_after_init = .; -- GitLab From acc8ff07a2117b4b0d02e1f007dd1c66f34a8645 Mon Sep 17 00:00:00 2001 From: Christian Eggers Date: Tue, 28 Jul 2020 12:08:32 +0200 Subject: [PATCH 0291/1304] spi: spidev: Align buffers for DMA commit aa9e862d7d5bcecd4dca9f39e8b684b93dd84ee7 upstream. Simply copying all xfers from userspace into one bounce buffer causes alignment problems if the SPI controller uses DMA. Ensure that all transfer data blocks within the rx and tx bounce buffers are aligned for DMA (according to ARCH_KMALLOC_MINALIGN). Alignment may increase the usage of the bounce buffers. In some cases, the buffers may need to be increased using the "bufsiz" module parameter. 
Signed-off-by: Christian Eggers Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200728100832.24788-1-ceggers@arri.de Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spidev.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 167047760d79..e444e7cc6968 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -232,6 +232,11 @@ static int spidev_message(struct spidev_data *spidev, for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; n; n--, k_tmp++, u_tmp++) { + /* Ensure that also following allocations from rx_buf/tx_buf will meet + * DMA alignment requirements. + */ + unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN); + k_tmp->len = u_tmp->len; total += k_tmp->len; @@ -247,17 +252,17 @@ static int spidev_message(struct spidev_data *spidev, if (u_tmp->rx_buf) { /* this transfer needs space in RX bounce buffer */ - rx_total += k_tmp->len; + rx_total += len_aligned; if (rx_total > bufsiz) { status = -EMSGSIZE; goto done; } k_tmp->rx_buf = rx_buf; - rx_buf += k_tmp->len; + rx_buf += len_aligned; } if (u_tmp->tx_buf) { /* this transfer needs space in TX bounce buffer */ - tx_total += k_tmp->len; + tx_total += len_aligned; if (tx_total > bufsiz) { status = -EMSGSIZE; goto done; @@ -267,7 +272,7 @@ static int spidev_message(struct spidev_data *spidev, (uintptr_t) u_tmp->tx_buf, u_tmp->len)) goto done; - tx_buf += k_tmp->len; + tx_buf += len_aligned; } k_tmp->cs_change = !!u_tmp->cs_change; @@ -297,16 +302,16 @@ static int spidev_message(struct spidev_data *spidev, goto done; /* copy any rx data out of bounce buffer */ - rx_buf = spidev->rx_buffer; - for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { + for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; + n; + n--, k_tmp++, u_tmp++) { if (u_tmp->rx_buf) { if (copy_to_user((u8 __user *) - (uintptr_t) u_tmp->rx_buf, rx_buf, + (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf, 
u_tmp->len)) { status = -EFAULT; goto done; } - rx_buf += u_tmp->len; } } status = total; -- GitLab From f6859ae7cc40ae092dbe2cea7ff5c89c60a433dd Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Fri, 12 Jun 2020 13:28:15 +0530 Subject: [PATCH 0292/1304] mtd: rawnand: qcom: avoid write to unavailable register commit 443440cc4a901af462239d286cd10721aa1c7dfc upstream. SFLASHC_BURST_CFG is only available on older ipq NAND platforms, this register has been removed when the NAND controller got implemented in the qpic controller. Avoid writing this register on devices which are based on qpic NAND controller. Fixes: dce84760b09f ("mtd: nand: qcom: Support for IPQ8074 QPIC NAND controller") Cc: stable@vger.kernel.org Signed-off-by: Sivaprakash Murugesan Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/1591948696-16015-2-git-send-email-sivaprak@codeaurora.org Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/raw/qcom_nandc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 1f9d64aeb863..9fcbcf4b217b 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -466,11 +466,13 @@ struct qcom_nand_host { * among different NAND controllers. 
* @ecc_modes - ecc mode for NAND * @is_bam - whether NAND controller is using BAM + * @is_qpic - whether NAND CTRL is part of qpic IP * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset */ struct qcom_nandc_props { u32 ecc_modes; bool is_bam; + bool is_qpic; u32 dev_cmd_reg_start; }; @@ -2766,7 +2768,8 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) u32 nand_ctrl; /* kill onenand */ - nandc_write(nandc, SFLASHC_BURST_CFG, 0); + if (!nandc->props->is_qpic) + nandc_write(nandc, SFLASHC_BURST_CFG, 0); nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), NAND_DEV_CMD_VLD_VAL); @@ -3022,12 +3025,14 @@ static const struct qcom_nandc_props ipq806x_nandc_props = { static const struct qcom_nandc_props ipq4019_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x0, }; static const struct qcom_nandc_props ipq8074_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x7000, }; -- GitLab From c46716b16d6db3d51b745c18ea3fb1ff01be108e Mon Sep 17 00:00:00 2001 From: John David Anglin Date: Thu, 30 Jul 2020 08:59:12 -0400 Subject: [PATCH 0293/1304] parisc: Implement __smp_store_release and __smp_load_acquire barriers commit e96ebd589debd9a6a793608c4ec7019c38785dea upstream. This patch implements the __smp_store_release and __smp_load_acquire barriers using ordered stores and loads. This avoids the sync instruction present in the generic implementation. 
Cc: # 4.14+ Signed-off-by: Dave Anglin Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- arch/parisc/include/asm/barrier.h | 61 +++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h index dbaaca84f27f..640d46edf32e 100644 --- a/arch/parisc/include/asm/barrier.h +++ b/arch/parisc/include/asm/barrier.h @@ -26,6 +26,67 @@ #define __smp_rmb() mb() #define __smp_wmb() mb() +#define __smp_store_release(p, v) \ +do { \ + typeof(p) __p = (p); \ + union { typeof(*p) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(*p)) (v) }; \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("stb,ma %0,0(%1)" \ + : : "r"(*(__u8 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("sth,ma %0,0(%1)" \ + : : "r"(*(__u16 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("stw,ma %0,0(%1)" \ + : : "r"(*(__u32 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("std,ma %0,0(%1)" \ + : : "r"(*(__u64 *)__u.__c), "r"(__p) \ + : "memory"); \ + break; \ + } \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + union { typeof(*p) __val; char __c[1]; } __u; \ + typeof(p) __p = (p); \ + compiletime_assert_atomic_type(*p); \ + switch (sizeof(*p)) { \ + case 1: \ + asm volatile("ldb,ma 0(%1),%0" \ + : "=r"(*(__u8 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 2: \ + asm volatile("ldh,ma 0(%1),%0" \ + : "=r"(*(__u16 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 4: \ + asm volatile("ldw,ma 0(%1),%0" \ + : "=r"(*(__u32 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + case 8: \ + if (IS_ENABLED(CONFIG_64BIT)) \ + asm volatile("ldd,ma 0(%1),%0" \ + : "=r"(*(__u64 *)__u.__c) : "r"(__p) \ + : "memory"); \ + break; \ + } \ + __u.__val; \ +}) #include #endif /* !__ASSEMBLY__ */ -- GitLab From 
2310f713e110b66e8ee61636e7d40b4fa9068c97 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Tue, 11 Aug 2020 18:19:19 +0200 Subject: [PATCH 0294/1304] parisc: mask out enable and reserved bits from sba imask commit 5b24993c21cbf2de11aff077a48c5cb0505a0450 upstream. When using kexec the SBA IOMMU IBASE might still have the RE bit set. This triggers a WARN_ON when trying to write back the IBASE register later, and it also makes some mask calculations fail. Cc: Signed-off-by: Sven Schnelle Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- drivers/parisc/sba_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 6dd1780a5885..0f19cc75cc0c 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1291,7 +1291,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num) ** (one that doesn't overlap memory or LMMIO space) in the ** IBASE and IMASK registers. */ - ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); + ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL; iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) { -- GitLab From c0cfb9eb60bd626665bc01a8657a94946c5f0dab Mon Sep 17 00:00:00 2001 From: Nathan Huckleberry Date: Fri, 10 Jul 2020 20:23:37 +0100 Subject: [PATCH 0295/1304] ARM: 8992/1: Fix unwind_frame for clang-built kernels commit b4d5ec9b39f8b31d98f65bc5577b5d15d93795d7 upstream. Since clang does not push pc and sp in function prologues, the current implementation of unwind_frame does not work. By using the previous frame's lr/fp instead of saved pc/sp we get valid unwinds on clang-built kernels. The bounds check on next frame pointer must be changed as well since there are 8 less bytes between frames. This fixes /proc//stack. 
Link: https://github.com/ClangBuiltLinux/linux/issues/912 Reported-by: Miles Chen Tested-by: Miles Chen Cc: stable@vger.kernel.org Reviewed-by: Nick Desaulniers Signed-off-by: Nathan Huckleberry Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/kernel/stacktrace.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index a56e7c856ab5..a4d4a28fe07d 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -21,6 +21,19 @@ * A simple function epilogue looks like this: * ldm sp, {fp, sp, pc} * + * When compiled with clang, pc and sp are not pushed. A simple function + * prologue looks like this when built with clang: + * + * stmdb {..., fp, lr} + * add fp, sp, #x + * sub sp, sp, #y + * + * A simple function epilogue looks like this when built with clang: + * + * sub sp, fp, #x + * ldm {..., fp, pc} + * + * * Note that with framepointer enabled, even the leaf functions have the same * prologue and epilogue, therefore we can ignore the LR value in this case. 
*/ @@ -33,6 +46,16 @@ int notrace unwind_frame(struct stackframe *frame) low = frame->sp; high = ALIGN(low, THREAD_SIZE); +#ifdef CONFIG_CC_IS_CLANG + /* check current frame pointer is within bounds */ + if (fp < low + 4 || fp > high - 4) + return -EINVAL; + + frame->sp = frame->fp; + frame->fp = *(unsigned long *)(fp); + frame->pc = frame->lr; + frame->lr = *(unsigned long *)(fp + 4); +#else /* check current frame pointer is within bounds */ if (fp < low + 12 || fp > high - 4) return -EINVAL; @@ -41,6 +64,7 @@ int notrace unwind_frame(struct stackframe *frame) frame->fp = *(unsigned long *)(fp - 12); frame->sp = *(unsigned long *)(fp - 8); frame->pc = *(unsigned long *)(fp - 4); +#endif return 0; } -- GitLab From 8e22f6848fc80c34ce7e7bf2bdbd5c8fb54e2fe4 Mon Sep 17 00:00:00 2001 From: Jon Derrick Date: Tue, 21 Jul 2020 14:26:09 -0600 Subject: [PATCH 0296/1304] irqdomain/treewide: Free firmware node after domain removal commit ec0160891e387f4771f953b888b1fe951398e5d9 upstream. Commit 711419e504eb ("irqdomain: Add the missing assignment of domain->fwnode for named fwnode") unintentionally caused a dangling pointer page fault issue on firmware nodes that were freed after IRQ domain allocation. Commit e3beca48a45b fixed that dangling pointer issue by only freeing the firmware node after an IRQ domain allocation failure. That fix no longer frees the firmware node immediately, but leaves the firmware node allocated after the domain is removed. The firmware node must be kept around through irq_domain_remove, but should be freed it afterwards. Add the missing free operations after domain removal where where appropriate. 
Fixes: e3beca48a45b ("irqdomain/treewide: Keep firmware node unconditionally allocated") Signed-off-by: Jon Derrick Signed-off-by: Thomas Gleixner Reviewed-by: Andy Shevchenko Acked-by: Bjorn Helgaas # drivers/pci Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/1595363169-7157-1-git-send-email-jonathan.derrick@intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/apic/io_apic.c | 5 +++++ drivers/iommu/intel_irq_remapping.c | 8 ++++++++ drivers/pci/controller/vmd.c | 3 +++ 3 files changed, 16 insertions(+) diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 08e2f3a5f124..95e21c438012 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2342,8 +2342,13 @@ static int mp_irqdomain_create(int ioapic) static void ioapic_destroy_irqdomain(int idx) { + struct ioapic_domain_cfg *cfg = &ioapics[idx].irqdomain_cfg; + struct fwnode_handle *fn = ioapics[idx].irqdomain->fwnode; + if (ioapics[idx].irqdomain) { irq_domain_remove(ioapics[idx].irqdomain); + if (!cfg->dev) + irq_domain_free_fwnode(fn); ioapics[idx].irqdomain = NULL; } } diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 852e2841395b..15a4ad31c510 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -601,13 +601,21 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) static void intel_teardown_irq_remapping(struct intel_iommu *iommu) { + struct fwnode_handle *fn; + if (iommu && iommu->ir_table) { if (iommu->ir_msi_domain) { + fn = iommu->ir_msi_domain->fwnode; + irq_domain_remove(iommu->ir_msi_domain); + irq_domain_free_fwnode(fn); iommu->ir_msi_domain = NULL; } if (iommu->ir_domain) { + fn = iommu->ir_domain->fwnode; + irq_domain_remove(iommu->ir_domain); + irq_domain_free_fwnode(fn); iommu->ir_domain = NULL; } free_pages((unsigned long)iommu->ir_table->base, diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 
ad39b404f10a..3d1b004a58f8 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -718,6 +718,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) if (!vmd->bus) { pci_free_resource_list(&resources); irq_domain_remove(vmd->irq_domain); + irq_domain_free_fwnode(fn); return -ENODEV; } @@ -820,6 +821,7 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd) static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); + struct fwnode_handle *fn = vmd->irq_domain->fwnode; sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); @@ -828,6 +830,7 @@ static void vmd_remove(struct pci_dev *dev) vmd_teardown_dma_ops(vmd); vmd_detach_resources(vmd); irq_domain_remove(vmd->irq_domain); + irq_domain_free_fwnode(fn); } #ifdef CONFIG_PM_SLEEP -- GitLab From bbe6145f996edb4490ddde31e981dafb1e922917 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Mon, 27 Jul 2020 11:13:39 +0200 Subject: [PATCH 0297/1304] xen/balloon: fix accounting in alloc_xenballooned_pages error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 1951fa33ec259abdf3497bfee7b63e7ddbb1a394 upstream. target_unpopulated is incremented with nr_pages at the start of the function, but the call to free_xenballooned_pages will only subtract pgno number of pages, and thus the rest need to be subtracted before returning or else accounting will be skewed. 
Signed-off-by: Roger Pau Monné Reviewed-by: Juergen Gross Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200727091342.52325-2-roger.pau@citrix.com Signed-off-by: Juergen Gross Signed-off-by: Greg Kroah-Hartman --- drivers/xen/balloon.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 6fa7209f24f4..6e89b6ab1d3e 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -632,6 +632,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages) out_undo: mutex_unlock(&balloon_mutex); free_xenballooned_pages(pgno, pages); + /* + * NB: free_xenballooned_pages will only subtract pgno pages, but since + * target_unpopulated is incremented with nr_pages at the start we need + * to remove the remaining ones also, or accounting will be screwed. + */ + balloon_stats.target_unpopulated -= nr_pages - pgno; return ret; } EXPORT_SYMBOL(alloc_xenballooned_pages); -- GitLab From 0fc9dd00afe7f82ff6692387c6a0088bb2c9f067 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Mon, 27 Jul 2020 11:13:40 +0200 Subject: [PATCH 0298/1304] xen/balloon: make the balloon wait interruptible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 88a479ff6ef8af7f07e11593d58befc644244ff7 upstream. So it can be killed, or else processes can get hung indefinitely waiting for balloon pages. 
Signed-off-by: Roger Pau Monné Reviewed-by: Juergen Gross Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200727091342.52325-3-roger.pau@citrix.com Signed-off-by: Juergen Gross Signed-off-by: Greg Kroah-Hartman --- drivers/xen/balloon.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 6e89b6ab1d3e..b23edf64c2b2 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -570,11 +570,13 @@ static int add_ballooned_pages(int nr_pages) if (xen_hotplug_unpopulated) { st = reserve_additional_memory(); if (st != BP_ECANCELED) { + int rc; + mutex_unlock(&balloon_mutex); - wait_event(balloon_wq, + rc = wait_event_interruptible(balloon_wq, !list_empty(&ballooned_pages)); mutex_lock(&balloon_mutex); - return 0; + return rc ? -ENOMEM : 0; } } -- GitLab From e4ca0185dc1c18eb998e94c8dfa2757a85f67aa5 Mon Sep 17 00:00:00 2001 From: Oleksandr Andrushchenko Date: Thu, 13 Aug 2020 09:21:09 +0300 Subject: [PATCH 0299/1304] xen/gntdev: Fix dmabuf import with non-zero sgt offset commit 5fa4e6f1c2d8c9a4e47e1931b42893172d388f2b upstream. It is possible that the scatter-gather table during dmabuf import has non-zero offset of the data, but user-space doesn't expect that. Fix this by failing the import, so user-space doesn't access wrong data. 
Fixes: bf8dc55b1358 ("xen/gntdev: Implement dma-buf import functionality") Signed-off-by: Oleksandr Andrushchenko Acked-by: Juergen Gross Cc: Link: https://lore.kernel.org/r/20200813062113.11030-2-andr2000@gmail.com Signed-off-by: Juergen Gross Signed-off-by: Greg Kroah-Hartman --- drivers/xen/gntdev-dmabuf.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index d97fcfc5e558..f6589563ff71 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -641,6 +641,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev, goto fail_detach; } + /* Check that we have zero offset. */ + if (sgt->sgl->offset) { + ret = ERR_PTR(-EINVAL); + pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n", + sgt->sgl->offset); + goto fail_unmap; + } + /* Check number of pages that imported buffer has. */ if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) { ret = ERR_PTR(-EINVAL); -- GitLab From a834132bd465f9d7f4049be65648e01cf2533cb8 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 19 Aug 2020 08:15:08 +0200 Subject: [PATCH 0300/1304] Linux 4.19.140 Tested-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f6012170995e..c5ee1c10a39c 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 139 +SUBLEVEL = 140 EXTRAVERSION = NAME = "People's Front" -- GitLab From 567b7f4dc6528168d6583725fad6949082de7670 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 19 Aug 2020 11:02:55 +0200 Subject: [PATCH 0301/1304] Revert "ALSA: usb-audio: work around streaming quirk for MacroSilicon MS2109" This reverts commit 820ec1efe430395f6a699fa920c516bf0a70aa1c which is commit 1b7ecc241a67ad6b584e071bd791a54e0cd5f097 upstream. 
It breaks the abi for the android-4.19-stable branch, and isn't needed in android devices, so just revert it. Signed-off-by: Greg Kroah-Hartman Change-Id: I081472f3c3663edaa0c1275fe5e1303833bd533c --- sound/usb/card.h | 1 - sound/usb/pcm.c | 6 ------ sound/usb/quirks.c | 3 --- sound/usb/stream.c | 1 - 4 files changed, 11 deletions(-) diff --git a/sound/usb/card.h b/sound/usb/card.h index 57bc9d1aecb4..c773ad3853dd 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -136,7 +136,6 @@ struct snd_usb_substream { unsigned int tx_length_quirk:1; /* add length specifier to transfers */ unsigned int fmt_type; /* USB audio format type (1-3) */ unsigned int pkt_offset_adj; /* Bytes to drop from beginning of packets (for non-compliant devices) */ - unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */ unsigned int running: 1; /* running status */ diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 722076b5ac9c..3ba1f9c5d87a 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -1534,12 +1534,6 @@ static void retire_capture_urb(struct snd_usb_substream *subs, // continue; } bytes = urb->iso_frame_desc[i].actual_length; - if (subs->stream_offset_adj > 0) { - unsigned int adj = min(subs->stream_offset_adj, bytes); - cp += adj; - bytes -= adj; - subs->stream_offset_adj -= adj; - } frames = bytes / stride; if (!subs->txfr_quirk) bytes = frames * stride; diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 8d9117312e30..e9ec6166acc6 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1166,9 +1166,6 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */ set_format_emu_quirk(subs, fmt); break; - case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ - subs->stream_offset_adj = 2; - break; } } diff --git a/sound/usb/stream.c b/sound/usb/stream.c index ce8fedbdf921..c414dffa55f0 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -104,7 +104,6 @@ 
static void snd_usb_init_substream(struct snd_usb_stream *as, subs->tx_length_quirk = as->chip->tx_length_quirk; subs->speed = snd_usb_get_speed(subs->dev); subs->pkt_offset_adj = 0; - subs->stream_offset_adj = 0; snd_usb_set_pcm_ops(as->pcm, stream); -- GitLab From 76ed1573056a64e6fa24655c5d7bf6b8b4c13575 Mon Sep 17 00:00:00 2001 From: Yiwei Zhang Date: Thu, 20 Aug 2020 11:35:22 -0700 Subject: [PATCH 0302/1304] ANDROID: ABI: update the ABI xml representation Leaf changes summary: 1 artifact changed Changed leaf types summary: 0 leaf type changed Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 0 Added function Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 1 Added variable 1 Added variable: [A] 'tracepoint __tracepoint_gpu_mem_total' Bug: 155967942 Change-Id: Ibd9a55dff5e32926399d6b7b655dde0d3ee38407 Signed-off-by: Yiwei Zhang --- android/abi_gki_aarch64.xml | 474 +++++++++++++++-------------------- android/abi_gki_aarch64_qcom | 1 + 2 files changed, 206 insertions(+), 269 deletions(-) diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml index c8c54a7d6a31..3d6847f32e87 100644 --- a/android/abi_gki_aarch64.xml +++ b/android/abi_gki_aarch64.xml @@ -2614,6 +2614,7 @@ + @@ -30629,14 +30630,14 @@ - - - - + + + + - - + + @@ -30759,6 +30760,9 @@ + + + @@ -30875,17 +30879,6 @@ - - - - - - - - - - - @@ -30927,6 +30920,17 @@ + + + + + + + + + + + @@ -41719,23 +41723,6 @@ - - - - - - - - - - - - - - - - - @@ -41935,6 +41922,25 @@ + + + + + + + + + + + + + + + + + + + @@ -41967,7 +41973,7 @@ - + @@ -42386,7 +42392,7 @@ - + @@ -47736,7 +47742,7 @@ - + @@ -48279,7 +48285,7 @@ - + @@ -48762,28 +48768,28 @@ - - - - + + + + - - - - + + + + - - - - + + + + - - - - + + + + @@ -57062,7 +57068,7 @@ - + @@ -57540,6 +57546,7 @@ + @@ -57743,12 +57750,12 @@ - + - + @@ -57790,13 +57797,13 @@ - + - + @@ -59189,7 +59196,7 @@ - + @@ -59962,7 +59969,7 @@ - + @@ -60045,7 +60052,7 @@ - + @@ -60073,7 +60080,7 @@ - + @@ -60199,32 +60206,32 @@ - + - 
+ - + - + - + - + @@ -60252,7 +60259,7 @@ - + @@ -60322,11 +60329,11 @@ - + - + @@ -60371,7 +60378,7 @@ - + @@ -60381,11 +60388,11 @@ - + - + @@ -60406,17 +60413,17 @@ - + - + - + @@ -65041,7 +65048,7 @@ - + @@ -65914,7 +65921,7 @@ - + @@ -65928,7 +65935,7 @@ - + @@ -65964,7 +65971,7 @@ - + @@ -67331,7 +67338,7 @@ - + @@ -67414,7 +67421,7 @@ - + @@ -67454,7 +67461,7 @@ - + @@ -67817,7 +67824,7 @@ - + @@ -67865,7 +67872,7 @@ - + @@ -68933,7 +68940,7 @@ - + @@ -72473,7 +72480,7 @@ - + @@ -72510,14 +72517,6 @@ - - - - - - - - @@ -72614,6 +72613,14 @@ + + + + + + + + @@ -72754,7 +72761,7 @@ - + @@ -72778,7 +72785,7 @@ - + @@ -81849,7 +81856,7 @@ - + @@ -82211,7 +82218,7 @@ - + @@ -82227,7 +82234,7 @@ - + @@ -82251,7 +82258,7 @@ - + @@ -83909,41 +83916,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -84175,7 +84147,7 @@ - + @@ -84184,7 +84156,7 @@ - + @@ -84227,7 +84199,7 @@ - + @@ -84348,7 +84320,7 @@ - + @@ -84404,7 +84376,7 @@ - + @@ -84659,7 +84631,7 @@ - + @@ -85057,7 +85029,7 @@ - + @@ -85065,6 +85037,17 @@ + + + + + + + + + + + @@ -85969,7 +85952,7 @@ - + @@ -86800,7 +86783,7 @@ - + @@ -87254,7 +87237,7 @@ - + @@ -87313,7 +87296,7 @@ - + @@ -87490,7 +87473,7 @@ - + @@ -87498,7 +87481,7 @@ - + @@ -87594,7 +87577,7 @@ - + @@ -88072,7 +88055,7 @@ - + @@ -88083,7 +88066,7 @@ - + @@ -88279,23 +88262,6 @@ - - - - - - - - - - - - - - - - - @@ -88365,7 +88331,7 @@ - + @@ -88478,7 +88444,7 @@ - + @@ -88691,7 +88657,7 @@ - + @@ -90366,53 +90332,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -91828,7 +91747,7 @@ - + @@ -91840,7 +91759,7 @@ - + @@ -93145,7 +93064,7 @@ - + @@ -93153,7 +93072,7 @@ - + @@ -93169,7 +93088,7 @@ - + @@ -93789,7 +93708,7 @@ - + @@ -93797,7 +93716,7 @@ - + @@ -93805,6 +93724,23 @@ + + + + + + + + + + + + + + + + + @@ -93889,7 +93825,7 @@ - + @@ -93922,7 +93858,7 @@ - + @@ -94023,10 +93959,10 @@ - + - + @@ -95447,19 +95383,19 @@ - + - + - + - + @@ -95471,7 +95407,7 
@@ - + @@ -95510,7 +95446,7 @@ - + @@ -95541,7 +95477,7 @@ - + @@ -95561,7 +95497,7 @@ - + @@ -97444,10 +97380,10 @@ - + - + @@ -97529,10 +97465,10 @@ - + - + @@ -99338,28 +99274,7 @@ - - - - - - - - - - - - - - - - - - - - - - + @@ -99383,7 +99298,7 @@ - + @@ -99399,6 +99314,27 @@ + + + + + + + + + + + + + + + + + + + + + @@ -99666,7 +99602,7 @@ - + @@ -100046,7 +99982,7 @@ - + diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index 6cc4d398a8f6..c3c44f9a1a83 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -1571,6 +1571,7 @@ set_page_dirty_lock sg_alloc_table_from_pages sysfs_remove_files + __tracepoint_gpu_mem_total unmapped_area_topdown unregister_shrinker vm_insert_page -- GitLab From f4d84941832fca4b1939b1683dd1e40fdc59a32d Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 16 Jul 2020 00:34:21 -0500 Subject: [PATCH 0303/1304] smb3: warn on confusing error scenario with sec=krb5 commit 0a018944eee913962bce8ffebbb121960d5125d9 upstream. When mounting with Kerberos, users have been confused about the default error returned in scenarios in which either keyutils is not installed or the user did not properly acquire a krb5 ticket. Log a warning message in the case that "ENOKEY" is returned from the get_spnego_key upcall so that users can better understand why mount failed in those two cases. 
CC: Stable Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb2pdu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index e2d2b749c8f3..379ac8caa29a 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -1132,6 +1132,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) spnego_key = cifs_get_spnego_key(ses); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); + if (rc == -ENOKEY) + cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n"); spnego_key = NULL; goto out; } -- GitLab From 5c4d9eefd314e763dcb2a499797176c17ad6ab69 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 24 Jul 2020 22:44:41 +0200 Subject: [PATCH 0304/1304] genirq/affinity: Make affinity setting if activated opt-in commit f0c7baca180046824e07fc5f1326e83a8fd150c7 upstream. John reported that on a RK3288 system the perf per CPU interrupts are all affine to CPU0 and provided the analysis: "It looks like what happens is that because the interrupts are not per-CPU in the hardware, armpmu_request_irq() calls irq_force_affinity() while the interrupt is deactivated and then request_irq() with IRQF_PERCPU | IRQF_NOBALANCING. Now when irq_startup() runs with IRQ_STARTUP_NORMAL, it calls irq_setup_affinity() which returns early because IRQF_PERCPU and IRQF_NOBALANCING are set, leaving the interrupt on its original CPU." This was broken by the recent commit which blocked interrupt affinity setting in hardware before activation of the interrupt. While this works in general, it does not work for this particular case. As contrary to the initial analysis not all interrupt chip drivers implement an activate callback, the safe cure is to make the deferred interrupt affinity setting at activation time opt-in. Implement the necessary core logic and make the two irqchip implementations for which this is required opt-in. In hindsight this would have been the right thing to do, but ... 
Fixes: baedb87d1b53 ("genirq/affinity: Handle affinity setting on inactive interrupts correctly") Reported-by: John Keeping Signed-off-by: Thomas Gleixner Tested-by: Marc Zyngier Acked-by: Marc Zyngier Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/87blk4tzgm.fsf@nanos.tec.linutronix.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/apic/vector.c | 4 ++++ drivers/irqchip/irq-gic-v3-its.c | 5 ++++- include/linux/irq.h | 13 +++++++++++++ kernel/irq/manage.c | 6 +++++- 4 files changed, 26 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 99c28c02b7a5..8b7e0b46e86e 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -556,6 +556,10 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, irqd->chip_data = apicd; irqd->hwirq = virq + i; irqd_set_single_target(irqd); + + /* Don't invoke affinity setter on deactivated interrupts */ + irqd_set_affinity_on_activate(irqd); + /* * Legacy vectors are already assigned when the IOAPIC * takes them over. They stay on the same vector. 
This is diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index fe7d63cdfb1d..d5cc32e80f5e 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2458,6 +2458,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, { msi_alloc_info_t *info = args; struct its_device *its_dev = info->scratchpad[0].ptr; + struct irq_data *irqd; irq_hw_number_t hwirq; int err; int i; @@ -2473,7 +2474,9 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &its_irq_chip, its_dev); - irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); pr_debug("ID:%d pID:%d vID:%d\n", (int)(hwirq + i - its_dev->event_map.lpi_base), (int)(hwirq + i), virq + i); diff --git a/include/linux/irq.h b/include/linux/irq.h index 6ecaf056ab63..a042faefb9b7 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -210,6 +210,8 @@ struct irq_data { * IRQD_CAN_RESERVE - Can use reservation mode * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change * required + * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call + * irq_chip::irq_set_affinity() when deactivated. 
*/ enum { IRQD_TRIGGER_MASK = 0xf, @@ -233,6 +235,7 @@ enum { IRQD_DEFAULT_TRIGGER_SET = (1 << 25), IRQD_CAN_RESERVE = (1 << 26), IRQD_MSI_NOMASK_QUIRK = (1 << 27), + IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -407,6 +410,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d) return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; } +static inline void irqd_set_affinity_on_activate(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; +} + +static inline bool irqd_affinity_on_activate(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 025fcd029f83..3b66c77670d9 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -280,12 +280,16 @@ static bool irq_set_affinity_deactivated(struct irq_data *data, struct irq_desc *desc = irq_data_to_desc(data); /* + * Handle irq chips which can handle affinity only in activated + * state correctly + * * If the interrupt is not yet activated, just store the affinity * mask and do not call the chip driver at all. On activation the * driver has to make sure anyway that the interrupt is in a * useable state so startup works. */ - if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data)) + if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || + irqd_is_activated(data) || !irqd_affinity_on_activate(data)) return false; cpumask_copy(desc->irq_common_data.affinity, mask); -- GitLab From c59ea9bde42ede641006940a339eabe6669cc1be Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 26 Jun 2020 19:42:34 +0200 Subject: [PATCH 0305/1304] PCI: hotplug: ACPI: Fix context refcounting in acpiphp_grab_context() commit dae68d7fd4930315389117e9da35b763f12238f9 upstream. 
If context is not NULL in acpiphp_grab_context(), but the is_going_away flag is set for the device's parent, the reference counter of the context needs to be decremented before returning NULL or the context will never be freed, so make that happen. Fixes: edf5bf34d408 ("ACPI / dock: Use callback pointers from devices' ACPI hotplug contexts") Reported-by: Vasily Averin Cc: 3.15+ # 3.15+ Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/pci/hotplug/acpiphp_glue.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index c94c13525447..be35bbfa6968 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev) struct acpiphp_context *context; acpi_lock_hp_context(); + context = acpiphp_get_context(adev); - if (!context || context->func.parent->is_going_away) { - acpi_unlock_hp_context(); - return NULL; + if (!context) + goto unlock; + + if (context->func.parent->is_going_away) { + acpiphp_put_context(context); + context = NULL; + goto unlock; } + get_bridge(context->func.parent); acpiphp_put_context(context); + +unlock: acpi_unlock_hp_context(); return context; } -- GitLab From 71c6716cb61a7f474a47186c459acf7fbc780b3d Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Tue, 28 Jul 2020 18:45:53 +0800 Subject: [PATCH 0306/1304] PCI: Mark AMD Navi10 GPU rev 0x00 ATS as broken commit 45beb31d3afb651bb5c41897e46bd4fa9980c51c upstream. We are seeing AMD Radeon Pro W5700 doesn't work when IOMMU is enabled: iommu ivhd0: AMD-Vi: Event logged [IOTLB_INV_TIMEOUT device=63:00.0 address=0x42b5b01a0] iommu ivhd0: AMD-Vi: Event logged [IOTLB_INV_TIMEOUT device=63:00.0 address=0x42b5b01c0] The error also makes graphics driver fail to probe the device. 
It appears to be the same issue as commit 5e89cd303e3a ("PCI: Mark AMD Navi14 GPU rev 0xc5 ATS as broken") addresses, and indeed the same ATS quirk can workaround the issue. See-also: 5e89cd303e3a ("PCI: Mark AMD Navi14 GPU rev 0xc5 ATS as broken") See-also: d28ca864c493 ("PCI: Mark AMD Stoney Radeon R7 GPU ATS as broken") See-also: 9b44b0b09dec ("PCI: Mark AMD Stoney GPU ATS as broken") Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=208725 Link: https://lore.kernel.org/r/20200728104554.28927-1-kai.heng.feng@canonical.com Signed-off-by: Kai-Heng Feng Signed-off-by: Bjorn Helgaas Acked-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/pci/quirks.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 9129ccd593d1..af2149632102 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5068,7 +5068,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); */ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - if (pdev->device == 0x7340 && pdev->revision != 0xc5) + if ((pdev->device == 0x7312 && pdev->revision != 0x00) || + (pdev->device == 0x7340 && pdev->revision != 0xc5)) return; pci_info(pdev, "disabling ATS\n"); @@ -5079,6 +5080,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); +/* AMD Navi10 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ -- GitLab From ae33b1ebbce825c85dbabfdbbea7db72f51298d5 Mon Sep 17 00:00:00 2001 From: Rajat Jain Date: Mon, 6 Jul 2020 16:32:40 -0700 Subject: [PATCH 0307/1304] PCI: Add device even if driver attach failed commit 
2194bc7c39610be7cabe7456c5f63a570604f015 upstream. device_attach() returning failure indicates a driver error while trying to probe the device. In such a scenario, the PCI device should still be added in the system and be visible to the user. When device_attach() fails, merely warn about it and keep the PCI device in the system. This partially reverts ab1a187bba5c ("PCI: Check device_attach() return value always"). Link: https://lore.kernel.org/r/20200706233240.3245512-1-rajatja@google.com Signed-off-by: Rajat Jain Signed-off-by: Bjorn Helgaas Reviewed-by: Greg Kroah-Hartman Cc: stable@vger.kernel.org # v4.6+ Signed-off-by: Greg Kroah-Hartman --- drivers/pci/bus.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 5cb40b2518f9..87a2829dffd4 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -323,12 +323,8 @@ void pci_bus_add_device(struct pci_dev *dev) dev->match_driver = true; retval = device_attach(&dev->dev); - if (retval < 0 && retval != -EPROBE_DEFER) { + if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); - pci_proc_detach_device(dev); - pci_remove_sysfs_dev_files(dev); - return; - } pci_dev_assign_added(dev, true); } -- GitLab From 56e2a4456647942e5f3aca88da164f30dcbf95ad Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Mon, 15 Jun 2020 23:06:03 +0200 Subject: [PATCH 0308/1304] PCI: qcom: Define some PARF params needed for ipq8064 SoC commit 5149901e9e6deca487c01cc434a3ac4125c7b00b upstream. Set some specific value for Tx De-Emphasis, Tx Swing and Rx equalization needed on some ipq8064 based device (Netgear R7800 for example). Without this the system locks on kernel load. 
Link: https://lore.kernel.org/r/20200615210608.21469-8-ansuelsmth@gmail.com Fixes: 82a823833f4e ("PCI: qcom: Add Qualcomm PCIe controller driver") Signed-off-by: Ansuel Smith Signed-off-by: Lorenzo Pieralisi Reviewed-by: Rob Herring Acked-by: Stanimir Varbanov Cc: stable@vger.kernel.org # v4.5+ Signed-off-by: Greg Kroah-Hartman --- drivers/pci/controller/dwc/pcie-qcom.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index e292801fff7f..06d951ad44ae 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -76,6 +76,18 @@ #define DBI_RO_WR_EN 1 #define PERST_DELAY_US 1000 +/* PARF registers */ +#define PCIE20_PARF_PCS_DEEMPH 0x34 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) + +#define PCIE20_PARF_PCS_SWING 0x38 +#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) +#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) + +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PHY_RX0_EQ(x) ((x) << 24) #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 #define SLV_ADDR_SPACE_SZ 0x10000000 @@ -275,6 +287,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + struct device_node *node = dev->of_node; u32 val; int ret; @@ -319,6 +332,17 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) val &= ~BIT(0); writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), + pcie->parf + PCIE20_PARF_PCS_DEEMPH); + writel(PCS_SWING_TX_SWING_FULL(120) | + PCS_SWING_TX_SWING_LOW(120), + pcie->parf + PCIE20_PARF_PCS_SWING); + writel(PHY_RX0_EQ(4), 
pcie->parf + PCIE20_PARF_CONFIG_BITS); + } + /* enable external reference clock */ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); val |= BIT(16); -- GitLab From dd6dc2fd66824c2fc6bdf07ff44a00a9695636d2 Mon Sep 17 00:00:00 2001 From: Ansuel Smith Date: Mon, 15 Jun 2020 23:06:04 +0200 Subject: [PATCH 0309/1304] PCI: qcom: Add support for tx term offset for rev 2.1.0 commit de3c4bf648975ea0b1d344d811e9b0748907b47c upstream. Add tx term offset support to pcie qcom driver need in some revision of the ipq806x SoC. Ipq8064 needs tx term offset set to 7. Link: https://lore.kernel.org/r/20200615210608.21469-9-ansuelsmth@gmail.com Fixes: 82a823833f4e ("PCI: qcom: Add Qualcomm PCIe controller driver") Signed-off-by: Sham Muthayyan Signed-off-by: Ansuel Smith Signed-off-by: Lorenzo Pieralisi Acked-by: Stanimir Varbanov Cc: stable@vger.kernel.org # v4.5+ Signed-off-by: Greg Kroah-Hartman --- drivers/pci/controller/dwc/pcie-qcom.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 06d951ad44ae..1bdac298a943 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -45,7 +45,13 @@ #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 #define PCIE20_PARF_PHY_CTRL 0x40 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) + #define PCIE20_PARF_PHY_REFCLK 0x4C +#define PHY_REFCLK_SSP_EN BIT(16) +#define PHY_REFCLK_USE_PAD BIT(12) + #define PCIE20_PARF_DBI_BASE_ADDR 0x168 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 @@ -343,9 +349,18 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); } + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + /* set TX termination offset */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= 
~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; + val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + } + /* enable external reference clock */ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); - val |= BIT(16); + val &= ~PHY_REFCLK_USE_PAD; + val |= PHY_REFCLK_SSP_EN; writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); ret = reset_control_deassert(res->phy_reset); -- GitLab From 54a7a9d75c0727433feb634b1025c84589949e02 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Sat, 19 Jan 2019 11:35:04 -0600 Subject: [PATCH 0310/1304] PCI: Probe bridge window attributes once at enumeration-time commit 51c48b310183ab6ba5419edfc6a8de889cc04521 upstream. pci_bridge_check_ranges() determines whether a bridge supports the optional I/O and prefetchable memory windows and sets the flag bits in the bridge resources. This *could* be done once during enumeration except that the resource allocation code completely clears the flag bits, e.g., in the pci_assign_unassigned_bridge_resources() path. The problem with pci_bridge_check_ranges() in the resource allocation path is that we may allocate resources after devices have been claimed by drivers, and pci_bridge_check_ranges() *changes* the window registers to determine whether they're writable. This may break concurrent accesses to devices behind the bridge. Add a new pci_read_bridge_windows() to determine whether a bridge supports the optional windows, call it once during enumeration, remember the results, and change pci_bridge_check_ranges() so it doesn't touch the bridge windows but sets the flag bits based on those remembered results. Link: https://lore.kernel.org/linux-pci/1506151482-113560-1-git-send-email-wangzhou1@hisilicon.com Link: https://lists.gnu.org/archive/html/qemu-devel/2018-12/msg02082.html Reported-by: Yandong Xu Tested-by: Yandong Xu Signed-off-by: Bjorn Helgaas Cc: Michael S. 
Tsirkin Cc: Sagi Grimberg Cc: Ofer Hayut Cc: Roy Shterman Cc: Keith Busch Cc: Zhou Wang Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=208371 Signed-off-by: Dima Stepanov Signed-off-by: Greg Kroah-Hartman --- drivers/pci/probe.c | 52 +++++++++++++++++++++++++++++++++++++++++ drivers/pci/setup-bus.c | 45 ++++------------------------------- include/linux/pci.h | 3 +++ 3 files changed, 59 insertions(+), 41 deletions(-) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index cbc0d8da7483..9a5b6a8e2502 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -348,6 +348,57 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) } } +static void pci_read_bridge_windows(struct pci_dev *bridge) +{ + u16 io; + u32 pmem, tmp; + + pci_read_config_word(bridge, PCI_IO_BASE, &io); + if (!io) { + pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); + pci_read_config_word(bridge, PCI_IO_BASE, &io); + pci_write_config_word(bridge, PCI_IO_BASE, 0x0); + } + if (io) + bridge->io_window = 1; + + /* + * DECchip 21050 pass 2 errata: the bridge may miss an address + * disconnect boundary by one PCI data phase. Workaround: do not + * use prefetching on this device. + */ + if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) + return; + + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + if (!pmem) { + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, + 0xffe0fff0); + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); + } + if (!pmem) + return; + + bridge->pref_window = 1; + + if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { + + /* + * Bridge claims to have a 64-bit prefetchable memory + * window; verify that the upper bits are actually + * writable. 
+ */ + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, + 0xffffffff); + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); + if (tmp) + bridge->pref_64_window = 1; + } +} + static void pci_read_bridge_io(struct pci_bus *child) { struct pci_dev *dev = child->self; @@ -1712,6 +1763,7 @@ int pci_setup_device(struct pci_dev *dev) pci_read_irq(dev); dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); + pci_read_bridge_windows(dev); set_pcie_hotplug_bridge(dev); pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); if (pos) { diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 8e5b00a420a5..87c8190de622 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -735,58 +735,21 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i) base/limit registers must be read-only and read as 0. */ static void pci_bridge_check_ranges(struct pci_bus *bus) { - u16 io; - u32 pmem; struct pci_dev *bridge = bus->self; - struct resource *b_res; + struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; - b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; b_res[1].flags |= IORESOURCE_MEM; - pci_read_config_word(bridge, PCI_IO_BASE, &io); - if (!io) { - pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); - pci_read_config_word(bridge, PCI_IO_BASE, &io); - pci_write_config_word(bridge, PCI_IO_BASE, 0x0); - } - if (io) + if (bridge->io_window) b_res[0].flags |= IORESOURCE_IO; - /* DECchip 21050 pass 2 errata: the bridge may miss an address - disconnect boundary by one PCI data phase. - Workaround: do not use prefetching on this device. 
*/ - if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) - return; - - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - if (!pmem) { - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, - 0xffe0fff0); - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); - } - if (pmem) { + if (bridge->pref_window) { b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; - if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == - PCI_PREF_RANGE_TYPE_64) { + if (bridge->pref_64_window) { b_res[2].flags |= IORESOURCE_MEM_64; b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; } } - - /* double check if bridge does support 64 bit pref */ - if (b_res[2].flags & IORESOURCE_MEM_64) { - u32 mem_base_hi, tmp; - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, - &mem_base_hi); - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, - 0xffffffff); - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); - if (!tmp) - b_res[2].flags &= ~IORESOURCE_MEM_64; - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, - mem_base_hi); - } } /* Helper function for sizing routines: find first available diff --git a/include/linux/pci.h b/include/linux/pci.h index b1f297f4b7b0..2517492dd185 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -373,6 +373,9 @@ struct pci_dev { bool match_driver; /* Skip attaching driver */ unsigned int transparent:1; /* Subtractive decode bridge */ + unsigned int io_window:1; /* Bridge has I/O window */ + unsigned int pref_window:1; /* Bridge has pref mem window */ + unsigned int pref_64_window:1; /* Pref mem window is 64-bit */ unsigned int multifunction:1; /* Multi-function device */ unsigned int is_busmaster:1; /* Is busmaster */ -- GitLab From 3b5318a963bddf2bb4e6a1a3d46f25ed7580c97f Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 16 Jun 2020 10:17:37 +0800 Subject: [PATCH 0311/1304] btrfs: free anon block device right after subvolume deletion commit 
082b6c970f02fefd278c7833880cda29691a5f34 upstream. [BUG] When a lot of subvolumes are created, there is a user report about transaction aborted caused by slow anonymous block device reclaim: BTRFS: Transaction aborted (error -24) WARNING: CPU: 17 PID: 17041 at fs/btrfs/transaction.c:1576 create_pending_snapshot+0xbc4/0xd10 [btrfs] RIP: 0010:create_pending_snapshot+0xbc4/0xd10 [btrfs] Call Trace: create_pending_snapshots+0x82/0xa0 [btrfs] btrfs_commit_transaction+0x275/0x8c0 [btrfs] btrfs_mksubvol+0x4b9/0x500 [btrfs] btrfs_ioctl_snap_create_transid+0x174/0x180 [btrfs] btrfs_ioctl_snap_create_v2+0x11c/0x180 [btrfs] btrfs_ioctl+0x11a4/0x2da0 [btrfs] do_vfs_ioctl+0xa9/0x640 ksys_ioctl+0x67/0x90 __x64_sys_ioctl+0x1a/0x20 do_syscall_64+0x5a/0x110 entry_SYSCALL_64_after_hwframe+0x44/0xa9 ---[ end trace 33f2f83f3d5250e9 ]--- BTRFS: error (device sda1) in create_pending_snapshot:1576: errno=-24 unknown BTRFS info (device sda1): forced readonly BTRFS warning (device sda1): Skipping commit of aborted transaction. BTRFS: error (device sda1) in cleanup_transaction:1831: errno=-24 unknown [CAUSE] The anonymous device pool is shared and its size is 1M. It's possible to hit that limit if the subvolume deletion is not fast enough and the subvolumes to be cleaned keep the ids allocated. [WORKAROUND] We can't avoid the anon device pool exhaustion but we can shorten the time the id is attached to the subvolume root once the subvolume becomes invisible to the user. 
Reported-by: Greed Rong Link: https://lore.kernel.org/linux-btrfs/CA+UqX+NTrZ6boGnWHhSeZmEY5J76CTqmYjO2S+=tHJX7nb9DPw@mail.gmail.com/ CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Josef Bacik Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/inode.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7befb7c12bd3..6154fbaf43e1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4458,6 +4458,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) } } + free_anon_bdev(dest->anon_dev); + dest->anon_dev = 0; out_end_trans: trans->block_rsv = NULL; trans->bytes_reserved = 0; -- GitLab From 8eadf67bc216537337655cb1d73af0b00861a022 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 16 Jun 2020 10:17:34 +0800 Subject: [PATCH 0312/1304] btrfs: don't allocate anonymous block device for user invisible roots commit 851fd730a743e072badaf67caf39883e32439431 upstream. [BUG] When a lot of subvolumes are created, there is a user report about transaction aborted: BTRFS: Transaction aborted (error -24) WARNING: CPU: 17 PID: 17041 at fs/btrfs/transaction.c:1576 create_pending_snapshot+0xbc4/0xd10 [btrfs] RIP: 0010:create_pending_snapshot+0xbc4/0xd10 [btrfs] Call Trace: create_pending_snapshots+0x82/0xa0 [btrfs] btrfs_commit_transaction+0x275/0x8c0 [btrfs] btrfs_mksubvol+0x4b9/0x500 [btrfs] btrfs_ioctl_snap_create_transid+0x174/0x180 [btrfs] btrfs_ioctl_snap_create_v2+0x11c/0x180 [btrfs] btrfs_ioctl+0x11a4/0x2da0 [btrfs] do_vfs_ioctl+0xa9/0x640 ksys_ioctl+0x67/0x90 __x64_sys_ioctl+0x1a/0x20 do_syscall_64+0x5a/0x110 entry_SYSCALL_64_after_hwframe+0x44/0xa9 ---[ end trace 33f2f83f3d5250e9 ]--- BTRFS: error (device sda1) in create_pending_snapshot:1576: errno=-24 unknown BTRFS info (device sda1): forced readonly BTRFS warning (device sda1): Skipping commit of aborted transaction. 
BTRFS: error (device sda1) in cleanup_transaction:1831: errno=-24 unknown [CAUSE] The error is EMFILE (Too many files open) and comes from the anonymous block device allocation. The ids are in a shared pool of size 1<<20. The ids are assigned to live subvolumes, ie. the root structure exists in memory (eg. after creation or after the root appears in some path). The pool could be exhausted if the numbers are not reclaimed fast enough, after subvolume deletion or if another system component uses the anon block devices. [WORKAROUND] Since it's not possible to completely solve the problem, we can only minimize the time the id is allocated to a subvolume root. Firstly, we can reduce the use of anon_dev by trees that are not subvolume roots, like data reloc tree. This patch will do extra check on root objectid, to skip roots that don't need anon_dev. Currently it's only data reloc tree and orphan roots. Reported-by: Greed Rong Link: https://lore.kernel.org/linux-btrfs/CA+UqX+NTrZ6boGnWHhSeZmEY5J76CTqmYjO2S+=tHJX7nb9DPw@mail.gmail.com/ CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Josef Bacik Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/disk-io.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9740f7b5d4fb..3130844e219c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1500,9 +1500,16 @@ int btrfs_init_fs_root(struct btrfs_root *root) spin_lock_init(&root->ino_cache_lock); init_waitqueue_head(&root->ino_cache_wait); - ret = get_anon_bdev(&root->anon_dev); - if (ret) - goto fail; + /* + * Don't assign anonymous block device to roots that are not exposed to + * userspace, the id pool is limited to 1M + */ + if (is_fstree(root->root_key.objectid) && + btrfs_root_refs(&root->root_item) > 0) { + ret = get_anon_bdev(&root->anon_dev); + if (ret) + goto fail; + } mutex_lock(&root->objectid_mutex); ret = 
btrfs_find_highest_objectid(root, -- GitLab From 6bf983c8db01d69eb3eb0a1ee90e43ad3a7b8709 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Tue, 7 Jul 2020 06:29:08 -0700 Subject: [PATCH 0313/1304] btrfs: ref-verify: fix memory leak in add_block_entry commit d60ba8de1164e1b42e296ff270c622a070ef8fe7 upstream. clang static analysis flags this error fs/btrfs/ref-verify.c:290:3: warning: Potential leak of memory pointed to by 're' [unix.Malloc] kfree(be); ^~~~~ The problem is in this block of code: if (root_objectid) { struct root_entry *exist_re; exist_re = insert_root_entry(&exist->roots, re); if (exist_re) kfree(re); } There is no 'else' block freeing when root_objectid is 0. Add the missing kfree to the else branch. Fixes: fd708b81d972 ("Btrfs: add a extent ref verify tool") CC: stable@vger.kernel.org # 4.19+ Signed-off-by: Tom Rix Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/ref-verify.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index dbc685ca017f..5dec52bd2897 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -297,6 +297,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info, exist_re = insert_root_entry(&exist->roots, re); if (exist_re) kfree(re); + } else { + kfree(re); } kfree(be); return exist; -- GitLab From fa511954694cbea4d0cb59c81c8670276920c08c Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Fri, 10 Jul 2020 14:37:38 +0800 Subject: [PATCH 0314/1304] btrfs: don't traverse into the seed devices in show_devname commit 4faf55b03823e96c44dc4e364520000ed3b12fdb upstream. ->show_devname currently shows the lowest devid in the list. As the seed devices have the lowest devid in the sprouted filesystem, the userland tool such as findmnt end up seeing seed device instead of the device from the read-writable sprouted filesystem. As shown below. 
mount /dev/sda /btrfs mount: /btrfs: WARNING: device write-protected, mounted read-only. findmnt --output SOURCE,TARGET,UUID /btrfs SOURCE TARGET UUID /dev/sda /btrfs 899f7027-3e46-4626-93e7-7d4c9ad19111 btrfs dev add -f /dev/sdb /btrfs umount /btrfs mount /dev/sdb /btrfs findmnt --output SOURCE,TARGET,UUID /btrfs SOURCE TARGET UUID /dev/sda /btrfs 899f7027-3e46-4626-93e7-7d4c9ad19111 All sprouts from a single seed will show the same seed device and the same fsid. That's confusing. This is causing problems in our prototype as there isn't any reference to the sprout file-system(s) which is being used for actual read and write. This was added in the patch which implemented the show_devname in btrfs commit 9c5085c14798 ("Btrfs: implement ->show_devname"). I tried to look for any particular reason that we need to show the seed device, there isn't any. So instead, do not traverse through the seed devices, just show the lowest devid in the sprouted fsid. After the patch: mount /dev/sda /btrfs mount: /btrfs: WARNING: device write-protected, mounted read-only. findmnt --output SOURCE,TARGET,UUID /btrfs SOURCE TARGET UUID /dev/sda /btrfs 899f7027-3e46-4626-93e7-7d4c9ad19111 btrfs dev add -f /dev/sdb /btrfs mount -o rw,remount /dev/sdb /btrfs findmnt --output SOURCE,TARGET,UUID /btrfs SOURCE TARGET UUID /dev/sdb /btrfs 595ca0e6-b82e-46b5-b9e2-c72a6928be48 mount /dev/sda /btrfs1 mount: /btrfs1: WARNING: device write-protected, mounted read-only. btrfs dev add -f /dev/sdc /btrfs1 findmnt --output SOURCE,TARGET,UUID /btrfs1 SOURCE TARGET UUID /dev/sdc /btrfs1 ca1dbb7a-8446-4f95-853c-a20f3f82bdbb cat /proc/self/mounts | grep btrfs /dev/sdb /btrfs btrfs rw,relatime,noacl,space_cache,subvolid=5,subvol=/ 0 0 /dev/sdc /btrfs1 btrfs ro,relatime,noacl,space_cache,subvolid=5,subvol=/ 0 0 Reported-by: Martin K. Petersen CC: stable@vger.kernel.org # 4.19+ Tested-by: Martin K. 
Petersen Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/super.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 6a5b16a119ed..a670205c9808 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2314,9 +2314,7 @@ static int btrfs_unfreeze(struct super_block *sb) static int btrfs_show_devname(struct seq_file *m, struct dentry *root) { struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); - struct btrfs_fs_devices *cur_devices; struct btrfs_device *dev, *first_dev = NULL; - struct list_head *head; /* * Lightweight locking of the devices. We should not need @@ -2326,18 +2324,13 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root) * least until until the rcu_read_unlock. */ rcu_read_lock(); - cur_devices = fs_info->fs_devices; - while (cur_devices) { - head = &cur_devices->devices; - list_for_each_entry_rcu(dev, head, dev_list) { - if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) - continue; - if (!dev->name) - continue; - if (!first_dev || dev->devid < first_dev->devid) - first_dev = dev; - } - cur_devices = cur_devices->seed; + list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) { + if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) + continue; + if (!dev->name) + continue; + if (!first_dev || dev->devid < first_dev->devid) + first_dev = dev; } if (first_dev) -- GitLab From 35b4a28051b237a4e3f71f6778b1f536d6a82a5e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 17 Jul 2020 15:12:27 -0400 Subject: [PATCH 0315/1304] btrfs: open device without device_list_mutex commit 18c850fdc5a801bad4977b0f1723761d42267e45 upstream. There's long existed a lockdep splat because we open our bdev's under the ->device_list_mutex at mount time, which acquires the bd_mutex. 
Usually this goes unnoticed, but if you do loopback devices at all suddenly the bd_mutex comes with a whole host of other dependencies, which results in the splat when you mount a btrfs file system. ====================================================== WARNING: possible circular locking dependency detected 5.8.0-0.rc3.1.fc33.x86_64+debug #1 Not tainted ------------------------------------------------------ systemd-journal/509 is trying to acquire lock: ffff970831f84db0 (&fs_info->reloc_mutex){+.+.}-{3:3}, at: btrfs_record_root_in_trans+0x44/0x70 [btrfs] but task is already holding lock: ffff97083144d598 (sb_pagefaults){.+.+}-{0:0}, at: btrfs_page_mkwrite+0x59/0x560 [btrfs] which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #6 (sb_pagefaults){.+.+}-{0:0}: __sb_start_write+0x13e/0x220 btrfs_page_mkwrite+0x59/0x560 [btrfs] do_page_mkwrite+0x4f/0x130 do_wp_page+0x3b0/0x4f0 handle_mm_fault+0xf47/0x1850 do_user_addr_fault+0x1fc/0x4b0 exc_page_fault+0x88/0x300 asm_exc_page_fault+0x1e/0x30 -> #5 (&mm->mmap_lock#2){++++}-{3:3}: __might_fault+0x60/0x80 _copy_from_user+0x20/0xb0 get_sg_io_hdr+0x9a/0xb0 scsi_cmd_ioctl+0x1ea/0x2f0 cdrom_ioctl+0x3c/0x12b4 sr_block_ioctl+0xa4/0xd0 block_ioctl+0x3f/0x50 ksys_ioctl+0x82/0xc0 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x52/0xb0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #4 (&cd->lock){+.+.}-{3:3}: __mutex_lock+0x7b/0x820 sr_block_open+0xa2/0x180 __blkdev_get+0xdd/0x550 blkdev_get+0x38/0x150 do_dentry_open+0x16b/0x3e0 path_openat+0x3c9/0xa00 do_filp_open+0x75/0x100 do_sys_openat2+0x8a/0x140 __x64_sys_openat+0x46/0x70 do_syscall_64+0x52/0xb0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #3 (&bdev->bd_mutex){+.+.}-{3:3}: __mutex_lock+0x7b/0x820 __blkdev_get+0x6a/0x550 blkdev_get+0x85/0x150 blkdev_get_by_path+0x2c/0x70 btrfs_get_bdev_and_sb+0x1b/0xb0 [btrfs] open_fs_devices+0x88/0x240 [btrfs] btrfs_open_devices+0x92/0xa0 [btrfs] btrfs_mount_root+0x250/0x490 [btrfs] legacy_get_tree+0x30/0x50 
vfs_get_tree+0x28/0xc0 vfs_kern_mount.part.0+0x71/0xb0 btrfs_mount+0x119/0x380 [btrfs] legacy_get_tree+0x30/0x50 vfs_get_tree+0x28/0xc0 do_mount+0x8c6/0xca0 __x64_sys_mount+0x8e/0xd0 do_syscall_64+0x52/0xb0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #2 (&fs_devs->device_list_mutex){+.+.}-{3:3}: __mutex_lock+0x7b/0x820 btrfs_run_dev_stats+0x36/0x420 [btrfs] commit_cowonly_roots+0x91/0x2d0 [btrfs] btrfs_commit_transaction+0x4e6/0x9f0 [btrfs] btrfs_sync_file+0x38a/0x480 [btrfs] __x64_sys_fdatasync+0x47/0x80 do_syscall_64+0x52/0xb0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #1 (&fs_info->tree_log_mutex){+.+.}-{3:3}: __mutex_lock+0x7b/0x820 btrfs_commit_transaction+0x48e/0x9f0 [btrfs] btrfs_sync_file+0x38a/0x480 [btrfs] __x64_sys_fdatasync+0x47/0x80 do_syscall_64+0x52/0xb0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #0 (&fs_info->reloc_mutex){+.+.}-{3:3}: __lock_acquire+0x1241/0x20c0 lock_acquire+0xb0/0x400 __mutex_lock+0x7b/0x820 btrfs_record_root_in_trans+0x44/0x70 [btrfs] start_transaction+0xd2/0x500 [btrfs] btrfs_dirty_inode+0x44/0xd0 [btrfs] file_update_time+0xc6/0x120 btrfs_page_mkwrite+0xda/0x560 [btrfs] do_page_mkwrite+0x4f/0x130 do_wp_page+0x3b0/0x4f0 handle_mm_fault+0xf47/0x1850 do_user_addr_fault+0x1fc/0x4b0 exc_page_fault+0x88/0x300 asm_exc_page_fault+0x1e/0x30 other info that might help us debug this: Chain exists of: &fs_info->reloc_mutex --> &mm->mmap_lock#2 --> sb_pagefaults Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(sb_pagefaults); lock(&mm->mmap_lock#2); lock(sb_pagefaults); lock(&fs_info->reloc_mutex); *** DEADLOCK *** 3 locks held by systemd-journal/509: #0: ffff97083bdec8b8 (&mm->mmap_lock#2){++++}-{3:3}, at: do_user_addr_fault+0x12e/0x4b0 #1: ffff97083144d598 (sb_pagefaults){.+.+}-{0:0}, at: btrfs_page_mkwrite+0x59/0x560 [btrfs] #2: ffff97083144d6a8 (sb_internal){.+.+}-{0:0}, at: start_transaction+0x3f8/0x500 [btrfs] stack backtrace: CPU: 0 PID: 509 Comm: systemd-journal Not tainted 5.8.0-0.rc3.1.fc33.x86_64+debug #1 Hardware 
name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015 Call Trace: dump_stack+0x92/0xc8 check_noncircular+0x134/0x150 __lock_acquire+0x1241/0x20c0 lock_acquire+0xb0/0x400 ? btrfs_record_root_in_trans+0x44/0x70 [btrfs] ? lock_acquire+0xb0/0x400 ? btrfs_record_root_in_trans+0x44/0x70 [btrfs] __mutex_lock+0x7b/0x820 ? btrfs_record_root_in_trans+0x44/0x70 [btrfs] ? kvm_sched_clock_read+0x14/0x30 ? sched_clock+0x5/0x10 ? sched_clock_cpu+0xc/0xb0 btrfs_record_root_in_trans+0x44/0x70 [btrfs] start_transaction+0xd2/0x500 [btrfs] btrfs_dirty_inode+0x44/0xd0 [btrfs] file_update_time+0xc6/0x120 btrfs_page_mkwrite+0xda/0x560 [btrfs] ? sched_clock+0x5/0x10 do_page_mkwrite+0x4f/0x130 do_wp_page+0x3b0/0x4f0 handle_mm_fault+0xf47/0x1850 do_user_addr_fault+0x1fc/0x4b0 exc_page_fault+0x88/0x300 ? asm_exc_page_fault+0x8/0x30 asm_exc_page_fault+0x1e/0x30 RIP: 0033:0x7fa3972fdbfe Code: Bad RIP value. Fix this by not holding the ->device_list_mutex at this point. The device_list_mutex exists to protect us from modifying the device list while the file system is running. However it can also be modified by doing a scan on a device. But this action is specifically protected by the uuid_mutex, which we are holding here. We cannot race with opening at this point because we have the ->s_mount lock held during the mount. Not having the ->device_list_mutex here is perfectly safe as we're not going to change the devices at this point. 
CC: stable@vger.kernel.org # 4.19+ Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ add some comments ] Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/volumes.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e0ba1e9ddcdf..4abb2a155ac5 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -155,7 +155,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * * global::fs_devs - add, remove, updates to the global list * - * does not protect: manipulation of the fs_devices::devices list! + * does not protect: manipulation of the fs_devices::devices list in general + * but in mount context it could be used to exclude list modifications by eg. + * scan ioctl * * btrfs_device::name - renames (write side), read is RCU * @@ -168,6 +170,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, * may be used to exclude some operations from running concurrently without any * modifications to the list (see write_all_supers) * + * Is not required at mount and close times, because our device list is + * protected by the uuid_mutex at that point. + * * balance_mutex * ------------- * protects balance structures (status, state) and context accessed from @@ -656,6 +661,11 @@ static void btrfs_free_stale_devices(const char *path, } } +/* + * This is only used on mount, and we are protected from competing things + * messing with our fs_devices by the uuid_mutex, thus we do not need the + * fs_devices->device_list_mutex here. + */ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, struct btrfs_device *device, fmode_t flags, void *holder) @@ -1153,8 +1163,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int ret; lockdep_assert_held(&uuid_mutex); + /* + * The device_list_mutex cannot be taken here in case opening the + * underlying device takes further locks like bd_mutex. 
+ * + * We also don't need the lock here as this is called during mount and + * exclusion is provided by uuid_mutex + */ - mutex_lock(&fs_devices->device_list_mutex); if (fs_devices->opened) { fs_devices->opened++; ret = 0; @@ -1162,7 +1178,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, list_sort(NULL, &fs_devices->devices, devid_cmp); ret = open_fs_devices(fs_devices, flags, holder); } - mutex_unlock(&fs_devices->device_list_mutex); return ret; } -- GitLab From 7c1ddfc98703433b556b827ffc10dd3a133866e1 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 23 Jul 2020 19:08:55 +0200 Subject: [PATCH 0316/1304] btrfs: fix messages after changing compression level by remount commit 27942c9971cc405c60432eca9395e514a2ae9f5e upstream. Reported by Forza on IRC that remounting with compression options does not reflect the change in level, or at least it does not appear to do so according to the messages: mount -o compress=zstd:1 /dev/sda /mnt mount -o remount,compress=zstd:15 /mnt does not print the change to the level to syslog: [ 41.366060] BTRFS info (device vda): use zstd compression, level 1 [ 41.368254] BTRFS info (device vda): disk space caching is enabled [ 41.390429] BTRFS info (device vda): disk space caching is enabled What really happens is that the message is lost but the level is actually changed. There's another weird output, if compression is reset to 'no': [ 45.413776] BTRFS info (device vda): use no compression, level 4 To fix that, save the previous compression level and print the message in that case too and use separate message for 'no' compression. 
CC: stable@vger.kernel.org # 4.19+ Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/super.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a670205c9808..ed539496089f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -432,6 +432,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, char *compress_type; bool compress_force = false; enum btrfs_compression_type saved_compress_type; + int saved_compress_level; bool saved_compress_force; int no_compress = 0; @@ -514,6 +515,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, info->compress_type : BTRFS_COMPRESS_NONE; saved_compress_force = btrfs_test_opt(info, FORCE_COMPRESS); + saved_compress_level = info->compress_level; if (token == Opt_compress || token == Opt_compress_force || strncmp(args[0].from, "zlib", 4) == 0) { @@ -552,6 +554,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, no_compress = 0; } else if (strncmp(args[0].from, "no", 2) == 0) { compress_type = "no"; + info->compress_level = 0; + info->compress_type = 0; btrfs_clear_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); compress_force = false; @@ -572,11 +576,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, */ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS); } - if ((btrfs_test_opt(info, COMPRESS) && - (info->compress_type != saved_compress_type || - compress_force != saved_compress_force)) || - (!btrfs_test_opt(info, COMPRESS) && - no_compress == 1)) { + if (no_compress == 1) { + btrfs_info(info, "use no compression"); + } else if ((info->compress_type != saved_compress_type) || + (compress_force != saved_compress_force) || + (info->compress_level != saved_compress_level)) { btrfs_info(info, "%s %s compression, level %d", (compress_force) ? 
"force" : "use", compress_type, info->compress_level); -- GitLab From 627fa9d8071daad6aa84316c1fcb114a62db914f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 27 Jul 2020 10:28:05 -0400 Subject: [PATCH 0317/1304] btrfs: only search for left_info if there is no right_info in try_merge_free_space commit bf53d4687b8f3f6b752f091eb85f62369a515dfd upstream. In try_to_merge_free_space we attempt to find entries to the left and right of the entry we are adding to see if they can be merged. We search for an entry past our current info (saved into right_info), and then if right_info exists and it has a rb_prev() we save the rb_prev() into left_info. However there's a slight problem in the case that we have a right_info, but no entry previous to that entry. At that point we will search for an entry just before the info we're attempting to insert. This will simply find right_info again, and assign it to left_info, making them both the same pointer. Now if right_info _can_ be merged with the range we're inserting, we'll add it to the info and free right_info. However further down we'll access left_info, which was right_info, and thus get a use-after-free. Fix this by only searching for the left entry if we don't find a right entry at all. The CVE referenced had a specially crafted file system that could trigger this use-after-free. However with the tree checker improvements we no longer trigger the conditions for the UAF. But the original conditions still apply, hence this fix. 
Reference: CVE-2019-19448 Fixes: 963030817060 ("Btrfs: use hybrid extents+bitmap rb tree for free space") CC: stable@vger.kernel.org # 4.4+ Signed-off-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/free-space-cache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index c9965e89097f..4c65305fd418 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2169,7 +2169,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info, bool update_stat) { - struct btrfs_free_space *left_info; + struct btrfs_free_space *left_info = NULL; struct btrfs_free_space *right_info; bool merged = false; u64 offset = info->offset; @@ -2184,7 +2184,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, if (right_info && rb_prev(&right_info->offset_index)) left_info = rb_entry(rb_prev(&right_info->offset_index), struct btrfs_free_space, offset_index); - else + else if (!right_info) left_info = tree_search_offset(ctl, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { -- GitLab From 183af2d27dfced3167a88c80a2df776cb28a1255 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 29 Jul 2020 10:17:50 +0100 Subject: [PATCH 0318/1304] btrfs: fix memory leaks after failure to lookup checksums during inode logging commit 4f26433e9b3eb7a55ed70d8f882ae9cd48ba448b upstream. While logging an inode, at copy_items(), if we fail to lookup the checksums for an extent we release the destination path, free the ins_data array and then return immediately. However a previous iteration of the for loop may have added checksums to the ordered_sums list, in which case we leak the memory used by them. So fix this by making sure we iterate the ordered_sums list and free all its checksums before returning. 
Fixes: 3650860b90cc2a ("Btrfs: remove almost all of the BUG()'s from tree-log.c") CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Johannes Thumshirn Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/tree-log.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 928ac2c4899e..090315f4ac78 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3988,11 +3988,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, fs_info->csum_root, ds + cs, ds + cs + cl - 1, &ordered_sums, 0); - if (ret) { - btrfs_release_path(dst_path); - kfree(ins_data); - return ret; - } + if (ret) + break; } } } @@ -4005,7 +4002,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, * we have to do this after the loop above to avoid changing the * log tree while trying to change the log tree. */ - ret = 0; while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, struct btrfs_ordered_sum, -- GitLab From a34b58b5b43be7df523188dd2fa639075658ed0b Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Mon, 3 Aug 2020 11:35:06 +0200 Subject: [PATCH 0319/1304] btrfs: fix return value mixup in btrfs_get_extent commit 881a3a11c2b858fe9b69ef79ac5ee9978a266dc9 upstream. btrfs_get_extent() sets variable ret, but out: error path expect error to be in variable err so the error code is lost. 
Fixes: 6bf9e4bd6a27 ("btrfs: inode: Verify inode mode to avoid NULL pointer dereference") CC: stable@vger.kernel.org # 5.4+ Reviewed-by: Nikolay Borisov Signed-off-by: Pavel Machek (CIP) Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6154fbaf43e1..1656ef0e959f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7014,7 +7014,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, found_type == BTRFS_FILE_EXTENT_PREALLOC) { /* Only regular file could have regular/prealloc extent */ if (!S_ISREG(inode->vfs_inode.i_mode)) { - ret = -EUCLEAN; + err = -EUCLEAN; btrfs_crit(fs_info, "regular/prealloc extent found for non-regular inode %llu", btrfs_ino(inode)); -- GitLab From 0d4abc3512b0e79cd79c8d41dd3d7ed4a8bdbac1 Mon Sep 17 00:00:00 2001 From: Christian Eggers Date: Mon, 27 Jul 2020 12:16:05 +0200 Subject: [PATCH 0320/1304] dt-bindings: iio: io-channel-mux: Fix compatible string in example code commit add48ba425192c6e04ce70549129cacd01e2a09e upstream. The correct compatible string is "gpio-mux" (see bindings/mux/gpio-mux.txt). Cc: stable@vger.kernel.org # v4.13+ Reviewed-by: Peter Rosin Signed-off-by: Christian Eggers Link: https://lore.kernel.org/r/20200727101605.24384-1-ceggers@arri.de Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- .../devicetree/bindings/iio/multiplexer/io-channel-mux.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt index c82794002595..89647d714387 100644 --- a/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt +++ b/Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt @@ -21,7 +21,7 @@ controller state. 
The mux controller state is described in Example: mux: mux-controller { - compatible = "mux-gpio"; + compatible = "gpio-mux"; #mux-control-cells = <0>; mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>, -- GitLab From b86f06e13cc6700ec38e6226f4d2f1e5bbfc96b5 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Mon, 6 Jul 2020 14:02:57 +0300 Subject: [PATCH 0321/1304] iio: dac: ad5592r: fix unbalanced mutex unlocks in ad5592r_read_raw() commit 65afb0932a81c1de719ceee0db0b276094b10ac8 upstream. There are 2 exit paths where the lock isn't held, but try to unlock the mutex when exiting. In these places we should just return from the function. A neater approach would be to cleanup the ad5592r_read_raw(), but that would make this patch more difficult to backport to stable versions. Fixes 56ca9db862bf3: ("iio: dac: Add support for the AD5592R/AD5593R ADCs/DACs") Reported-by: Charles Stanhope Signed-off-by: Alexandru Ardelean Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/dac/ad5592r-base.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 095530c233e4..7549abd544c0 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c @@ -417,7 +417,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, s64 tmp = *val * (3767897513LL / 25LL); *val = div_s64_rem(tmp, 1000000000LL, val2); - ret = IIO_VAL_INT_PLUS_MICRO; + return IIO_VAL_INT_PLUS_MICRO; } else { int mult; @@ -448,7 +448,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev, ret = IIO_VAL_INT; break; default: - ret = -EINVAL; + return -EINVAL; } unlock: -- GitLab From 6ffc89cadbd02b83f23e572bb7c43ad9638f441f Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Fri, 31 Jul 2020 12:37:32 -0700 Subject: [PATCH 0322/1304] xtensa: fix xtensa_pmu_setup prototype MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 
6d65d3769d1910379e1cfa61ebf387efc6bfb22c upstream. Fix the following build error in configurations with CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS=y: arch/xtensa/kernel/perf_event.c:420:29: error: passing argument 3 of ‘cpuhp_setup_state’ from incompatible pointer type Cc: stable@vger.kernel.org Fixes: 25a77b55e74c ("xtensa/perf: Convert the hotplug notifier to state machine callbacks") Signed-off-by: Max Filippov Signed-off-by: Greg Kroah-Hartman --- arch/xtensa/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c index ff1d81385ed7..768e1f7ab871 100644 --- a/arch/xtensa/kernel/perf_event.c +++ b/arch/xtensa/kernel/perf_event.c @@ -404,7 +404,7 @@ static struct pmu xtensa_pmu = { .read = xtensa_pmu_read, }; -static int xtensa_pmu_setup(int cpu) +static int xtensa_pmu_setup(unsigned int cpu) { unsigned i; -- GitLab From d9710cc6bd97679f5ab38918cb67c6641e962453 Mon Sep 17 00:00:00 2001 From: Paul Aurich Date: Thu, 9 Jul 2020 22:01:16 -0700 Subject: [PATCH 0323/1304] cifs: Fix leak when handling lease break for cached root fid commit baf57b56d3604880ccb3956ec6c62ea894f5de99 upstream. Handling a lease break for the cached root didn't free the smb2_lease_break_work allocation, resulting in a leak: unreferenced object 0xffff98383a5af480 (size 128): comm "cifsd", pid 684, jiffies 4294936606 (age 534.868s) hex dump (first 32 bytes): c0 ff ff ff 1f 00 00 00 88 f4 5a 3a 38 98 ff ff ..........Z:8... 88 f4 5a 3a 38 98 ff ff 80 88 d6 8a ff ff ff ff ..Z:8........... backtrace: [<0000000068957336>] smb2_is_valid_oplock_break+0x1fa/0x8c0 [<0000000073b70b9e>] cifs_demultiplex_thread+0x73d/0xcc0 [<00000000905fa372>] kthread+0x11c/0x150 [<0000000079378e4e>] ret_from_fork+0x22/0x30 Avoid this leak by only allocating when necessary. 
Fixes: a93864d93977 ("cifs: add lease tracking to the cached root fid") Signed-off-by: Paul Aurich CC: Stable # v4.18+ Reviewed-by: Aurelien Aptel Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb2misc.c | 73 +++++++++++++++++++++++++++++++++------------- 1 file changed, 52 insertions(+), 21 deletions(-) diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 14265b4bbcc0..2fc96f7923ee 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -509,15 +509,31 @@ cifs_ses_oplock_break(struct work_struct *work) kfree(lw); } +static void +smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key, + __le32 new_lease_state) +{ + struct smb2_lease_break_work *lw; + + lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); + if (!lw) { + cifs_put_tlink(tlink); + return; + } + + INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); + lw->tlink = tlink; + lw->lease_state = new_lease_state; + memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE); + queue_work(cifsiod_wq, &lw->lease_break); +} + static bool -smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, - struct smb2_lease_break_work *lw) +smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp) { - bool found; __u8 lease_state; struct list_head *tmp; struct cifsFileInfo *cfile; - struct cifs_pending_open *open; struct cifsInodeInfo *cinode; int ack_req = le32_to_cpu(rsp->Flags & SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); @@ -556,22 +572,29 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, &cinode->flags); cifs_queue_oplock_break(cfile); - kfree(lw); return true; } - found = false; + return false; +} + +static struct cifs_pending_open * +smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon, + struct smb2_lease_break *rsp) +{ + __u8 lease_state = le32_to_cpu(rsp->NewLeaseState); + int ack_req = le32_to_cpu(rsp->Flags & + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); + struct cifs_pending_open *open; + 
struct cifs_pending_open *found = NULL; + list_for_each_entry(open, &tcon->pending_opens, olist) { if (memcmp(open->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) continue; if (!found && ack_req) { - found = true; - memcpy(lw->lease_key, open->lease_key, - SMB2_LEASE_KEY_SIZE); - lw->tlink = cifs_get_tlink(open->tlink); - queue_work(cifsiod_wq, &lw->lease_break); + found = open; } cifs_dbg(FYI, "found in the pending open list\n"); @@ -592,14 +615,7 @@ smb2_is_valid_lease_break(char *buffer) struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; - struct smb2_lease_break_work *lw; - - lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); - if (!lw) - return false; - - INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); - lw->lease_state = rsp->NewLeaseState; + struct cifs_pending_open *open; cifs_dbg(FYI, "Checking for lease break\n"); @@ -617,11 +633,27 @@ smb2_is_valid_lease_break(char *buffer) spin_lock(&tcon->open_file_lock); cifs_stats_inc( &tcon->stats.cifs_stats.num_oplock_brks); - if (smb2_tcon_has_lease(tcon, rsp, lw)) { + if (smb2_tcon_has_lease(tcon, rsp)) { spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } + open = smb2_tcon_find_pending_open_lease(tcon, + rsp); + if (open) { + __u8 lease_key[SMB2_LEASE_KEY_SIZE]; + struct tcon_link *tlink; + + tlink = cifs_get_tlink(open->tlink); + memcpy(lease_key, open->lease_key, + SMB2_LEASE_KEY_SIZE); + spin_unlock(&tcon->open_file_lock); + spin_unlock(&cifs_tcp_ses_lock); + smb2_queue_pending_open_break(tlink, + lease_key, + rsp->NewLeaseState); + return true; + } spin_unlock(&tcon->open_file_lock); if (tcon->crfid.is_valid && @@ -639,7 +671,6 @@ smb2_is_valid_lease_break(char *buffer) } } spin_unlock(&cifs_tcp_ses_lock); - kfree(lw); cifs_dbg(FYI, "Can not process lease break - no lease matched\n"); return false; } -- GitLab From b11ac832808158b2df38482988aea703640127f5 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 24 Jul 2020 
19:25:25 +1000 Subject: [PATCH 0324/1304] powerpc: Allow 4224 bytes of stack expansion for the signal frame commit 63dee5df43a31f3844efabc58972f0a206ca4534 upstream. We have powerpc specific logic in our page fault handling to decide if an access to an unmapped address below the stack pointer should expand the stack VMA. The code was originally added in 2004 "ported from 2.4". The rough logic is that the stack is allowed to grow to 1MB with no extra checking. Over 1MB the access must be within 2048 bytes of the stack pointer, or be from a user instruction that updates the stack pointer. The 2048 byte allowance below the stack pointer is there to cover the 288 byte "red zone" as well as the "about 1.5kB" needed by the signal delivery code. Unfortunately since then the signal frame has expanded, and is now 4224 bytes on 64-bit kernels with transactional memory enabled. This means if a process has consumed more than 1MB of stack, and its stack pointer lies less than 4224 bytes from the next page boundary, signal delivery will fault when trying to expand the stack and the process will see a SEGV. The total size of the signal frame is the size of struct rt_sigframe (which includes the red zone) plus __SIGNAL_FRAMESIZE (128 bytes on 64-bit). 
The 2048 byte allowance was correct until 2008 as the signal frame was: struct rt_sigframe { struct ucontext uc; /* 0 1440 */ /* --- cacheline 11 boundary (1408 bytes) was 32 bytes ago --- */ long unsigned int _unused[2]; /* 1440 16 */ unsigned int tramp[6]; /* 1456 24 */ struct siginfo * pinfo; /* 1480 8 */ void * puc; /* 1488 8 */ struct siginfo info; /* 1496 128 */ /* --- cacheline 12 boundary (1536 bytes) was 88 bytes ago --- */ char abigap[288]; /* 1624 288 */ /* size: 1920, cachelines: 15, members: 7 */ /* padding: 8 */ }; 1920 + 128 = 2048 Then in commit ce48b2100785 ("powerpc: Add VSX context save/restore, ptrace and signal support") (Jul 2008) the signal frame expanded to 2304 bytes: struct rt_sigframe { struct ucontext uc; /* 0 1696 */ <-- /* --- cacheline 13 boundary (1664 bytes) was 32 bytes ago --- */ long unsigned int _unused[2]; /* 1696 16 */ unsigned int tramp[6]; /* 1712 24 */ struct siginfo * pinfo; /* 1736 8 */ void * puc; /* 1744 8 */ struct siginfo info; /* 1752 128 */ /* --- cacheline 14 boundary (1792 bytes) was 88 bytes ago --- */ char abigap[288]; /* 1880 288 */ /* size: 2176, cachelines: 17, members: 7 */ /* padding: 8 */ }; 2176 + 128 = 2304 At this point we should have been exposed to the bug, though as far as I know it was never reported. I no longer have a system old enough to easily test on. Then in 2010 commit 320b2b8de126 ("mm: keep a guard page below a grow-down stack segment") caused our stack expansion code to never trigger, as there was always a VMA found for a write up to PAGE_SIZE below r1. 
That meant the bug was hidden as we continued to expand the signal frame in commit 2b0a576d15e0 ("powerpc: Add new transactional memory state to the signal context") (Feb 2013): struct rt_sigframe { struct ucontext uc; /* 0 1696 */ /* --- cacheline 13 boundary (1664 bytes) was 32 bytes ago --- */ struct ucontext uc_transact; /* 1696 1696 */ <-- /* --- cacheline 26 boundary (3328 bytes) was 64 bytes ago --- */ long unsigned int _unused[2]; /* 3392 16 */ unsigned int tramp[6]; /* 3408 24 */ struct siginfo * pinfo; /* 3432 8 */ void * puc; /* 3440 8 */ struct siginfo info; /* 3448 128 */ /* --- cacheline 27 boundary (3456 bytes) was 120 bytes ago --- */ char abigap[288]; /* 3576 288 */ /* size: 3872, cachelines: 31, members: 8 */ /* padding: 8 */ /* last cacheline: 32 bytes */ }; 3872 + 128 = 4000 And commit 573ebfa6601f ("powerpc: Increase stack redzone for 64-bit userspace to 512 bytes") (Feb 2014): struct rt_sigframe { struct ucontext uc; /* 0 1696 */ /* --- cacheline 13 boundary (1664 bytes) was 32 bytes ago --- */ struct ucontext uc_transact; /* 1696 1696 */ /* --- cacheline 26 boundary (3328 bytes) was 64 bytes ago --- */ long unsigned int _unused[2]; /* 3392 16 */ unsigned int tramp[6]; /* 3408 24 */ struct siginfo * pinfo; /* 3432 8 */ void * puc; /* 3440 8 */ struct siginfo info; /* 3448 128 */ /* --- cacheline 27 boundary (3456 bytes) was 120 bytes ago --- */ char abigap[512]; /* 3576 512 */ <-- /* size: 4096, cachelines: 32, members: 8 */ /* padding: 8 */ }; 4096 + 128 = 4224 Then finally in 2017, commit 1be7107fbe18 ("mm: larger stack guard gap, between vmas") exposed us to the existing bug, because it changed the stack VMA to be the correct/real size, meaning our stack expansion code is now triggered. Fix it by increasing the allowance to 4224 bytes. Hard-coding 4224 is obviously unsafe against future expansions of the signal frame in the same way as the existing code. We can't easily use sizeof() because the signal frame structure is not in a header. 
We will either fix that, or rip out all the custom stack expansion checking logic entirely. Fixes: ce48b2100785 ("powerpc: Add VSX context save/restore, ptrace and signal support") Cc: stable@vger.kernel.org # v2.6.27+ Reported-by: Tom Lane Tested-by: Daniel Axtens Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200724092528.1578671-2-mpe@ellerman.id.au Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/fault.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 6e0ff8b600ce..eb5252177b66 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -233,6 +233,9 @@ static bool bad_kernel_fault(bool is_exec, unsigned long error_code, return is_exec || (address >= TASK_SIZE); } +// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE +#define SIGFRAME_MAX_SIZE (4096 + 128) + static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, struct vm_area_struct *vma, unsigned int flags, bool *must_retry) @@ -240,7 +243,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, /* * N.B. The POWER/Open ABI allows programs to access up to * 288 bytes below the stack pointer. - * The kernel signal delivery code writes up to about 1.5kB + * The kernel signal delivery code writes a bit over 4KB * below the stack pointer (r1) before decrementing it. * The exec code can write slightly over 640kB to the stack * before setting the user r1. Thus we allow the stack to @@ -265,7 +268,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address, * between the last mapped region and the stack will * expand the stack rather than segfaulting. 
*/ - if (address + 2048 >= uregs->gpr[1]) + if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1]) return false; if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) && -- GitLab From e83f99c428000d5e347b953da8d539ee67113134 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 4 Aug 2020 22:44:06 +1000 Subject: [PATCH 0325/1304] powerpc: Fix circular dependency between percpu.h and mmu.h commit 0c83b277ada72b585e6a3e52b067669df15bcedb upstream. Recently random.h started including percpu.h (see commit f227e3ec3b5c ("random32: update the net random state on interrupt and activity")), which broke corenet64_smp_defconfig: In file included from /linux/arch/powerpc/include/asm/paca.h:18, from /linux/arch/powerpc/include/asm/percpu.h:13, from /linux/include/linux/random.h:14, from /linux/lib/uuid.c:14: /linux/arch/powerpc/include/asm/mmu.h:139:22: error: unknown type name 'next_tlbcam_idx' 139 | DECLARE_PER_CPU(int, next_tlbcam_idx); This is due to a circular header dependency: asm/mmu.h includes asm/percpu.h, which includes asm/paca.h, which includes asm/mmu.h Which means DECLARE_PER_CPU() isn't defined when mmu.h needs it. We can fix it by moving the include of paca.h below the include of asm-generic/percpu.h. This moves the include of paca.h out of the #ifdef __powerpc64__, but that is OK because paca.h is almost entirely inside #ifdef CONFIG_PPC64 anyway. It also moves the include of paca.h out of the #ifdef CONFIG_SMP, which could possibly break something, but seems to have no ill effects. 
Fixes: f227e3ec3b5c ("random32: update the net random state on interrupt and activity") Cc: stable@vger.kernel.org # v5.8 Reported-by: Stephen Rothwell Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200804130558.292328-1-mpe@ellerman.id.au Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/percpu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h index dce863a7635c..8e5b7d0b851c 100644 --- a/arch/powerpc/include/asm/percpu.h +++ b/arch/powerpc/include/asm/percpu.h @@ -10,8 +10,6 @@ #ifdef CONFIG_SMP -#include - #define __my_cpu_offset local_paca->data_offset #endif /* CONFIG_SMP */ @@ -19,4 +17,6 @@ #include +#include + #endif /* _ASM_POWERPC_PERCPU_H_ */ -- GitLab From 62f8d71408990bc26690e02370c1ae7973625d2b Mon Sep 17 00:00:00 2001 From: Eugeniu Rosca Date: Tue, 2 Jun 2020 21:50:16 +0200 Subject: [PATCH 0326/1304] media: vsp1: dl: Fix NULL pointer dereference on unbind commit c92d30e4b78dc331909f8c6056c2792aa14e2166 upstream. In commit f3b98e3c4d2e16 ("media: vsp1: Provide support for extended command pools"), the vsp pointer used for referencing the VSP1 device structure from a command pool during vsp1_dl_ext_cmd_pool_destroy() was not populated. 
Correctly assign the pointer to prevent the following null-pointer-dereference when removing the device: [*] h3ulcb-kf #> echo fea28000.vsp > /sys/bus/platform/devices/fea28000.vsp/driver/unbind Unable to handle kernel NULL pointer dereference at virtual address 0000000000000028 Mem abort info: ESR = 0x96000006 EC = 0x25: DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 Data abort info: ISV = 0, ISS = 0x00000006 CM = 0, WnR = 0 user pgtable: 4k pages, 48-bit VAs, pgdp=00000007318be000 [0000000000000028] pgd=00000007333a1003, pud=00000007333a6003, pmd=0000000000000000 Internal error: Oops: 96000006 [#1] PREEMPT SMP Modules linked in: CPU: 1 PID: 486 Comm: sh Not tainted 5.7.0-rc6-arm64-renesas-00118-ge644645abf47 #185 Hardware name: Renesas H3ULCB Kingfisher board based on r8a77951 (DT) pstate: 40000005 (nZcv daif -PAN -UAO) pc : vsp1_dlm_destroy+0xe4/0x11c lr : vsp1_dlm_destroy+0xc8/0x11c sp : ffff800012963b60 x29: ffff800012963b60 x28: ffff0006f83fc440 x27: 0000000000000000 x26: ffff0006f5e13e80 x25: ffff0006f5e13ed0 x24: ffff0006f5e13ed0 x23: ffff0006f5e13ed0 x22: dead000000000122 x21: ffff0006f5e3a080 x20: ffff0006f5df2938 x19: ffff0006f5df2980 x18: 0000000000000003 x17: 0000000000000000 x16: 0000000000000016 x15: 0000000000000003 x14: 00000000000393c0 x13: ffff800011a5ec18 x12: ffff800011d8d000 x11: ffff0006f83fcc68 x10: ffff800011a53d70 x9 : ffff8000111f3000 x8 : 0000000000000000 x7 : 0000000000210d00 x6 : 0000000000000000 x5 : ffff800010872e60 x4 : 0000000000000004 x3 : 0000000078068000 x2 : ffff800012781000 x1 : 0000000000002c00 x0 : 0000000000000000 Call trace: vsp1_dlm_destroy+0xe4/0x11c vsp1_wpf_destroy+0x10/0x20 vsp1_entity_destroy+0x24/0x4c vsp1_destroy_entities+0x54/0x130 vsp1_remove+0x1c/0x40 platform_drv_remove+0x28/0x50 __device_release_driver+0x178/0x220 device_driver_detach+0x44/0xc0 unbind_store+0xe0/0x104 drv_attr_store+0x20/0x30 sysfs_kf_write+0x48/0x70 kernfs_fop_write+0x148/0x230 __vfs_write+0x18/0x40 vfs_write+0xdc/0x1c4 
ksys_write+0x68/0xf0 __arm64_sys_write+0x18/0x20 el0_svc_common.constprop.0+0x70/0x170 do_el0_svc+0x20/0x80 el0_sync_handler+0x134/0x1b0 el0_sync+0x140/0x180 Code: b40000c2 f9403a60 d2800084 a9400663 (f9401400) ---[ end trace 3875369841fb288a ]--- Fixes: f3b98e3c4d2e16 ("media: vsp1: Provide support for extended command pools") Cc: stable@vger.kernel.org # v4.19+ Signed-off-by: Eugeniu Rosca Reviewed-by: Kieran Bingham Tested-by: Kieran Bingham Reviewed-by: Laurent Pinchart Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/platform/vsp1/vsp1_dl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index a5634ca85a31..a07caf981e15 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ b/drivers/media/platform/vsp1/vsp1_dl.c @@ -431,6 +431,8 @@ vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type, if (!pool) return NULL; + pool->vsp1 = vsp1; + spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->free); -- GitLab From 26f0092f35e239e072276aa6c33e7d90bf0ac66e Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Wed, 12 Aug 2020 20:37:23 +0100 Subject: [PATCH 0327/1304] net: ethernet: stmmac: Disable hardware multicast filter commit df43dd526e6609769ae513a81443c7aa727c8ca3 upstream. The IPQ806x does not appear to have a functional multicast ethernet address filter. This was observed as a failure to correctly receive IPv6 packets on a LAN to the all stations address. Checking the vendor driver shows that it does not attempt to enable the multicast filter and instead falls back to receiving all multicast packets, internally setting ALLMULTI. Use the new fallback support in the dwmac1000 driver to correctly achieve the same with the mainline IPQ806x driver. Confirmed to fix IPv6 functionality on an RB3011 router. Cc: stable@vger.kernel.org Signed-off-by: Jonathan McDowell Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 4d75158c64b2..826626e870d5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -350,6 +350,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->has_gmac = true; plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; + plat_dat->multicast_filter_bins = 0; err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (err) -- GitLab From c334db67ebb3b5af540481f22b2d1b4f837c0992 Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Wed, 12 Aug 2020 20:37:01 +0100 Subject: [PATCH 0328/1304] net: stmmac: dwmac1000: provide multicast filter fallback commit 592d751c1e174df5ff219946908b005eb48934b3 upstream. If we don't have a hardware multicast filter available then instead of silently failing to listen for the requested ethernet broadcast addresses fall back to receiving all multicast packets, in a similar fashion to other drivers with no multicast filter. Cc: stable@vger.kernel.org Signed-off-by: Jonathan McDowell Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index e4e9a7591efe..4d617ba11ecb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -176,6 +176,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, value = GMAC_FRAME_FILTER_PR; } else if (dev->flags & IFF_ALLMULTI) { value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) { + /* Fall back to all multicast if we've no filter */ + value = GMAC_FRAME_FILTER_PM; } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; -- GitLab From f90339a4eccf1768f044ac98ec6d2a8afeeab58d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 9 Jun 2020 16:11:29 -0700 Subject: [PATCH 0329/1304] net/compat: Add missing sock updates for SCM_RIGHTS commit d9539752d23283db4692384a634034f451261e29 upstream. Add missed sock updates to compat path via a new helper, which will be used more in coming patches. (The net/core/scm.c code is left as-is here to assist with -stable backports for the compat path.) 
Cc: Christoph Hellwig Cc: Sargun Dhillon Cc: Jakub Kicinski Cc: stable@vger.kernel.org Fixes: 48a87cc26c13 ("net: netprio: fd passed in SCM_RIGHTS datagram not set correctly") Fixes: d84295067fc7 ("net: net_cls: fd passed in SCM_RIGHTS datagram not set correctly") Acked-by: Christian Brauner Signed-off-by: Kees Cook Signed-off-by: Greg Kroah-Hartman --- include/net/sock.h | 4 ++++ net/compat.c | 1 + net/core/sock.c | 21 +++++++++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/include/net/sock.h b/include/net/sock.h index e2df102e669e..77f36257cac9 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -845,6 +845,8 @@ static inline int sk_memalloc_socks(void) { return static_branch_unlikely(&memalloc_socks_key); } + +void __receive_sock(struct file *file); #else static inline int sk_memalloc_socks(void) @@ -852,6 +854,8 @@ static inline int sk_memalloc_socks(void) return 0; } +static inline void __receive_sock(struct file *file) +{ } #endif static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) diff --git a/net/compat.c b/net/compat.c index 3c4b0283b29a..2a8c7cb5f06a 100644 --- a/net/compat.c +++ b/net/compat.c @@ -289,6 +289,7 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) break; } /* Bump the usage count and install the file. */ + __receive_sock(fp[i]); fd_install(new_fd, get_file(fp[i])); } diff --git a/net/core/sock.c b/net/core/sock.c index 6c3b031b6ad6..e6cbe137cb6f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2636,6 +2636,27 @@ int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct * } EXPORT_SYMBOL(sock_no_mmap); +/* + * When a file is received (via SCM_RIGHTS, etc), we must bump the + * various sock-based usage counts. 
+ */ +void __receive_sock(struct file *file) +{ + struct socket *sock; + int error; + + /* + * The resulting value of "error" is ignored here since we only + * need to take action when the file is a socket and testing + * "sock" for NULL is sufficient. + */ + sock = sock_from_file(file, &error); + if (sock) { + sock_update_netprioidx(&sock->sk->sk_cgrp_data); + sock_update_classid(&sock->sk->sk_cgrp_data); + } +} + ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { ssize_t res; -- GitLab From 566cba3c7d1fe14fdc9c43931035e9ae4320f973 Mon Sep 17 00:00:00 2001 From: ChangSyun Peng Date: Fri, 31 Jul 2020 17:50:17 +0800 Subject: [PATCH 0330/1304] md/raid5: Fix Force reconstruct-write io stuck in degraded raid5 commit a1c6ae3d9f3dd6aa5981a332a6f700cf1c25edef upstream. In degraded raid5, we need to read parity to do reconstruct-write when data disks fail. However, we can not read parity from handle_stripe_dirtying() in force reconstruct-write mode. Reproducible Steps: 1. Create degraded raid5 mdadm -C /dev/md2 --assume-clean -l5 -n3 /dev/sda2 /dev/sdb2 missing 2. Set rmw_level to 0 echo 0 > /sys/block/md2/md/rmw_level 3. IO to raid5 Now some io may be stuck in raid5. We can use handle_stripe_fill() to read the parity in this situation. Cc: # v4.4+ Reviewed-by: Alex Wu Reviewed-by: BingJing Chang Reviewed-by: Danny Shih Signed-off-by: ChangSyun Peng Signed-off-by: Song Liu Signed-off-by: Greg Kroah-Hartman --- drivers/md/raid5.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 01021382131b..d91154d65455 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3596,6 +3596,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, * is missing/faulty, then we need to read everything we can. 
*/ if (sh->raid_conf->level != 6 && + sh->raid_conf->rmw_level != PARITY_DISABLE_RMW && sh->sector < sh->raid_conf->mddev->recovery_cp) /* reconstruct-write isn't being forced */ return 0; @@ -4832,7 +4833,7 @@ static void handle_stripe(struct stripe_head *sh) * or to load a block that is being partially written. */ if (s.to_read || s.non_overwrite - || (conf->level == 6 && s.to_write && s.failed) + || (s.to_write && s.failed) || (s.syncing && (s.uptodate + s.compute < disks)) || s.replacing || s.expanding) -- GitLab From d6e2394ce6c9554a78a88fab2a779e3168088a47 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 25 Jul 2020 20:00:16 +0800 Subject: [PATCH 0331/1304] bcache: allocate meta data pages as compound pages commit 5fe48867856367142d91a82f2cbf7a57a24cbb70 upstream. There are some meta data of bcache are allocated by multiple pages, and they are used as bio bv_page for I/Os to the cache device. for example cache_set->uuids, cache->disk_buckets, journal_write->data, bset_tree->data. For such meta data memory, all the allocated pages should be treated as a single memory block. Then the memory management and underlying I/O code can treat them more clearly. This patch adds __GFP_COMP flag to all the location allocating >0 order pages for the above mentioned meta data. Then their pages are treated as compound pages now. 
Signed-off-by: Coly Li Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/bset.c | 2 +- drivers/md/bcache/btree.c | 2 +- drivers/md/bcache/journal.c | 4 ++-- drivers/md/bcache/super.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 268f1b685084..ec48cf86cab6 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -321,7 +321,7 @@ int bch_btree_keys_alloc(struct btree_keys *b, b->page_order = page_order; - t->data = (void *) __get_free_pages(gfp, b->page_order); + t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order); if (!t->data) goto err; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 38a8f8d2a908..d320574b9a4c 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -830,7 +830,7 @@ int bch_btree_cache_alloc(struct cache_set *c) mutex_init(&c->verify_lock); c->verify_ondisk = (void *) - __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c))); + __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c))); c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 7bb15cddca5e..182c2b7bd960 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -864,8 +864,8 @@ int bch_journal_alloc(struct cache_set *c) j->w[1].c = c; if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) || - !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) || - !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS))) + !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) || + !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS))) return -ENOMEM; return 0; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 46ad0bf18e1f..825bfde10c69 100644 --- a/drivers/md/bcache/super.c +++ 
b/drivers/md/bcache/super.c @@ -1693,7 +1693,7 @@ void bch_cache_set_unregister(struct cache_set *c) } #define alloc_bucket_pages(gfp, c) \ - ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c)))) + ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c)))) struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) { -- GitLab From 2a72c283319c2be9b4667630d8d0c98b59371930 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sat, 25 Jul 2020 20:00:22 +0800 Subject: [PATCH 0332/1304] bcache: fix overflow in offset_to_stripe() commit 7a1481267999c02abf4a624515c1b5c7c1fccbd6 upstream. offset_to_stripe() returns the stripe number (in type unsigned int) from an offset (in type uint64_t) by the following calculation, do_div(offset, d->stripe_size); For large capacity backing device (e.g. 18TB) with small stripe size (e.g. 4KB), the result is 4831838208 and exceeds UINT_MAX. The actual returned value which caller receives is 536870912, due to the overflow. Indeed in bcache_device_init(), bcache_device->nr_stripes is limited in range [1, INT_MAX]. Therefore all valid stripe numbers in bcache are in range [0, bcache_dev->nr_stripes - 1]. This patch adds a upper limition check in offset_to_stripe(): the max valid stripe number should be less than bcache_device->nr_stripes. If the calculated stripe number from do_div() is equal to or larger than bcache_device->nr_stripe, -EINVAL will be returned. (Normally nr_stripes is less than INT_MAX, exceeding upper limitation doesn't mean overflow, therefore -EOVERFLOW is not used as error code.) This patch also changes nr_stripes' type of struct bcache_device from 'unsigned int' to 'int', and return value type of offset_to_stripe() from 'unsigned int' to 'int', to match their exact data ranges. All locations where bcache_device->nr_stripes and offset_to_stripe() are referenced also get updated for the above type change. 
Reported-and-tested-by: Ken Raeburn Signed-off-by: Coly Li Cc: stable@vger.kernel.org Link: https://bugzilla.redhat.com/show_bug.cgi?id=1783075 Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/md/bcache/bcache.h | 2 +- drivers/md/bcache/writeback.c | 14 +++++++++----- drivers/md/bcache/writeback.h | 19 +++++++++++++++++-- 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 2a2f189dd37c..1cc6ae3e058c 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -264,7 +264,7 @@ struct bcache_device { #define BCACHE_DEV_UNLINK_DONE 2 #define BCACHE_DEV_WB_RUNNING 3 #define BCACHE_DEV_RATE_DW_RUNNING 4 - unsigned int nr_stripes; + int nr_stripes; unsigned int stripe_size; atomic_t *stripe_sectors_dirty; unsigned long *full_dirty_stripes; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index b5fc3c6c7178..aa58833fb012 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -506,15 +506,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, uint64_t offset, int nr_sectors) { struct bcache_device *d = c->devices[inode]; - unsigned int stripe_offset, stripe, sectors_dirty; + unsigned int stripe_offset, sectors_dirty; + int stripe; if (!d) return; + stripe = offset_to_stripe(d, offset); + if (stripe < 0) + return; + if (UUID_FLASH_ONLY(&c->uuids[inode])) atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors); - stripe = offset_to_stripe(d, offset); stripe_offset = offset & (d->stripe_size - 1); while (nr_sectors) { @@ -554,12 +558,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k) static void refill_full_stripes(struct cached_dev *dc) { struct keybuf *buf = &dc->writeback_keys; - unsigned int start_stripe, stripe, next_stripe; + unsigned int start_stripe, next_stripe; + int stripe; bool wrapped = false; stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); 
- - if (stripe >= dc->disk.nr_stripes) + if (stripe < 0) stripe = 0; start_stripe = stripe; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index e75dc33339f6..b902e574c5c4 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -28,10 +28,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) return ret; } -static inline unsigned int offset_to_stripe(struct bcache_device *d, +static inline int offset_to_stripe(struct bcache_device *d, uint64_t offset) { do_div(offset, d->stripe_size); + + /* d->nr_stripes is in range [1, INT_MAX] */ + if (unlikely(offset >= d->nr_stripes)) { + pr_err("Invalid stripe %llu (>= nr_stripes %d).\n", + offset, d->nr_stripes); + return -EINVAL; + } + + /* + * Here offset is definitly smaller than INT_MAX, + * return it as int will never overflow. + */ return offset; } @@ -39,7 +51,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, uint64_t offset, unsigned int nr_sectors) { - unsigned int stripe = offset_to_stripe(&dc->disk, offset); + int stripe = offset_to_stripe(&dc->disk, offset); + + if (stripe < 0) + return false; while (1) { if (atomic_read(dc->disk.stripe_sectors_dirty + stripe)) -- GitLab From 4cf1d191f77f8b81d0ed1cea344ca73c0d4cb2bc Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 3 Aug 2020 11:02:10 +0200 Subject: [PATCH 0333/1304] mac80211: fix misplaced while instead of if commit 5981fe5b0529ba25d95f37d7faa434183ad618c5 upstream. This never was intended to be a 'while' loop, it should've just been an 'if' instead of 'while'. Fix this. I noticed this while applying another patch from Ben that intended to fix a busy loop at this spot. 
Cc: stable@vger.kernel.org Fixes: b16798f5b907 ("mac80211: mark station unauthorized before key removal") Reported-by: Ben Greear Link: https://lore.kernel.org/r/20200803110209.253009ae41ff.I3522aad099392b31d5cf2dcca34cbac7e5832dde@changeid Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/mac80211/sta_info.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index ec2e83272f9d..2a82d438991b 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -979,7 +979,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta) might_sleep(); lockdep_assert_held(&local->sta_mtx); - while (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); WARN_ON_ONCE(ret); } -- GitLab From 706695d477fb16a5920098535380ee39337e7ea8 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Wed, 8 Jul 2020 15:27:01 +0200 Subject: [PATCH 0334/1304] driver core: Avoid binding drivers to dead devices commit 654888327e9f655a9d55ad477a9583e90e8c9b5c upstream. Commit 3451a495ef24 ("driver core: Establish order of operations for device_add and device_del via bitflag") sought to prevent asynchronous driver binding to a device which is being removed. It added a per-device "dead" flag which is checked in the following code paths: * asynchronous binding in __driver_attach_async_helper() * synchronous binding in device_driver_attach() * asynchronous binding in __device_attach_async_helper() It did *not* check the flag upon: * synchronous binding in __device_attach() However __device_attach() may also be called asynchronously from: deferred_probe_work_func() bus_probe_device() device_initial_probe() __device_attach() So if the commit's intention was to check the "dead" flag in all asynchronous code paths, then a check is also necessary in __device_attach(). Add the missing check. 
Fixes: 3451a495ef24 ("driver core: Establish order of operations for device_add and device_del via bitflag") Signed-off-by: Lukas Wunner Cc: stable@vger.kernel.org # v5.1+ Cc: Alexander Duyck Link: https://lore.kernel.org/r/de88a23a6fe0ef70f7cfd13c8aea9ab51b4edab6.1594214103.git.lukas@wunner.de Signed-off-by: Greg Kroah-Hartman --- drivers/base/dd.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/base/dd.c b/drivers/base/dd.c index caaeb7910a04..0047bbdd43c0 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -792,7 +792,9 @@ static int __device_attach(struct device *dev, bool allow_async) int ret = 0; device_lock(dev); - if (dev->driver) { + if (dev->p->dead) { + goto out_unlock; + } else if (dev->driver) { if (device_is_bound(dev)) { ret = 1; goto out_unlock; -- GitLab From baa5bd366835ceb752e5f75ba993042a5b872dbf Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 16 Jul 2020 18:40:23 +0800 Subject: [PATCH 0335/1304] MIPS: CPU#0 is not hotpluggable commit 9cce844abf07b683cff5f0273977d5f8d0af94c7 upstream. Now CPU#0 is not hotpluggable on MIPS, so prevent to create /sys/devices /system/cpu/cpu0/online which confuses some user-space tools. 
Cc: stable@vger.kernel.org Signed-off-by: Huacai Chen Signed-off-by: Thomas Bogendoerfer Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c index cd3e1f82e1a5..08ad6371fbe0 100644 --- a/arch/mips/kernel/topology.c +++ b/arch/mips/kernel/topology.c @@ -20,7 +20,7 @@ static int __init topology_init(void) for_each_present_cpu(i) { struct cpu *c = &per_cpu(cpu_devices, i); - c->hotpluggable = 1; + c->hotpluggable = !!i; ret = register_cpu(c, i); if (ret) printk(KERN_WARNING "topology_init: register_cpu %d " -- GitLab From 41d71ef2e791506bc3a8c5db155bb75daf623ce6 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 20 Apr 2020 16:02:21 -0400 Subject: [PATCH 0336/1304] ext2: fix missing percpu_counter_inc commit bc2fbaa4d3808aef82dd1064a8e61c16549fe956 upstream. sbi->s_freeinodes_counter is only decreased by the ext2 code, it is never increased. This patch fixes it. Note that sbi->s_freeinodes_counter is only used in the algorithm that tries to find the group for new allocations, so this bug is not easily visible (the only visibility is that the group finding algorithm selects inoptinal result). 
Link: https://lore.kernel.org/r/alpine.LRH.2.02.2004201538300.19436@file01.intranet.prod.int.rdu2.redhat.com Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/ext2/ialloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index 5c3d7b7e4975..d8a03b1afbc3 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir) if (dir) le16_add_cpu(&desc->bg_used_dirs_count, -1); spin_unlock(sb_bgl_lock(EXT2_SB(sb), group)); + percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter); if (dir) percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter); mark_buffer_dirty(bh); @@ -531,7 +532,7 @@ struct inode *ext2_new_inode(struct inode *dir, umode_t mode, goto fail; } - percpu_counter_add(&sbi->s_freeinodes_counter, -1); + percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); -- GitLab From 73cbb8af7e8a80715e79b78dba1057b966849a37 Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Thu, 6 Aug 2020 23:18:02 -0700 Subject: [PATCH 0337/1304] ocfs2: change slot number type s16 to u16 commit 38d51b2dd171ad973afc1f5faab825ed05a2d5e9 upstream. Dan Carpenter reported the following static checker warning. fs/ocfs2/super.c:1269 ocfs2_parse_options() warn: '(-1)' 65535 can't fit into 32767 'mopt->slot' fs/ocfs2/suballoc.c:859 ocfs2_init_inode_steal_slot() warn: '(-1)' 65535 can't fit into 32767 'osb->s_inode_steal_slot' fs/ocfs2/suballoc.c:867 ocfs2_init_meta_steal_slot() warn: '(-1)' 65535 can't fit into 32767 'osb->s_meta_steal_slot' That's because OCFS2_INVALID_SLOT is (u16)-1. Slot number in ocfs2 can be never negative, so change s16 to u16. 
Fixes: 9277f8334ffc ("ocfs2: fix value of OCFS2_INVALID_SLOT") Reported-by: Dan Carpenter Signed-off-by: Junxiao Bi Signed-off-by: Andrew Morton Reviewed-by: Joseph Qi Reviewed-by: Gang He Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Changwei Ge Cc: Jun Piao Cc: Link: http://lkml.kernel.org/r/20200627001259.19757-1-junxiao.bi@oracle.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/ocfs2/ocfs2.h | 4 ++-- fs/ocfs2/suballoc.c | 4 ++-- fs/ocfs2/super.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 231933618300..b9f62d29355b 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -338,8 +338,8 @@ struct ocfs2_super spinlock_t osb_lock; u32 s_next_generation; unsigned long osb_flags; - s16 s_inode_steal_slot; - s16 s_meta_steal_slot; + u16 s_inode_steal_slot; + u16 s_meta_steal_slot; atomic_t s_num_inodes_stolen; atomic_t s_num_meta_stolen; diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 15a89c513da2..0230b4ece0f0 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -893,9 +893,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type) { spin_lock(&osb->osb_lock); if (type == INODE_ALLOC_SYSTEM_INODE) - osb->s_inode_steal_slot = slot; + osb->s_inode_steal_slot = (u16)slot; else if (type == EXTENT_ALLOC_SYSTEM_INODE) - osb->s_meta_steal_slot = slot; + osb->s_meta_steal_slot = (u16)slot; spin_unlock(&osb->osb_lock); } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 3415e0b09398..2658d91c1f7b 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -92,7 +92,7 @@ struct mount_options unsigned long commit_interval; unsigned long mount_opt; unsigned int atime_quantum; - signed short slot; + unsigned short slot; int localalloc_opt; unsigned int resv_level; int dir_resv_level; @@ -1384,7 +1384,7 @@ static int ocfs2_parse_options(struct super_block *sb, goto bail; } if (option) - mopt->slot = (s16)option; + mopt->slot = 
(u16)option; break; case Opt_commit: if (match_int(&args[0], &option)) { -- GitLab From e88a72e86bd0c0c19ba00865ed68dce22824aac6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Koutn=C3=BD?= Date: Thu, 6 Aug 2020 23:22:18 -0700 Subject: [PATCH 0338/1304] mm/page_counter.c: fix protection usage propagation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a6f23d14ec7d7d02220ad8bb2774be3322b9aeec upstream. When workload runs in cgroups that aren't directly below root cgroup and their parent specifies reclaim protection, it may end up ineffective. The reason is that propagate_protected_usage() is not called in all hierarchy up. All the protected usage is incorrectly accumulated in the workload's parent. This means that siblings_low_usage is overestimated and effective protection underestimated. Even though it is transitional phenomenon (uncharge path does correct propagation and fixes the wrong children_low_usage), it can undermine the intended protection unexpectedly. We have noticed this problem while seeing a swap out in a descendant of a protected memcg (intermediate node) while the parent was conveniently under its protection limit and the memory pressure was external to that hierarchy. Michal has pinpointed this down to the wrong siblings_low_usage which led to the unwanted reclaim. The fix is simply updating children_low_usage in respective ancestors also in the charging path. 
Fixes: 230671533d64 ("mm: memory.low hierarchical behavior") Signed-off-by: Michal Koutný Signed-off-by: Michal Hocko Signed-off-by: Andrew Morton Acked-by: Michal Hocko Acked-by: Roman Gushchin Cc: Johannes Weiner Cc: Tejun Heo Cc: [4.18+] Link: http://lkml.kernel.org/r/20200803153231.15477-1-mhocko@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/page_counter.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/page_counter.c b/mm/page_counter.c index de31470655f6..147ff99187b8 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -77,7 +77,7 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) long new; new = atomic_long_add_return(nr_pages, &c->usage); - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * This is indeed racy, but we can live with some * inaccuracy in the watermark. @@ -121,7 +121,7 @@ bool page_counter_try_charge(struct page_counter *counter, new = atomic_long_add_return(nr_pages, &c->usage); if (new > c->max) { atomic_long_sub(nr_pages, &c->usage); - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * This is racy, but we can live with some * inaccuracy in the failcnt. @@ -130,7 +130,7 @@ bool page_counter_try_charge(struct page_counter *counter, *fail = c; goto failed; } - propagate_protected_usage(counter, new); + propagate_protected_usage(c, new); /* * Just like with failcnt, we can live with some * inaccuracy in the watermark. -- GitLab From 892fd3637a2c0c29d3dd7402bbb0802eb231b69d Mon Sep 17 00:00:00 2001 From: Chengming Zhou Date: Wed, 29 Jul 2020 02:05:53 +0800 Subject: [PATCH 0339/1304] ftrace: Setup correct FTRACE_FL_REGS flags for module commit 8a224ffb3f52b0027f6b7279854c71a31c48fc97 upstream. When module loaded and enabled, we will use __ftrace_replace_code for module if any ftrace_ops referenced it found. 
But we will get wrong ftrace_addr for module rec in ftrace_get_addr_new, because rec->flags has not been setup correctly. It can cause the callback function of a ftrace_ops has FTRACE_OPS_FL_SAVE_REGS to be called with pt_regs set to NULL. So setup correct FTRACE_FL_REGS flags for rec when we call referenced_filters to find ftrace_ops references it. Link: https://lkml.kernel.org/r/20200728180554.65203-1-zhouchengming@bytedance.com Cc: stable@vger.kernel.org Fixes: 8c4f3c3fa9681 ("ftrace: Check module functions being traced on reload") Signed-off-by: Chengming Zhou Signed-off-by: Muchun Song Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/ftrace.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0c379cd40bea..8960fe94e8ee 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -5665,8 +5665,11 @@ static int referenced_filters(struct dyn_ftrace *rec) int cnt = 0; for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { - if (ops_references_rec(ops, rec)) - cnt++; + if (ops_references_rec(ops, rec)) { + cnt++; + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) + rec->flags |= FTRACE_FL_REGS; + } } return cnt; @@ -5843,8 +5846,8 @@ void ftrace_module_enable(struct module *mod) if (ftrace_start_up) cnt += referenced_filters(rec); - /* This clears FTRACE_FL_DISABLED */ - rec->flags = cnt; + rec->flags &= ~FTRACE_FL_DISABLED; + rec->flags += cnt; if (ftrace_start_up && cnt) { int failed = __ftrace_replace_code(rec, 1); -- GitLab From 46c9d3925ab0ceb4d19cee4be1a061b87faf1e11 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Tue, 28 Jul 2020 14:45:36 +0800 Subject: [PATCH 0340/1304] kprobes: Fix NULL pointer dereference at kprobe_ftrace_handler commit 0cb2f1372baa60af8456388a574af6133edd7d80 upstream. We found a case of kernel panic on our server. 
The stack trace is as follows(omit some irrelevant information): BUG: kernel NULL pointer dereference, address: 0000000000000080 RIP: 0010:kprobe_ftrace_handler+0x5e/0xe0 RSP: 0018:ffffb512c6550998 EFLAGS: 00010282 RAX: 0000000000000000 RBX: ffff8e9d16eea018 RCX: 0000000000000000 RDX: ffffffffbe1179c0 RSI: ffffffffc0535564 RDI: ffffffffc0534ec0 RBP: ffffffffc0534ec1 R08: ffff8e9d1bbb0f00 R09: 0000000000000004 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 R13: ffff8e9d1f797060 R14: 000000000000bacc R15: ffff8e9ce13eca00 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000080 CR3: 00000008453d0005 CR4: 00000000003606e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ftrace_ops_assist_func+0x56/0xe0 ftrace_call+0x5/0x34 tcpa_statistic_send+0x5/0x130 [ttcp_engine] The tcpa_statistic_send is the function being kprobed. After analysis, the root cause is that the fourth parameter regs of kprobe_ftrace_handler is NULL. Why regs is NULL? We use the crash tool to analyze the kdump. crash> dis tcpa_statistic_send -r : callq 0xffffffffbd8018c0 The tcpa_statistic_send calls ftrace_caller instead of ftrace_regs_caller. So it is reasonable that the fourth parameter regs of kprobe_ftrace_handler is NULL. In theory, we should call the ftrace_regs_caller instead of the ftrace_caller. After in-depth analysis, we found a reproducible path. Writing a simple kernel module which starts a periodic timer. The timer's handler is named 'kprobe_test_timer_handler'. The module name is kprobe_test.ko. 1) insmod kprobe_test.ko 2) bpftrace -e 'kretprobe:kprobe_test_timer_handler {}' 3) echo 0 > /proc/sys/kernel/ftrace_enabled 4) rmmod kprobe_test 5) stop step 2) kprobe 6) insmod kprobe_test.ko 7) bpftrace -e 'kretprobe:kprobe_test_timer_handler {}' We mark the kprobe as GONE but not disarm the kprobe in the step 4). 
The step 5) also do not disarm the kprobe when unregister kprobe. So we do not remove the ip from the filter. In this case, when the module loads again in the step 6), we will replace the code to ftrace_caller via the ftrace_module_enable(). When we register kprobe again, we will not replace ftrace_caller to ftrace_regs_caller because the ftrace is disabled in the step 3). So the step 7) will trigger kernel panic. Fix this problem by disarming the kprobe when the module is going away. Link: https://lkml.kernel.org/r/20200728064536.24405-1-songmuchun@bytedance.com Cc: stable@vger.kernel.org Fixes: ae6aa16fdc16 ("kprobes: introduce ftrace based optimization") Acked-by: Masami Hiramatsu Signed-off-by: Muchun Song Co-developed-by: Chengming Zhou Signed-off-by: Chengming Zhou Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/kprobes.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 178327a75e73..eb4bffe6d764 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2077,6 +2077,13 @@ static void kill_kprobe(struct kprobe *p) * the original probed function (which will be freed soon) any more. */ arch_remove_kprobe(p); + + /* + * The module is going away. We should disarm the kprobe which + * is using ftrace. + */ + if (kprobe_ftrace(p)) + disarm_kprobe_ftrace(p); } /* Disable one kprobe */ -- GitLab From b3b77736dd517ce1f03364486d88c375e0060679 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Thu, 30 Jul 2020 16:23:18 +0800 Subject: [PATCH 0341/1304] tracing/hwlat: Honor the tracing_cpumask commit 96b4833b6827a62c295b149213c68b559514c929 upstream. In calculation of the cpu mask for the hwlat kernel thread, the wrong cpu mask is used instead of the tracing_cpumask, this causes the tracing/tracing_cpumask useless for hwlat tracer. Fixes it. 
Link: https://lkml.kernel.org/r/20200730082318.42584-2-haokexin@gmail.com Cc: Ingo Molnar Cc: stable@vger.kernel.org Fixes: 0330f7aa8ee6 ("tracing: Have hwlat trace migrate across tracing_cpumask CPUs") Signed-off-by: Kevin Hao Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_hwlat.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c index 8030e24dbf14..568918fae8d4 100644 --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c @@ -270,6 +270,7 @@ static bool disable_migrate; static void move_to_next_cpu(void) { struct cpumask *current_mask = &save_cpumask; + struct trace_array *tr = hwlat_trace; int next_cpu; if (disable_migrate) @@ -283,7 +284,7 @@ static void move_to_next_cpu(void) goto disable; get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); next_cpu = cpumask_next(smp_processor_id(), current_mask); put_online_cpus(); @@ -360,7 +361,7 @@ static int start_kthread(struct trace_array *tr) /* Just pick the first CPU on first iteration */ current_mask = &save_cpumask; get_online_cpus(); - cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask); + cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); put_online_cpus(); next_cpu = cpumask_first(current_mask); -- GitLab From 2c98c4a0c35117926dcb0b0ec4d5034c64415149 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 4 Aug 2020 20:00:02 -0400 Subject: [PATCH 0342/1304] tracing: Use trace_sched_process_free() instead of exit() for pid tracing commit afcab636657421f7ebfa0783a91f90256bba0091 upstream. On exit, if a process is preempted after the trace_sched_process_exit() tracepoint but before the process is done exiting, then when it gets scheduled in, the function tracers will not filter it properly against the function tracing pid filters. 
That is because the function tracing pid filters hooks to the sched_process_exit() tracepoint to remove the exiting task's pid from the filter list. Because the filtering happens at the sched_switch tracepoint, when the exiting task schedules back in to finish up the exit, it will no longer be in the function pid filtering tables. This was noticeable in the notrace self tests on a preemptable kernel, as the tests would fail as it exits and preempted after being taken off the notrace filter table and on scheduling back in it would not be in the notrace list, and then the ending of the exit function would trace. The test detected this and would fail. Cc: stable@vger.kernel.org Cc: Namhyung Kim Fixes: 1e10486ffee0a ("ftrace: Add 'function-fork' trace option") Fixes: c37775d57830a ("tracing: Add infrastructure to allow set_event_pid to follow children" Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/ftrace.c | 4 ++-- kernel/trace/trace_events.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 8960fe94e8ee..70f7743c1672 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -6450,12 +6450,12 @@ void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) if (enable) { register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, tr); - register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, + register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, tr); } else { unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, tr); - unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, + unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit, tr); } } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index ec340e1cbffc..27726121d332 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -534,12 +534,12 @@ void 
trace_event_follow_fork(struct trace_array *tr, bool enable) if (enable) { register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork, tr, INT_MIN); - register_trace_prio_sched_process_exit(event_filter_pid_sched_process_exit, + register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit, tr, INT_MAX); } else { unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork, tr); - unregister_trace_sched_process_exit(event_filter_pid_sched_process_exit, + unregister_trace_sched_process_free(event_filter_pid_sched_process_exit, tr); } } -- GitLab From 203dbe7cda02b85d22f093617f0330cd03870161 Mon Sep 17 00:00:00 2001 From: Ahmad Fatoum Date: Thu, 11 Jun 2020 21:17:43 +0200 Subject: [PATCH 0343/1304] watchdog: f71808e_wdt: indicate WDIOF_CARDRESET support in watchdog_info.options commit e871e93fb08a619dfc015974a05768ed6880fd82 upstream. The driver supports populating bootstatus with WDIOF_CARDRESET, but so far userspace couldn't portably determine whether absence of this flag meant no watchdog reset or no driver support. Or-in the bit to fix this. 
Fixes: b97cb21a4634 ("watchdog: f71808e_wdt: Fix WDTMOUT_STS register read") Cc: stable@vger.kernel.org Signed-off-by: Ahmad Fatoum Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/20200611191750.28096-3-a.fatoum@pengutronix.de Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Greg Kroah-Hartman --- drivers/watchdog/f71808e_wdt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index 9a1c761258ce..7683936e0b10 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -690,7 +690,8 @@ static int __init watchdog_init(int sioaddr) watchdog.sioaddr = sioaddr; watchdog.ident.options = WDIOC_SETTIMEOUT | WDIOF_MAGICCLOSE - | WDIOF_KEEPALIVEPING; + | WDIOF_KEEPALIVEPING + | WDIOF_CARDRESET; snprintf(watchdog.ident.identity, sizeof(watchdog.ident.identity), "%s watchdog", -- GitLab From 49d0707efdad02d15c627ba19629c81a367748b1 Mon Sep 17 00:00:00 2001 From: Ahmad Fatoum Date: Thu, 11 Jun 2020 21:17:44 +0200 Subject: [PATCH 0344/1304] watchdog: f71808e_wdt: remove use of wrong watchdog_info option commit 802141462d844f2e6a4d63a12260d79b7afc4c34 upstream. The flags that should be or-ed into the watchdog_info.options by drivers all start with WDIOF_, e.g. WDIOF_SETTIMEOUT, which indicates that the driver's watchdog_ops has a usable set_timeout. WDIOC_SETTIMEOUT was used instead, which expands to 0xc0045706, which equals: WDIOF_FANFAULT | WDIOF_EXTERN1 | WDIOF_PRETIMEOUT | WDIOF_ALARMONLY | WDIOF_MAGICCLOSE | 0xc0045000 These were so far indicated to userspace on WDIOC_GETSUPPORT. As the driver has not yet been migrated to the new watchdog kernel API, the constant can just be dropped without substitute. 
Fixes: 96cb4eb019ce ("watchdog: f71808e_wdt: new watchdog driver for Fintek F71808E and F71882FG") Cc: stable@vger.kernel.org Signed-off-by: Ahmad Fatoum Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/20200611191750.28096-4-a.fatoum@pengutronix.de Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Greg Kroah-Hartman --- drivers/watchdog/f71808e_wdt.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index 7683936e0b10..608eeba3270b 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -688,8 +688,7 @@ static int __init watchdog_init(int sioaddr) * into the module have been registered yet. */ watchdog.sioaddr = sioaddr; - watchdog.ident.options = WDIOC_SETTIMEOUT - | WDIOF_MAGICCLOSE + watchdog.ident.options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_CARDRESET; -- GitLab From e1462b5e6a052221d06d064d28a2f16c4e1782f6 Mon Sep 17 00:00:00 2001 From: Ahmad Fatoum Date: Thu, 11 Jun 2020 21:17:45 +0200 Subject: [PATCH 0345/1304] watchdog: f71808e_wdt: clear watchdog timeout occurred flag commit 4f39d575844148fbf3081571a1f3b4ae04150958 upstream. The flag indicating a watchdog timeout having occurred normally persists till Power-On Reset of the Fintek Super I/O chip. The user can clear it by writing a `1' to the bit. The driver doesn't offer a restart method, so regular system reboot might not reset the Super I/O and if the watchdog isn't enabled, we won't touch the register containing the bit on the next boot. In this case all subsequent regular reboots will be wrongly flagged by the driver as being caused by the watchdog. Fix this by having the flag cleared after read. This is also done by other drivers like those for the i6300esb and mpc8xxx_wdt. 
Fixes: b97cb21a4634 ("watchdog: f71808e_wdt: Fix WDTMOUT_STS register read") Cc: stable@vger.kernel.org Signed-off-by: Ahmad Fatoum Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/20200611191750.28096-5-a.fatoum@pengutronix.de Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Greg Kroah-Hartman --- drivers/watchdog/f71808e_wdt.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index 608eeba3270b..5d0ea419070d 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -704,6 +704,13 @@ static int __init watchdog_init(int sioaddr) wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF); watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS); + /* + * We don't want WDTMOUT_STS to stick around till regular reboot. + * Write 1 to the bit to clear it to zero. + */ + superio_outb(sioaddr, F71808FG_REG_WDT_CONF, + wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS)); + superio_exit(sioaddr); err = watchdog_set_timeout(timeout); -- GitLab From 73f74fc311d4d7fc94b9e130e3eb108c67314178 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Wed, 15 Jul 2020 10:08:20 +1000 Subject: [PATCH 0346/1304] pseries: Fix 64 bit logical memory block panic commit 89c140bbaeee7a55ed0360a88f294ead2b95201b upstream. Booting with a 4GB LMB size causes us to panic: qemu-system-ppc64: OS terminated: OS panic: Memory block size not suitable: 0x0 Fix pseries_memory_block_size() to handle 64 bit LMBs. 
Cc: stable@vger.kernel.org Signed-off-by: Anton Blanchard Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200715000820.1255764-1-anton@ozlabs.org Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/pseries/hotplug-memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index b168c3742b43..afabe6918619 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -31,7 +31,7 @@ static bool rtas_hp_event; unsigned long pseries_memory_block_size(void) { struct device_node *np; - unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; + u64 memblock_size = MIN_MEMORY_BLOCK_SIZE; struct resource r; np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); -- GitLab From cec9fbfe39594e9fae46b17f6b0d8283ffcf1942 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 6 Aug 2020 14:15:23 -0700 Subject: [PATCH 0347/1304] module: Correctly truncate sysfs sections output commit 11990a5bd7e558e9203c1070fc52fb6f0488e75b upstream. The only-root-readable /sys/module/$module/sections/$section files did not truncate their output to the available buffer size. While most paths into the kernfs read handlers end up using PAGE_SIZE buffers, it's possible to get there through other paths (e.g. splice, sendfile). Actually limit the output to the "count" passed into the read function, and report it back correctly. 
*sigh* Reported-by: kernel test robot Link: https://lore.kernel.org/lkml/20200805002015.GE23458@shao2-debian Fixes: ed66f991bb19 ("module: Refactor section attr into bin attribute") Cc: stable@vger.kernel.org Reviewed-by: Greg Kroah-Hartman Acked-by: Jessica Yu Signed-off-by: Kees Cook Signed-off-by: Greg Kroah-Hartman --- kernel/module.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/kernel/module.c b/kernel/module.c index ae8e7a1fa74a..d05e1bfdd355 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1461,18 +1461,34 @@ struct module_sect_attrs { struct module_sect_attr attrs[0]; }; +#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4)) static ssize_t module_sect_read(struct file *file, struct kobject *kobj, struct bin_attribute *battr, char *buf, loff_t pos, size_t count) { struct module_sect_attr *sattr = container_of(battr, struct module_sect_attr, battr); + char bounce[MODULE_SECT_READ_SIZE + 1]; + size_t wrote; if (pos != 0) return -EINVAL; - return sprintf(buf, "0x%px\n", - kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL); + /* + * Since we're a binary read handler, we must account for the + * trailing NUL byte that sprintf will write: if "buf" is + * too small to hold the NUL, or the NUL is exactly the last + * byte, the read will look like it got truncated by one byte. + * Since there is no way to ask sprintf nicely to not write + * the NUL, we have to use a bounce buffer. + */ + wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n", + kallsyms_show_value(file->f_cred) + ? 
(void *)sattr->address : NULL); + count = min(count, wrote); + memcpy(buf, bounce, count); + + return count; } static void free_sect_attrs(struct module_sect_attrs *sect_attrs) @@ -1521,7 +1537,7 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info) goto out; sect_attrs->nsections++; sattr->battr.read = module_sect_read; - sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4); + sattr->battr.size = MODULE_SECT_READ_SIZE; sattr->battr.attr.mode = 0400; *(gattr++) = &(sattr++)->battr; } -- GitLab From 721df98627dc8add25252e59535f548e3a6f7d96 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Fri, 10 Jul 2020 18:10:53 +0300 Subject: [PATCH 0348/1304] perf intel-pt: Fix FUP packet state commit 401136bb084fd021acd9f8c51b52fe0a25e326b2 upstream. While walking code towards a FUP ip, the packet state is INTEL_PT_STATE_FUP or INTEL_PT_STATE_FUP_NO_TIP. That was mishandled resulting in the state becoming INTEL_PT_STATE_IN_SYNC prematurely. The result was an occasional lost EXSTOP event. 
Signed-off-by: Adrian Hunter Reviewed-by: Andi Kleen Cc: Jiri Olsa Cc: stable@vger.kernel.org Link: http://lore.kernel.org/lkml/20200710151104.15137-2-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Greg Kroah-Hartman --- .../util/intel-pt-decoder/intel-pt-decoder.c | 21 +++++++------------ 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 4357141c7c92..6522b6513895 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1129,6 +1129,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder) return 0; if (err == -EAGAIN || intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) { + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; if (intel_pt_fup_event(decoder)) return 0; return -EAGAIN; @@ -1780,17 +1781,13 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder) } if (decoder->set_fup_mwait) no_tip = true; + if (no_tip) + decoder->pkt_state = INTEL_PT_STATE_FUP_NO_TIP; + else + decoder->pkt_state = INTEL_PT_STATE_FUP; err = intel_pt_walk_fup(decoder); - if (err != -EAGAIN) { - if (err) - return err; - if (no_tip) - decoder->pkt_state = - INTEL_PT_STATE_FUP_NO_TIP; - else - decoder->pkt_state = INTEL_PT_STATE_FUP; - return 0; - } + if (err != -EAGAIN) + return err; if (no_tip) { no_tip = false; break; @@ -2375,15 +2372,11 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) err = intel_pt_walk_tip(decoder); break; case INTEL_PT_STATE_FUP: - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_fup(decoder); if (err == -EAGAIN) err = intel_pt_walk_fup_tip(decoder); - else if (!err) - decoder->pkt_state = INTEL_PT_STATE_FUP; break; case INTEL_PT_STATE_FUP_NO_TIP: - decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_fup(decoder); if (err == -EAGAIN) err = 
intel_pt_walk_trace(decoder); -- GitLab From 7d3146f3b1b75e45599e270c63fc88a6ce9ff644 Mon Sep 17 00:00:00 2001 From: Sibi Sankar Date: Tue, 2 Jun 2020 22:02:56 +0530 Subject: [PATCH 0349/1304] remoteproc: qcom: q6v5: Update running state before requesting stop commit 5b7be880074c73540948f8fc597e0407b98fabfa upstream. Sometimes the stop triggers a watchdog rather than a stop-ack. Update the running state to false on requesting stop to skip the watchdog instead. Error Logs: $ echo stop > /sys/class/remoteproc/remoteproc0/state ipa 1e40000.ipa: received modem stopping event remoteproc-modem: watchdog received: sys_m_smsm_mpss.c:291:APPS force stop qcom-q6v5-mss 4080000.remoteproc-modem: port failed halt ipa 1e40000.ipa: received modem offline event remoteproc0: stopped remote processor 4080000.remoteproc-modem Reviewed-by: Evan Green Fixes: 3b415c8fb263 ("remoteproc: q6v5: Extract common resource handling") Cc: stable@vger.kernel.org Signed-off-by: Sibi Sankar Link: https://lore.kernel.org/r/20200602163257.26978-1-sibis@codeaurora.org Signed-off-by: Bjorn Andersson Signed-off-by: Greg Kroah-Hartman --- drivers/remoteproc/qcom_q6v5.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c index 0d33e3079f0d..ef61cb709acd 100644 --- a/drivers/remoteproc/qcom_q6v5.c +++ b/drivers/remoteproc/qcom_q6v5.c @@ -151,6 +151,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5) { int ret; + q6v5->running = false; + qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), BIT(q6v5->stop_bit)); -- GitLab From 87a56a59ad2427afc358fe1d50fedcbb8a21c31f Mon Sep 17 00:00:00 2001 From: Liu Ying Date: Thu, 9 Jul 2020 10:28:52 +0800 Subject: [PATCH 0350/1304] drm/imx: imx-ldb: Disable both channels for split mode in enc->disable() commit 3b2a999582c467d1883716b37ffcc00178a13713 upstream. 
Both of the two LVDS channels should be disabled for split mode in the encoder's ->disable() callback, because they are enabled in the encoder's ->enable() callback. Fixes: 6556f7f82b9c ("drm: imx: Move imx-drm driver out of staging") Cc: Philipp Zabel Cc: Sascha Hauer Cc: Pengutronix Kernel Team Cc: NXP Linux Team Cc: Signed-off-by: Liu Ying Signed-off-by: Philipp Zabel Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/imx/imx-ldb.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 42daa5c9ff8e..221a8cbc57f9 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -311,18 +311,19 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) { struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); struct imx_ldb *ldb = imx_ldb_ch->ldb; + int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux, ret; drm_panel_disable(imx_ldb_ch->panel); - if (imx_ldb_ch == &ldb->channel[0]) + if (imx_ldb_ch == &ldb->channel[0] || dual) ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; - else if (imx_ldb_ch == &ldb->channel[1]) + if (imx_ldb_ch == &ldb->channel[1] || dual) ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK; regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); - if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { + if (dual) { clk_disable_unprepare(ldb->clk[0]); clk_disable_unprepare(ldb->clk[1]); } -- GitLab From d12296621cf98b0e9820a28ec077e599d37a7193 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Mon, 15 Jun 2020 14:53:21 +0100 Subject: [PATCH 0351/1304] mfd: arizona: Ensure 32k clock is put on driver unbind and error [ Upstream commit ddff6c45b21d0437ce0c85f8ac35d7b5480513d7 ] Whilst it doesn't matter if the internal 32k clock register settings are cleaned up on exit, as the part will be turned off losing any settings, hence the driver hasn't historially bothered. 
The external clock should however be cleaned up, as it could cause clocks to be left on, and will at best generate a warning on unbind. Add clean up on both the probe error path and unbind for the 32k clock. Fixes: cdd8da8cc66b ("mfd: arizona: Add gating of external MCLKn clocks") Signed-off-by: Charles Keepax Signed-off-by: Lee Jones Signed-off-by: Sasha Levin --- drivers/mfd/arizona-core.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index a4403a57ddc8..09acaa2cf74a 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -1433,6 +1433,15 @@ int arizona_dev_init(struct arizona *arizona) arizona_irq_exit(arizona); err_pm: pm_runtime_disable(arizona->dev); + + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } err_reset: arizona_enable_reset(arizona); regulator_disable(arizona->dcvdd); @@ -1455,6 +1464,15 @@ int arizona_dev_exit(struct arizona *arizona) regulator_disable(arizona->dcvdd); regulator_put(arizona->dcvdd); + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } + mfd_remove_devices(arizona->dev); arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona); arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona); -- GitLab From 0bcab21cf6eecdcc63fc9210447e0925a3100eb0 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Tue, 23 Jun 2020 13:52:36 +0300 Subject: [PATCH 0352/1304] RDMA/ipoib: Return void from ipoib_ib_dev_stop() [ Upstream commit 95a5631f6c9f3045f26245e6045244652204dfdb ] The return value from ipoib_ib_dev_stop() is always 0 - change it to be void. 
Link: https://lore.kernel.org/r/20200623105236.18683-1-kamalheib1@gmail.com Signed-off-by: Kamal Heib Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/ulp/ipoib/ipoib.h | 2 +- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 85267bbf4836..ef1222101705 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -513,7 +513,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev); int ipoib_ib_dev_open_default(struct net_device *dev); int ipoib_ib_dev_open(struct net_device *dev); -int ipoib_ib_dev_stop(struct net_device *dev); +void ipoib_ib_dev_stop(struct net_device *dev); void ipoib_ib_dev_up(struct net_device *dev); void ipoib_ib_dev_down(struct net_device *dev); int ipoib_ib_dev_stop_default(struct net_device *dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 0f2e80f54d33..925258ffbde3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -846,7 +846,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) return 0; } -int ipoib_ib_dev_stop(struct net_device *dev) +void ipoib_ib_dev_stop(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -854,8 +854,6 @@ int ipoib_ib_dev_stop(struct net_device *dev) clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); ipoib_flush_ah(dev); - - return 0; } int ipoib_ib_dev_open_default(struct net_device *dev) -- GitLab From 2cb3b14eb6b2af74ec146289f86963194c834eb8 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 25 Jun 2020 20:42:19 +0300 Subject: [PATCH 0353/1304] RDMA/ipoib: Fix ABBA deadlock with ipoib_reap_ah() [ Upstream commit 65936bf25f90fe440bb2d11624c7d10fab266639 ] ipoib_mcast_carrier_on_task() insanely open codes a rtnl_lock() such that the only time flush_workqueue() can be 
called is if it also clears IPOIB_FLAG_OPER_UP. Thus the flush inside ipoib_flush_ah() will deadlock if it gets unlucky enough, and lockdep doesn't help us to find it early: CPU0 CPU1 CPU2 __ipoib_ib_dev_flush() down_read(vlan_rwsem) ipoib_vlan_add() rtnl_trylock() down_write(vlan_rwsem) ipoib_mcast_carrier_on_task() while (!rtnl_trylock()) msleep(20); ipoib_flush_ah() flush_workqueue(priv->wq) Clean up the ah_reaper related functions and lifecycle to make sense: - Start/Stop of the reaper should only be done in open/stop NDOs, not in any other places - cancel and flush of the reaper should only happen in the stop NDO. cancel is only functional when combined with IPOIB_STOP_REAPER. - Non-stop places were flushing the AH's just need to flush out dead AH's synchronously and ignore the background task completely. It is fully locked and harmless to leave running. Which ultimately fixes the ABBA deadlock by removing the unnecessary flush_workqueue() from the problematic place under the vlan_rwsem. 
Fixes: efc82eeeae4e ("IB/ipoib: No longer use flush as a parameter") Link: https://lore.kernel.org/r/20200625174219.290842-1-kamalheib1@gmail.com Reported-by: Kamal Heib Tested-by: Kamal Heib Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 65 ++++++++++------------- drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 + 2 files changed, 31 insertions(+), 36 deletions(-) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 925258ffbde3..82b9c5b6e3e6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -669,14 +669,13 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb, return rc; } -static void __ipoib_reap_ah(struct net_device *dev) +static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); struct ipoib_ah *ah, *tah; LIST_HEAD(remove_list); unsigned long flags; - netif_tx_lock_bh(dev); + netif_tx_lock_bh(priv->dev); spin_lock_irqsave(&priv->lock, flags); list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) @@ -687,37 +686,37 @@ static void __ipoib_reap_ah(struct net_device *dev) } spin_unlock_irqrestore(&priv->lock, flags); - netif_tx_unlock_bh(dev); + netif_tx_unlock_bh(priv->dev); } void ipoib_reap_ah(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, ah_reap_task.work); - struct net_device *dev = priv->dev; - __ipoib_reap_ah(dev); + ipoib_reap_dead_ahs(priv); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) queue_delayed_work(priv->wq, &priv->ah_reap_task, round_jiffies_relative(HZ)); } -static void ipoib_flush_ah(struct net_device *dev) +static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); - - cancel_delayed_work(&priv->ah_reap_task); - flush_workqueue(priv->wq); - ipoib_reap_ah(&priv->ah_reap_task.work); + clear_bit(IPOIB_STOP_REAPER, 
&priv->flags); + queue_delayed_work(priv->wq, &priv->ah_reap_task, + round_jiffies_relative(HZ)); } -static void ipoib_stop_ah(struct net_device *dev) +static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv) { - struct ipoib_dev_priv *priv = ipoib_priv(dev); - set_bit(IPOIB_STOP_REAPER, &priv->flags); - ipoib_flush_ah(dev); + cancel_delayed_work(&priv->ah_reap_task); + /* + * After ipoib_stop_ah_reaper() we always go through + * ipoib_reap_dead_ahs() which ensures the work is really stopped and + * does a final flush out of the dead_ah's list + */ } static int recvs_pending(struct net_device *dev) @@ -846,16 +845,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) return 0; } -void ipoib_ib_dev_stop(struct net_device *dev) -{ - struct ipoib_dev_priv *priv = ipoib_priv(dev); - - priv->rn_ops->ndo_stop(dev); - - clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - ipoib_flush_ah(dev); -} - int ipoib_ib_dev_open_default(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -899,10 +888,7 @@ int ipoib_ib_dev_open(struct net_device *dev) return -1; } - clear_bit(IPOIB_STOP_REAPER, &priv->flags); - queue_delayed_work(priv->wq, &priv->ah_reap_task, - round_jiffies_relative(HZ)); - + ipoib_start_ah_reaper(priv); if (priv->rn_ops->ndo_open(dev)) { pr_warn("%s: Failed to open dev\n", dev->name); goto dev_stop; @@ -913,13 +899,20 @@ int ipoib_ib_dev_open(struct net_device *dev) return 0; dev_stop: - set_bit(IPOIB_STOP_REAPER, &priv->flags); - cancel_delayed_work(&priv->ah_reap_task); - set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); - ipoib_ib_dev_stop(dev); + ipoib_stop_ah_reaper(priv); return -1; } +void ipoib_ib_dev_stop(struct net_device *dev) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + priv->rn_ops->ndo_stop(dev); + + clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); + ipoib_stop_ah_reaper(priv); +} + void ipoib_pkey_dev_check_presence(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); @@ -1230,7 +1223,7 @@ 
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_mcast_dev_flush(dev); if (oper_up) set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); - ipoib_flush_ah(dev); + ipoib_reap_dead_ahs(priv); } if (level >= IPOIB_FLUSH_NORMAL) @@ -1305,7 +1298,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) * the neighbor garbage collection is stopped and reaped. * That should all be done now, so make a final ah flush. */ - ipoib_stop_ah(dev); + ipoib_reap_dead_ahs(priv); clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 6093e8268583..d0c35eb687ae 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1979,6 +1979,8 @@ static void ipoib_ndo_uninit(struct net_device *dev) /* no more works over the priv->wq */ if (priv->wq) { + /* See ipoib_mcast_carrier_on_task() */ + WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)); flush_workqueue(priv->wq); destroy_workqueue(priv->wq); priv->wq = NULL; -- GitLab From a676d83b8f87ec1e4f4bd5540ff8c17a27deede8 Mon Sep 17 00:00:00 2001 From: Paul Kocialkowski Date: Thu, 30 Apr 2020 18:42:44 +0200 Subject: [PATCH 0354/1304] media: rockchip: rga: Introduce color fmt macros and refactor CSC mode logic [ Upstream commit ded874ece29d3fe2abd3775810a06056067eb68c ] This introduces two macros: RGA_COLOR_FMT_IS_YUV and RGA_COLOR_FMT_IS_RGB which allow quick checking of the colorspace familily of a RGA color format. These macros are then used to refactor the logic for CSC mode selection. The two nested tests for input colorspace are simplified into a single one, with a logical and, making the whole more readable. 
Signed-off-by: Paul Kocialkowski Reviewed-by: Ezequiel Garcia Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/platform/rockchip/rga/rga-hw.c | 23 +++++++++----------- drivers/media/platform/rockchip/rga/rga-hw.h | 5 +++++ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c index 96d1b1b3fe8e..6d12491b79d7 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.c +++ b/drivers/media/platform/rockchip/rga/rga-hw.c @@ -208,22 +208,19 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) dst_info.data.format = ctx->out.fmt->hw_format; dst_info.data.swap = ctx->out.fmt->color_swap; - if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) { - if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) { - switch (ctx->in.colorspace) { - case V4L2_COLORSPACE_REC709: - src_info.data.csc_mode = - RGA_SRC_CSC_MODE_BT709_R0; - break; - default: - src_info.data.csc_mode = - RGA_SRC_CSC_MODE_BT601_R0; - break; - } + if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) && + RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) { + switch (ctx->in.colorspace) { + case V4L2_COLORSPACE_REC709: + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0; + break; + default: + src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0; + break; } } - if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) { + if (RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) { switch (ctx->out.colorspace) { case V4L2_COLORSPACE_REC709: dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0; diff --git a/drivers/media/platform/rockchip/rga/rga-hw.h b/drivers/media/platform/rockchip/rga/rga-hw.h index ca3c204abe42..3e4b70eb9ced 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.h +++ b/drivers/media/platform/rockchip/rga/rga-hw.h @@ -103,6 +103,11 @@ #define RGA_COLOR_FMT_CP_8BPP 15 #define RGA_COLOR_FMT_MASK 15 +#define RGA_COLOR_FMT_IS_YUV(fmt) \ + (((fmt) >= 
RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP)) +#define RGA_COLOR_FMT_IS_RGB(fmt) \ + ((fmt) < RGA_COLOR_FMT_YUV422SP) + #define RGA_COLOR_NONE_SWAP 0 #define RGA_COLOR_RB_SWAP 1 #define RGA_COLOR_ALPHA_SWAP 2 -- GitLab From d7ee731744d7c6396116aa93b776ed2ce9afa912 Mon Sep 17 00:00:00 2001 From: Paul Kocialkowski Date: Thu, 30 Apr 2020 18:42:45 +0200 Subject: [PATCH 0355/1304] media: rockchip: rga: Only set output CSC mode for RGB input [ Upstream commit 0f879bab72f47e8ba2421a984e7acfa763d3e84e ] Setting the output CSC mode is required for a YUV output, but must not be set when the input is also YUV. Doing this (as tested with a YUV420P to YUV420P conversion) results in wrong colors. Adapt the logic to only set the output CSC mode when the output is YUV and the input is RGB. Also add a comment to clarify the rationale. Fixes: f7e7b48e6d79 ("[media] rockchip/rga: v4l2 m2m support") Signed-off-by: Paul Kocialkowski Reviewed-by: Ezequiel Garcia Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/platform/rockchip/rga/rga-hw.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c index 6d12491b79d7..681de42f12e9 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.c +++ b/drivers/media/platform/rockchip/rga/rga-hw.c @@ -208,6 +208,11 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) dst_info.data.format = ctx->out.fmt->hw_format; dst_info.data.swap = ctx->out.fmt->color_swap; + /* + * CSC mode must only be set when the colorspace families differ between + * input and output. It must remain unset (zeroed) if both are the same. 
+ */ + if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) && RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) { switch (ctx->in.colorspace) { @@ -220,7 +225,8 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) } } - if (RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) { + if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) && + RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) { switch (ctx->out.colorspace) { case V4L2_COLORSPACE_REC709: dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0; -- GitLab From fe34945c7898deb4cae866ae0330b781abdff5ac Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 8 Jul 2020 14:49:51 +0200 Subject: [PATCH 0356/1304] USB: serial: ftdi_sio: make process-packet buffer unsigned [ Upstream commit ab4cc4ef6724ea588e835fc1e764c4b4407a70b7 ] Use an unsigned type for the process-packet buffer argument and give it a more apt name. Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold Signed-off-by: Sasha Levin --- drivers/usb/serial/ftdi_sio.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 3c0f38cd3a5a..d0ae6318d6e9 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -2037,12 +2037,12 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port, #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE) static int ftdi_process_packet(struct usb_serial_port *port, - struct ftdi_private *priv, char *packet, int len) + struct ftdi_private *priv, unsigned char *buf, int len) { + unsigned char status; + unsigned char *ch; int i; - char status; char flag; - char *ch; if (len < 2) { dev_dbg(&port->dev, "malformed packet\n"); @@ -2052,7 +2052,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, /* Compare new line status to the old one, signal if different/ N.B. packet may be processed more than once, but differences are only processed once. 
*/ - status = packet[0] & FTDI_STATUS_B0_MASK; + status = buf[0] & FTDI_STATUS_B0_MASK; if (status != priv->prev_status) { char diff_status = status ^ priv->prev_status; @@ -2078,7 +2078,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, } /* save if the transmitter is empty or not */ - if (packet[1] & FTDI_RS_TEMT) + if (buf[1] & FTDI_RS_TEMT) priv->transmit_empty = 1; else priv->transmit_empty = 0; @@ -2092,29 +2092,29 @@ static int ftdi_process_packet(struct usb_serial_port *port, * data payload to avoid over-reporting. */ flag = TTY_NORMAL; - if (packet[1] & FTDI_RS_ERR_MASK) { + if (buf[1] & FTDI_RS_ERR_MASK) { /* Break takes precedence over parity, which takes precedence * over framing errors */ - if (packet[1] & FTDI_RS_BI) { + if (buf[1] & FTDI_RS_BI) { flag = TTY_BREAK; port->icount.brk++; usb_serial_handle_break(port); - } else if (packet[1] & FTDI_RS_PE) { + } else if (buf[1] & FTDI_RS_PE) { flag = TTY_PARITY; port->icount.parity++; - } else if (packet[1] & FTDI_RS_FE) { + } else if (buf[1] & FTDI_RS_FE) { flag = TTY_FRAME; port->icount.frame++; } /* Overrun is special, not associated with a char */ - if (packet[1] & FTDI_RS_OE) { + if (buf[1] & FTDI_RS_OE) { port->icount.overrun++; tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } } port->icount.rx += len; - ch = packet + 2; + ch = buf + 2; if (port->port.console && port->sysrq) { for (i = 0; i < len; i++, ch++) { -- GitLab From e9bca40189aec751786c44dfc89fe14a960a4a88 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 8 Jul 2020 14:49:52 +0200 Subject: [PATCH 0357/1304] USB: serial: ftdi_sio: clean up receive processing [ Upstream commit ce054039ba5e47b75a3be02a00274e52b06a6456 ] Clean up receive processing by dropping the character pointer and keeping the length argument unchanged throughout the function. Also make it more apparent that sysrq processing can consume a characters by adding an explicit continue. 
Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold Signed-off-by: Sasha Levin --- drivers/usb/serial/ftdi_sio.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index d0ae6318d6e9..ce9cc1f90b05 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -2040,7 +2040,6 @@ static int ftdi_process_packet(struct usb_serial_port *port, struct ftdi_private *priv, unsigned char *buf, int len) { unsigned char status; - unsigned char *ch; int i; char flag; @@ -2083,8 +2082,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, else priv->transmit_empty = 0; - len -= 2; - if (!len) + if (len == 2) return 0; /* status only */ /* @@ -2113,19 +2111,20 @@ static int ftdi_process_packet(struct usb_serial_port *port, } } - port->icount.rx += len; - ch = buf + 2; + port->icount.rx += len - 2; if (port->port.console && port->sysrq) { - for (i = 0; i < len; i++, ch++) { - if (!usb_serial_handle_sysrq_char(port, *ch)) - tty_insert_flip_char(&port->port, *ch, flag); + for (i = 2; i < len; i++) { + if (usb_serial_handle_sysrq_char(port, buf[i])) + continue; + tty_insert_flip_char(&port->port, buf[i], flag); } } else { - tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len); + tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag, + len - 2); } - return len; + return len - 2; } static void ftdi_process_read_urb(struct urb *urb) -- GitLab From 388a802632cf66c2d0a447110b87bae2ed90b1e2 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Thu, 21 May 2020 16:01:05 +0900 Subject: [PATCH 0358/1304] mmc: renesas_sdhi_internal_dmac: clean up the code for dma complete [ Upstream commit 2b26e34e9af3fa24fa1266e9ea2d66a1f7d62dc0 ] To add end() operation in the future, clean the code of renesas_sdhi_internal_dmac_complete_tasklet_fn(). No behavior change. 
Signed-off-by: Yoshihiro Shimoda Link: https://lore.kernel.org/r/1590044466-28372-3-git-send-email-yoshihiro.shimoda.uh@renesas.com Tested-by: Wolfram Sang Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin --- drivers/mmc/host/renesas_sdhi_internal_dmac.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 382172fb3da8..74eea8247490 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -222,15 +222,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg) DTRAN_CTRL_DM_START); } -static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host) { - struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; enum dma_data_direction dir; - spin_lock_irq(&host->lock); - if (!host->data) - goto out; + return false; if (host->data->flags & MMC_DATA_READ) dir = DMA_FROM_DEVICE; @@ -243,6 +240,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) if (dir == DMA_FROM_DEVICE) clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags); + return true; +} + +static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg) +{ + struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + + spin_lock_irq(&host->lock); + if (!renesas_sdhi_internal_dmac_complete(host)) + goto out; + tmio_mmc_do_data_irq(host); out: spin_unlock_irq(&host->lock); -- GitLab From c00c5131441cbc1775bb1c3164217aa52358396d Mon Sep 17 00:00:00 2001 From: Steve Longerbeam Date: Wed, 17 Jun 2020 15:40:37 -0700 Subject: [PATCH 0359/1304] gpu: ipu-v3: image-convert: Combine rotate/no-rotate irq handlers [ Upstream commit 0f6245f42ce9b7e4d20f2cda8d5f12b55a44d7d1 ] Combine the rotate_irq() and norotate_irq() handlers into a single eof_irq() handler. 
Signed-off-by: Steve Longerbeam Signed-off-by: Philipp Zabel Signed-off-by: Sasha Levin --- drivers/gpu/ipu-v3/ipu-image-convert.c | 58 +++++++++----------------- 1 file changed, 20 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c index 91653adc41cc..cdaf1d74e31a 100644 --- a/drivers/gpu/ipu-v3/ipu-image-convert.c +++ b/drivers/gpu/ipu-v3/ipu-image-convert.c @@ -998,9 +998,10 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run) return IRQ_WAKE_THREAD; } -static irqreturn_t norotate_irq(int irq, void *data) +static irqreturn_t eof_irq(int irq, void *data) { struct ipu_image_convert_chan *chan = data; + struct ipu_image_convert_priv *priv = chan->priv; struct ipu_image_convert_ctx *ctx; struct ipu_image_convert_run *run; unsigned long flags; @@ -1017,45 +1018,26 @@ static irqreturn_t norotate_irq(int irq, void *data) ctx = run->ctx; - if (ipu_rot_mode_is_irt(ctx->rot_mode)) { - /* this is a rotation operation, just ignore */ - spin_unlock_irqrestore(&chan->irqlock, flags); - return IRQ_HANDLED; - } - - ret = do_irq(run); -out: - spin_unlock_irqrestore(&chan->irqlock, flags); - return ret; -} - -static irqreturn_t rotate_irq(int irq, void *data) -{ - struct ipu_image_convert_chan *chan = data; - struct ipu_image_convert_priv *priv = chan->priv; - struct ipu_image_convert_ctx *ctx; - struct ipu_image_convert_run *run; - unsigned long flags; - irqreturn_t ret; - - spin_lock_irqsave(&chan->irqlock, flags); - - /* get current run and its context */ - run = chan->current_run; - if (!run) { + if (irq == chan->out_eof_irq) { + if (ipu_rot_mode_is_irt(ctx->rot_mode)) { + /* this is a rotation op, just ignore */ + ret = IRQ_HANDLED; + goto out; + } + } else if (irq == chan->rot_out_eof_irq) { + if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { + /* this was NOT a rotation op, shouldn't happen */ + dev_err(priv->ipu->dev, + "Unexpected rotation interrupt\n"); + ret = IRQ_HANDLED; + goto out; + } + } 
else { + dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq); ret = IRQ_NONE; goto out; } - ctx = run->ctx; - - if (!ipu_rot_mode_is_irt(ctx->rot_mode)) { - /* this was NOT a rotation operation, shouldn't happen */ - dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n"); - spin_unlock_irqrestore(&chan->irqlock, flags); - return IRQ_HANDLED; - } - ret = do_irq(run); out: spin_unlock_irqrestore(&chan->irqlock, flags); @@ -1148,7 +1130,7 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan) chan->out_chan, IPU_IRQ_EOF); - ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh, + ret = request_threaded_irq(chan->out_eof_irq, eof_irq, do_bh, 0, "ipu-ic", chan); if (ret < 0) { dev_err(priv->ipu->dev, "could not acquire irq %d\n", @@ -1161,7 +1143,7 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan) chan->rotation_out_chan, IPU_IRQ_EOF); - ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh, + ret = request_threaded_irq(chan->rot_out_eof_irq, eof_irq, do_bh, 0, "ipu-ic", chan); if (ret < 0) { dev_err(priv->ipu->dev, "could not acquire irq %d\n", -- GitLab From a24e10abad8ff9df527fc3ceed74c0d361f68230 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 19 Jun 2020 16:42:14 +0800 Subject: [PATCH 0360/1304] dm rq: don't call blk_mq_queue_stopped() in dm_stop_queue() [ Upstream commit e766668c6cd49d741cfb49eaeb38998ba34d27bc ] dm_stop_queue() only uses blk_mq_quiesce_queue() so it doesn't formally stop the blk-mq queue; therefore there is no point making the blk_mq_queue_stopped() check -- it will never be stopped. In addition, even though dm_stop_queue() actually tries to quiesce hw queues via blk_mq_quiesce_queue(), checking with blk_queue_quiesced() to avoid unnecessary queue quiesce isn't reliable because: the QUEUE_FLAG_QUIESCED flag is set before synchronize_rcu() and dm_stop_queue() may be called when synchronize_rcu() from another blk_mq_quiesce_queue() is in-progress. 
Fixes: 7b17c2f7292ba ("dm: Fix a race condition related to stopping and starting queues") Signed-off-by: Ming Lei Signed-off-by: Mike Snitzer Signed-off-by: Sasha Levin --- drivers/md/dm-rq.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 4d36373e1c0f..9fde174ce396 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -95,9 +95,6 @@ static void dm_old_stop_queue(struct request_queue *q) static void dm_mq_stop_queue(struct request_queue *q) { - if (blk_mq_queue_stopped(q)) - return; - blk_mq_quiesce_queue(q); } -- GitLab From 9f00bbb7a21ac7f695aa449981330d706ab46fb9 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:43 +0530 Subject: [PATCH 0361/1304] selftests/powerpc: ptrace-pkey: Rename variables to make it easier to follow code [ Upstream commit 9a11f12e0a6c374b3ef1ce81e32ce477d28eb1b8 ] Rename variable to indicate that they are invalid values which we will use to test ptrace update of pkeys. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-21-aneesh.kumar@linux.ibm.com Signed-off-by: Sasha Levin --- .../selftests/powerpc/ptrace/ptrace-pkey.c | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index bdbbbe8431e0..f9216c7a1829 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -44,7 +44,7 @@ struct shared_info { unsigned long amr2; /* AMR value that ptrace should refuse to write to the child. */ - unsigned long amr3; + unsigned long invalid_amr; /* IAMR value the parent expects to read from the child. */ unsigned long expected_iamr; @@ -57,8 +57,8 @@ struct shared_info { * (even though they're valid ones) because userspace doesn't have * access to those registers. 
*/ - unsigned long new_iamr; - unsigned long new_uamor; + unsigned long invalid_iamr; + unsigned long invalid_uamor; }; static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) @@ -100,7 +100,7 @@ static int child(struct shared_info *info) info->amr1 |= 3ul << pkeyshift(pkey1); info->amr2 |= 3ul << pkeyshift(pkey2); - info->amr3 |= info->amr2 | 3ul << pkeyshift(pkey3); + info->invalid_amr |= info->amr2 | 3ul << pkeyshift(pkey3); if (disable_execute) info->expected_iamr |= 1ul << pkeyshift(pkey1); @@ -111,8 +111,8 @@ static int child(struct shared_info *info) info->expected_uamor |= 3ul << pkeyshift(pkey1) | 3ul << pkeyshift(pkey2); - info->new_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2); - info->new_uamor |= 3ul << pkeyshift(pkey1); + info->invalid_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2); + info->invalid_uamor |= 3ul << pkeyshift(pkey1); /* * We won't use pkey3. We just want a plausible but invalid key to test @@ -196,9 +196,9 @@ static int parent(struct shared_info *info, pid_t pid) PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync); PARENT_FAIL_IF(ret, &info->child_sync); - info->amr1 = info->amr2 = info->amr3 = regs[0]; - info->expected_iamr = info->new_iamr = regs[1]; - info->expected_uamor = info->new_uamor = regs[2]; + info->amr1 = info->amr2 = info->invalid_amr = regs[0]; + info->expected_iamr = info->invalid_iamr = regs[1]; + info->expected_uamor = info->invalid_uamor = regs[2]; /* Wake up child so that it can set itself up. */ ret = prod_child(&info->child_sync); @@ -234,10 +234,10 @@ static int parent(struct shared_info *info, pid_t pid) return ret; /* Write invalid AMR value in child. 
*/ - ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->amr3, 1); + ret = ptrace_write_regs(pid, NT_PPC_PKEY, &info->invalid_amr, 1); PARENT_FAIL_IF(ret, &info->child_sync); - printf("%-30s AMR: %016lx\n", ptrace_write_running, info->amr3); + printf("%-30s AMR: %016lx\n", ptrace_write_running, info->invalid_amr); /* Wake up child so that it can verify it didn't change. */ ret = prod_child(&info->child_sync); @@ -249,7 +249,7 @@ static int parent(struct shared_info *info, pid_t pid) /* Try to write to IAMR. */ regs[0] = info->amr1; - regs[1] = info->new_iamr; + regs[1] = info->invalid_iamr; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 2); PARENT_FAIL_IF(!ret, &info->child_sync); @@ -257,7 +257,7 @@ static int parent(struct shared_info *info, pid_t pid) ptrace_write_running, regs[0], regs[1]); /* Try to write to IAMR and UAMOR. */ - regs[2] = info->new_uamor; + regs[2] = info->invalid_uamor; ret = ptrace_write_regs(pid, NT_PPC_PKEY, regs, 3); PARENT_FAIL_IF(!ret, &info->child_sync); -- GitLab From a075f690c28eaea5616fad4fd8c698ee8b23e2d9 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:44 +0530 Subject: [PATCH 0362/1304] selftests/powerpc: ptrace-pkey: Update the test to mark an invalid pkey correctly [ Upstream commit 0eaa3b5ca7b5a76e3783639c828498343be66a01 ] Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-22-aneesh.kumar@linux.ibm.com Signed-off-by: Sasha Levin --- .../selftests/powerpc/ptrace/ptrace-pkey.c | 30 ++++++++----------- 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index f9216c7a1829..bc33d748d95b 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -66,11 +66,6 @@ static int sys_pkey_alloc(unsigned long flags, unsigned long init_access_rights) return 
syscall(__NR_pkey_alloc, flags, init_access_rights); } -static int sys_pkey_free(int pkey) -{ - return syscall(__NR_pkey_free, pkey); -} - static int child(struct shared_info *info) { unsigned long reg; @@ -100,7 +95,11 @@ static int child(struct shared_info *info) info->amr1 |= 3ul << pkeyshift(pkey1); info->amr2 |= 3ul << pkeyshift(pkey2); - info->invalid_amr |= info->amr2 | 3ul << pkeyshift(pkey3); + /* + * invalid amr value where we try to force write + * things which are deined by a uamor setting. + */ + info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor); if (disable_execute) info->expected_iamr |= 1ul << pkeyshift(pkey1); @@ -111,17 +110,12 @@ static int child(struct shared_info *info) info->expected_uamor |= 3ul << pkeyshift(pkey1) | 3ul << pkeyshift(pkey2); - info->invalid_iamr |= 1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2); - info->invalid_uamor |= 3ul << pkeyshift(pkey1); - /* - * We won't use pkey3. We just want a plausible but invalid key to test - * whether ptrace will let us write to AMR bits we are not supposed to. - * - * This also tests whether the kernel restores the UAMOR permissions - * after a key is freed. + * Create an IAMR value different from expected value. + * Kernel will reject an IAMR and UAMOR change. 
*/ - sys_pkey_free(pkey3); + info->invalid_iamr = info->expected_iamr | (1ul << pkeyshift(pkey1) | 1ul << pkeyshift(pkey2)); + info->invalid_uamor = info->expected_uamor & ~(0x3ul << pkeyshift(pkey1)); printf("%-30s AMR: %016lx pkey1: %d pkey2: %d pkey3: %d\n", user_write, info->amr1, pkey1, pkey2, pkey3); @@ -196,9 +190,9 @@ static int parent(struct shared_info *info, pid_t pid) PARENT_SKIP_IF_UNSUPPORTED(ret, &info->child_sync); PARENT_FAIL_IF(ret, &info->child_sync); - info->amr1 = info->amr2 = info->invalid_amr = regs[0]; - info->expected_iamr = info->invalid_iamr = regs[1]; - info->expected_uamor = info->invalid_uamor = regs[2]; + info->amr1 = info->amr2 = regs[0]; + info->expected_iamr = regs[1]; + info->expected_uamor = regs[2]; /* Wake up child so that it can set itself up. */ ret = prod_child(&info->child_sync); -- GitLab From 0b80d3cdb0fe028ecb63ea9d330d8fba1cecf294 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Jul 2020 08:59:45 +0530 Subject: [PATCH 0363/1304] selftests/powerpc: ptrace-pkey: Don't update expected UAMOR value [ Upstream commit 3563b9bea0ca7f53e4218b5e268550341a49f333 ] With commit 4a4a5e5d2aad ("powerpc/pkeys: key allocation/deallocation must not change pkey registers") we are not updating UAMOR on key allocation. So don't update the expected uamor value in the test. 
Fixes: 4a4a5e5d2aad ("powerpc/pkeys: key allocation/deallocation must not change pkey registers") Signed-off-by: Aneesh Kumar K.V Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200709032946.881753-23-aneesh.kumar@linux.ibm.com Signed-off-by: Sasha Levin --- tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c index bc33d748d95b..3694613f418f 100644 --- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c @@ -101,15 +101,20 @@ static int child(struct shared_info *info) */ info->invalid_amr = info->amr2 | (~0x0UL & ~info->expected_uamor); + /* + * if PKEY_DISABLE_EXECUTE succeeded we should update the expected_iamr + */ if (disable_execute) info->expected_iamr |= 1ul << pkeyshift(pkey1); else info->expected_iamr &= ~(1ul << pkeyshift(pkey1)); - info->expected_iamr &= ~(1ul << pkeyshift(pkey2) | 1ul << pkeyshift(pkey3)); + /* + * We allocated pkey2 and pkey 3 above. Clear the IAMR bits. + */ + info->expected_iamr &= ~(1ul << pkeyshift(pkey2)); + info->expected_iamr &= ~(1ul << pkeyshift(pkey3)); - info->expected_uamor |= 3ul << pkeyshift(pkey1) | - 3ul << pkeyshift(pkey2); /* * Create an IAMR value different from expected value. * Kernel will reject an IAMR and UAMOR change. -- GitLab From b9b2092af3c1ac84fa30f72b767a5a50cab2ba04 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 14 Jul 2020 20:22:11 +0100 Subject: [PATCH 0364/1304] iommu/omap: Check for failure of a call to omap_iommu_dump_ctx [ Upstream commit dee9d154f40c58d02f69acdaa5cfd1eae6ebc28b ] It is possible for the call to omap_iommu_dump_ctx to return a negative error number, so check for the failure and return the error number rather than pass the negative value to simple_read_from_buffer. 
Fixes: 14e0e6796a0d ("OMAP: iommu: add initial debugfs support") Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20200714192211.744776-1-colin.king@canonical.com Addresses-Coverity: ("Improper use of negative value") Signed-off-by: Joerg Roedel Signed-off-by: Sasha Levin --- drivers/iommu/omap-iommu-debug.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 50217548c3b8..5ce55fabc9d8 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -101,8 +101,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, mutex_lock(&iommu_debug_lock); bytes = omap_iommu_dump_ctx(obj, p, count); + if (bytes < 0) + goto err; bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); +err: mutex_unlock(&iommu_debug_lock); kfree(buf); -- GitLab From a5040830b48953d89ec5254e65e6c5b161165762 Mon Sep 17 00:00:00 2001 From: Liu Yi L Date: Fri, 24 Jul 2020 09:49:14 +0800 Subject: [PATCH 0365/1304] iommu/vt-d: Enforce PASID devTLB field mask [ Upstream commit 5f77d6ca5ca74e4b4a5e2e010f7ff50c45dea326 ] Set proper masks to avoid invalid input spillover to reserved bits. 
Signed-off-by: Liu Yi L Signed-off-by: Jacob Pan Signed-off-by: Lu Baolu Reviewed-by: Eric Auger Link: https://lore.kernel.org/r/20200724014925.15523-2-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel Signed-off-by: Sasha Levin --- include/linux/intel-iommu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index b1b4411b4c6b..539f4a84412f 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -308,8 +308,8 @@ enum { #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) -#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_GLOB(g) ((u64)(g) & 0x1) +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ -- GitLab From 7fc9f681fad28ac720acbb157445810fe0b3bf3c Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 29 Jun 2020 17:38:07 +0200 Subject: [PATCH 0366/1304] i2c: rcar: slave: only send STOP event when we have been addressed [ Upstream commit 314139f9f0abdba61ed9a8463bbcb0bf900ac5a2 ] When the SSR interrupt is activated, it will detect every STOP condition on the bus, not only the ones after we have been addressed. So, enable this interrupt only after we have been addressed, and disable it otherwise. 
Fixes: de20d1857dd6 ("i2c: rcar: add slave support") Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-rcar.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 6e49e438ef5a..11d197761685 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -587,13 +587,14 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR); } - rcar_i2c_write(priv, ICSSR, ~SAR & 0xff); + /* Clear SSR, too, because of old STOPs to other clients than us */ + rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff); } /* master sent stop */ if (ssr_filtered & SSR) { i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); } @@ -848,7 +849,7 @@ static int rcar_reg_slave(struct i2c_client *slave) priv->slave = slave; rcar_i2c_write(priv, ICSAR, slave->addr); rcar_i2c_write(priv, ICSSR, 0); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSCR, SIE | SDBS); return 0; -- GitLab From f0b9f54f4763091d4616d4fcf104d65967820f93 Mon Sep 17 00:00:00 2001 From: Xu Wang Date: Mon, 13 Jul 2020 03:21:43 +0000 Subject: [PATCH 0367/1304] clk: clk-atlas6: fix return value check in atlas6_clk_init() [ Upstream commit 12b90b40854a8461a02ef19f6f4474cc88d64b66 ] In case of error, the function clk_register() returns ERR_PTR() and never returns NULL. The NULL test in the return value check should be replaced with IS_ERR(). 
Signed-off-by: Xu Wang Link: https://lore.kernel.org/r/20200713032143.21362-1-vulab@iscas.ac.cn Acked-by: Barry Song Fixes: 7bf21bc81f28 ("clk: sirf: re-arch to make the codes support both prima2 and atlas6") Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/sirf/clk-atlas6.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c index 0cd11e6893af..25ed60776560 100644 --- a/drivers/clk/sirf/clk-atlas6.c +++ b/drivers/clk/sirf/clk-atlas6.c @@ -136,7 +136,7 @@ static void __init atlas6_clk_init(struct device_node *np) for (i = pll1; i < maxclk; i++) { atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]); - BUG_ON(!atlas6_clks[i]); + BUG_ON(IS_ERR(atlas6_clks[i])); } clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu"); clk_register_clkdev(atlas6_clks[io], NULL, "io"); -- GitLab From 17dc3213fbc0e7f9fd962ba9dd4ca6d215f53fa7 Mon Sep 17 00:00:00 2001 From: Rayagonda Kokatanur Date: Fri, 17 Jul 2020 21:46:06 -0700 Subject: [PATCH 0368/1304] pwm: bcm-iproc: handle clk_get_rate() return MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 6ced5ff0be8e94871ba846dfbddf69d21363f3d7 ] Handle clk_get_rate() returning 0 to avoid possible division by zero. 
Fixes: daa5abc41c80 ("pwm: Add support for Broadcom iProc PWM controller") Signed-off-by: Rayagonda Kokatanur Signed-off-by: Scott Branden Reviewed-by: Ray Jui Reviewed-by: Uwe Kleine-König Signed-off-by: Thierry Reding Signed-off-by: Sasha Levin --- drivers/pwm/pwm-bcm-iproc.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c index 31b01035d0ab..8cfba3614e60 100644 --- a/drivers/pwm/pwm-bcm-iproc.c +++ b/drivers/pwm/pwm-bcm-iproc.c @@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, u64 tmp, multi, rate; u32 value, prescale; - rate = clk_get_rate(ip->clk); - value = readl(ip->base + IPROC_PWM_CTRL_OFFSET); if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm))) @@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, else state->polarity = PWM_POLARITY_INVERSED; + rate = clk_get_rate(ip->clk); + if (rate == 0) { + state->period = 0; + state->duty_cycle = 0; + return; + } + value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET); prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm); prescale &= IPROC_PWM_PRESCALE_MAX; -- GitLab From bb1da23aa45bbe1edb74379d5b541c62f0d2836a Mon Sep 17 00:00:00 2001 From: Thomas Hebb Date: Sun, 26 Jul 2020 21:08:14 -0700 Subject: [PATCH 0369/1304] tools build feature: Use CC and CXX from parent [ Upstream commit e3232c2f39acafd5a29128425bc30b9884642cfa ] commit c8c188679ccf ("tools build: Use the same CC for feature detection and actual build") changed these assignments from unconditional (:=) to conditional (?=) so that they wouldn't clobber values from the environment. However, conditional assignment does not work properly for variables that Make implicitly sets, among which are CC and CXX. 
To quote tools/scripts/Makefile.include, which handles this properly: # Makefiles suck: This macro sets a default value of $(2) for the # variable named by $(1), unless the variable has been set by # environment or command line. This is necessary for CC and AR # because make sets default values, so the simpler ?= approach # won't work as expected. In other words, the conditional assignments will not run even if the variables are not overridden in the environment; Make will set CC to "cc" and CXX to "g++" when it starts[1], meaning the variables are not empty by the time the conditional assignments are evaluated. This breaks cross-compilation when CROSS_COMPILE is set but CC isn't, since "cc" gets used for feature detection instead of the cross compiler (and likewise for CXX). To fix the issue, just pass down the values of CC and CXX computed by the parent Makefile, which gets included by the Makefile that actually builds whatever we're detecting features for and so is guaranteed to have good values. This is a better solution anyway, since it means we aren't trying to replicate the logic of the parent build system and so don't risk it getting out of sync. Leave PKG_CONFIG alone, since 1) there's no common logic to compute it in Makefile.include, and 2) it's not an implicit variable, so conditional assignment works properly. 
[1] https://www.gnu.org/software/make/manual/html_node/Implicit-Variables.html Fixes: c8c188679ccf ("tools build: Use the same CC for feature detection and actual build") Signed-off-by: Thomas Hebb Acked-by: Jiri Olsa Cc: David Carrillo-Cisneros Cc: Ian Rogers Cc: Igor Lubashev Cc: Namhyung Kim Cc: Quentin Monnet Cc: Song Liu Cc: Stephane Eranian Cc: thomas hebb Link: http://lore.kernel.org/lkml/0a6e69d1736b0fa231a648f50b0cce5d8a6734ef.1595822871.git.tommyhebb@gmail.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/build/Makefile.feature | 2 +- tools/build/feature/Makefile | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 42a787856cd8..7d9d70c0b380 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -7,7 +7,7 @@ endif feature_check = $(eval $(feature_check_code)) define feature_check_code - feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) + feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC=$(CC) CXX=$(CXX) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) endef feature_set = $(eval $(feature_set_code)) diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index bf8a8ebcca1e..c4845b66b9ba 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -62,8 +62,6 @@ FILES= \ FILES := $(addprefix $(OUTPUT),$(FILES)) -CC ?= $(CROSS_COMPILE)gcc -CXX ?= $(CROSS_COMPILE)g++ PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config 
LLVM_CONFIG ?= llvm-config -- GitLab From 49b30a64d320b23d7c3a3a67f2501a6ab5518a29 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Sun, 26 Jul 2020 18:16:06 +0200 Subject: [PATCH 0370/1304] i2c: rcar: avoid race when unregistering slave MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit c7c9e914f9a0478fba4dc6f227cfd69cf84a4063 ] Due to the lockless design of the driver, it is theoretically possible to access a NULL pointer, if a slave interrupt was running while we were unregistering the slave. To make this rock solid, disable the interrupt for a short time while we are clearing the interrupt_enable register. This patch is purely based on code inspection. The OOPS is super-hard to trigger because clearing SAR (the address) makes interrupts even more unlikely to happen as well. While here, reinit SCR to SDBS because this bit should always be set according to documentation. There is no effect, though, because the interface is disabled. 
Fixes: 7b814d852af6 ("i2c: rcar: avoid race when unregistering slave client") Signed-off-by: Wolfram Sang Reviewed-by: Niklas Söderlund Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-rcar.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 11d197761685..dcdce18fc706 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -861,12 +861,14 @@ static int rcar_unreg_slave(struct i2c_client *slave) WARN_ON(!priv->slave); - /* disable irqs and ensure none is running before clearing ptr */ + /* ensure no irq is running before clearing ptr */ + disable_irq(priv->irq); rcar_i2c_write(priv, ICSIER, 0); - rcar_i2c_write(priv, ICSCR, 0); + rcar_i2c_write(priv, ICSSR, 0); + enable_irq(priv->irq); + rcar_i2c_write(priv, ICSCR, SDBS); rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ - synchronize_irq(priv->irq); priv->slave = NULL; pm_runtime_put(rcar_i2c_priv_to_dev(priv)); -- GitLab From ff62a41403720bccbdfa7dc681410d68548621b7 Mon Sep 17 00:00:00 2001 From: Stafford Horne Date: Tue, 16 Jun 2020 06:19:46 +0900 Subject: [PATCH 0371/1304] openrisc: Fix oops caused when dumping stack [ Upstream commit 57b8e277c33620e115633cdf700a260b55095460 ] When dumping a stack with 'cat /proc/#/stack' the kernel would oops. 
For example: # cat /proc/690/stack Unable to handle kernel access at virtual address 0x7fc60f58 Oops#: 0000 CPU #: 0 PC: c00097fc SR: 0000807f SP: d6f09b9c GPR00: 00000000 GPR01: d6f09b9c GPR02: d6f09bb8 GPR03: d6f09bc4 GPR04: 7fc60f5c GPR05: c00099b4 GPR06: 00000000 GPR07: d6f09ba3 GPR08: ffffff00 GPR09: c0009804 GPR10: d6f08000 GPR11: 00000000 GPR12: ffffe000 GPR13: dbb86000 GPR14: 00000001 GPR15: dbb86250 GPR16: 7fc60f63 GPR17: 00000f5c GPR18: d6f09bc4 GPR19: 00000000 GPR20: c00099b4 GPR21: ffffffc0 GPR22: 00000000 GPR23: 00000000 GPR24: 00000001 GPR25: 000002c6 GPR26: d78b6850 GPR27: 00000001 GPR28: 00000000 GPR29: dbb86000 GPR30: ffffffff GPR31: dbb862fc RES: 00000000 oGPR11: ffffffff Process cat (pid: 702, stackpage=d79d6000) Stack: Call trace: [<598977f2>] save_stack_trace_tsk+0x40/0x74 [<95063f0e>] stack_trace_save_tsk+0x44/0x58 [] proc_pid_stack+0xd0/0x13c [] proc_single_show+0x6c/0xf0 [] seq_read+0x1b4/0x688 [<2d6c7480>] do_iter_read+0x208/0x248 [<2182a2fb>] vfs_readv+0x64/0x90 This was caused by the stack trace code in save_stack_trace_tsk using the wrong stack pointer. It was using the user stack pointer instead of the kernel stack pointer. Fix this by using the right stack. Also for good measure we add try_get_task_stack/put_task_stack to ensure the task is not lost while we are walking it's stack. 
Fixes: eecac38b0423a ("openrisc: support framepointers and STACKTRACE_SUPPORT") Signed-off-by: Stafford Horne Signed-off-by: Sasha Levin --- arch/openrisc/kernel/stacktrace.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/arch/openrisc/kernel/stacktrace.c b/arch/openrisc/kernel/stacktrace.c index 43f140a28bc7..54d38809e22c 100644 --- a/arch/openrisc/kernel/stacktrace.c +++ b/arch/openrisc/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = NULL; + if (!try_get_task_stack(tsk)) + return; + if (tsk == current) sp = (unsigned long *) &sp; - else - sp = (unsigned long *) KSTK_ESP(tsk); + else { + unsigned long ksp; + + /* Locate stack from kernel context */ + ksp = task_thread_info(tsk)->ksp; + ksp += STACK_FRAME_OVERHEAD; /* redzone */ + ksp += sizeof(struct pt_regs); + + sp = (unsigned long *) ksp; + } unwind_stack(trace, sp, save_stack_address_nosched); + + put_task_stack(tsk); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); -- GitLab From fe9a71128e3dfb4f0fdd6edb7a0ef1d980dfcf1d Mon Sep 17 00:00:00 2001 From: "Ewan D. Milne" Date: Wed, 29 Jul 2020 19:10:11 -0400 Subject: [PATCH 0372/1304] scsi: lpfc: nvmet: Avoid hang / use-after-free again when destroying targetport [ Upstream commit af6de8c60fe9433afa73cea6fcccdccd98ad3e5e ] We cannot wait on a completion object in the lpfc_nvme_targetport structure in the _destroy_targetport() code path because the NVMe/fc transport will free that structure immediately after the .targetport_delete() callback. This results in a use-after-free, and a crash if slub_debug=FZPU is enabled. 
An earlier fix put the completion on the stack, but commit 2a0fb340fcc8 ("scsi: lpfc: Correct localport timeout duration error") subsequently changed the code to reference the completion through a pointer in the object rather than the local stack variable. Fix this by using the stack variable directly. Link: https://lore.kernel.org/r/20200729231011.13240-1-emilne@redhat.com Fixes: 2a0fb340fcc8 ("scsi: lpfc: Correct localport timeout duration error") Reviewed-by: James Smart Signed-off-by: Ewan D. Milne Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/lpfc/lpfc_nvmet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 768eba8c111d..5bc33817568e 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1712,7 +1712,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) } tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); - if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, + if (!wait_for_completion_timeout(&tport_unreg_cmp, msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) lpfc_printf_log(phba, KERN_ERR, LOG_NVME, "6179 Unreg targetport %p timeout " -- GitLab From ddf2b7891323fe30bfac761d2e12d3378a0d7eb2 Mon Sep 17 00:00:00 2001 From: Krzysztof Sobota Date: Fri, 17 Jul 2020 12:31:09 +0200 Subject: [PATCH 0373/1304] watchdog: initialize device before misc_register [ Upstream commit cb36e29bb0e4b0c33c3d5866a0a4aebace4c99b7 ] When watchdog device is being registered, it calls misc_register that makes watchdog available for systemd to open. This is a data race scenario, because when device is open it may still have device struct not initialized - this in turn causes a crash. This patch moves device initialization before misc_register call and it solves the problem printed below.
------------[ cut here ]------------ WARNING: CPU: 3 PID: 1 at lib/kobject.c:612 kobject_get+0x50/0x54 kobject: '(null)' ((ptrval)): is not initialized, yet kobject_get() is being called. Modules linked in: k2_reset_status(O) davinci_wdt(+) sfn_platform_hwbcn(O) fsmddg_sfn(O) clk_misc_mmap(O) clk_sw_bcn(O) fsp_reset(O) cma_mod(O) slave_sup_notif(O) fpga_master(O) latency(O+) evnotify(O) enable_arm_pmu(O) xge(O) rio_mport_cdev br_netfilter bridge stp llc nvrd_checksum(O) ipv6 CPU: 3 PID: 1 Comm: systemd Tainted: G O 4.19.113-g2579778-fsm4_k2 #1 Hardware name: Keystone [] (unwind_backtrace) from [] (show_stack+0x18/0x1c) [] (show_stack) from [] (dump_stack+0xb4/0xe8) [] (dump_stack) from [] (__warn+0xfc/0x114) [] (__warn) from [] (warn_slowpath_fmt+0x50/0x74) [] (warn_slowpath_fmt) from [] (kobject_get+0x50/0x54) [] (kobject_get) from [] (get_device+0x1c/0x24) [] (get_device) from [] (watchdog_open+0x90/0xf0) [] (watchdog_open) from [] (misc_open+0x130/0x17c) [] (misc_open) from [] (chrdev_open+0xec/0x1a8) [] (chrdev_open) from [] (do_dentry_open+0x204/0x3cc) [] (do_dentry_open) from [] (path_openat+0x330/0x1148) [] (path_openat) from [] (do_filp_open+0x78/0xec) [] (do_filp_open) from [] (do_sys_open+0x130/0x1f4) [] (do_sys_open) from [] (ret_fast_syscall+0x0/0x28) Exception stack(0xd2ceffa8 to 0xd2cefff0) ffa0: b6f69968 00000000 ffffff9c b6ebd210 000a0001 00000000 ffc0: b6f69968 00000000 00000000 00000142 fffffffd ffffffff 00b65530 bed7bb78 ffe0: 00000142 bed7ba70 b6cc2503 b6cc41d6 ---[ end trace 7b16eb105513974f ]--- ------------[ cut here ]------------ WARNING: CPU: 3 PID: 1 at lib/refcount.c:153 kobject_get+0x24/0x54 refcount_t: increment on 0; use-after-free. 
Modules linked in: k2_reset_status(O) davinci_wdt(+) sfn_platform_hwbcn(O) fsmddg_sfn(O) clk_misc_mmap(O) clk_sw_bcn(O) fsp_reset(O) cma_mod(O) slave_sup_notif(O) fpga_master(O) latency(O+) evnotify(O) enable_arm_pmu(O) xge(O) rio_mport_cdev br_netfilter bridge stp llc nvrd_checksum(O) ipv6 CPU: 3 PID: 1 Comm: systemd Tainted: G W O 4.19.113-g2579778-fsm4_k2 #1 Hardware name: Keystone [] (unwind_backtrace) from [] (show_stack+0x18/0x1c) [] (show_stack) from [] (dump_stack+0xb4/0xe8) [] (dump_stack) from [] (__warn+0xfc/0x114) [] (__warn) from [] (warn_slowpath_fmt+0x50/0x74) [] (warn_slowpath_fmt) from [] (kobject_get+0x24/0x54) [] (kobject_get) from [] (get_device+0x1c/0x24) [] (get_device) from [] (watchdog_open+0x90/0xf0) [] (watchdog_open) from [] (misc_open+0x130/0x17c) [] (misc_open) from [] (chrdev_open+0xec/0x1a8) [] (chrdev_open) from [] (do_dentry_open+0x204/0x3cc) [] (do_dentry_open) from [] (path_openat+0x330/0x1148) [] (path_openat) from [] (do_filp_open+0x78/0xec) [] (do_filp_open) from [] (do_sys_open+0x130/0x1f4) [] (do_sys_open) from [] (ret_fast_syscall+0x0/0x28) Exception stack(0xd2ceffa8 to 0xd2cefff0) ffa0: b6f69968 00000000 ffffff9c b6ebd210 000a0001 00000000 ffc0: b6f69968 00000000 00000000 00000142 fffffffd ffffffff 00b65530 bed7bb78 ffe0: 00000142 bed7ba70 b6cc2503 b6cc41d6 ---[ end trace 7b16eb1055139750 ]--- Fixes: 72139dfa2464 ("watchdog: Fix the race between the release of watchdog_core_data and cdev") Reviewed-by: Guenter Roeck Reviewed-by: Alexander Sverdlin Signed-off-by: Krzysztof Sobota Link: https://lore.kernel.org/r/20200717103109.14660-1-krzysztof.sobota@nokia.com Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Sasha Levin --- drivers/watchdog/watchdog_dev.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 10b2090f3e5e..1c322caecf7f 100644 --- a/drivers/watchdog/watchdog_dev.c +++ 
b/drivers/watchdog/watchdog_dev.c @@ -947,6 +947,15 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) if (IS_ERR_OR_NULL(watchdog_kworker)) return -ENODEV; + device_initialize(&wd_data->dev); + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); + wd_data->dev.class = &watchdog_class; + wd_data->dev.parent = wdd->parent; + wd_data->dev.groups = wdd->groups; + wd_data->dev.release = watchdog_core_data_release; + dev_set_drvdata(&wd_data->dev, wdd); + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); + kthread_init_work(&wd_data->work, watchdog_ping_work); hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); wd_data->timer.function = watchdog_timer_expired; @@ -967,15 +976,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) } } - device_initialize(&wd_data->dev); - wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); - wd_data->dev.class = &watchdog_class; - wd_data->dev.parent = wdd->parent; - wd_data->dev.groups = wdd->groups; - wd_data->dev.release = watchdog_core_data_release; - dev_set_drvdata(&wd_data->dev, wdd); - dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); - /* Fill in the data structures */ cdev_init(&wd_data->cdev, &watchdog_fops); -- GitLab From 27a545d597ddecc87cf20e1e4809985fd32736c7 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 6 Aug 2020 15:35:34 -0700 Subject: [PATCH 0374/1304] Input: sentelic - fix error return when fsp_reg_write fails [ Upstream commit ea38f06e0291986eb93beb6d61fd413607a30ca4 ] Currently when the call to fsp_reg_write fails -EIO is not being returned because the count is being returned instead of the return value in retval. Fix this by returning the value in retval instead of count. 
Addresses-Coverity: ("Unused value") Fixes: fc69f4a6af49 ("Input: add new driver for Sentelic Finger Sensing Pad") Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20200603141218.131663-1-colin.king@canonical.com Signed-off-by: Dmitry Torokhov Signed-off-by: Sasha Levin --- drivers/input/mouse/sentelic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c index 1d6010d463e2..022a8cb58a06 100644 --- a/drivers/input/mouse/sentelic.c +++ b/drivers/input/mouse/sentelic.c @@ -454,7 +454,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data, fsp_reg_write_enable(psmouse, false); - return count; + return retval; } PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg); -- GitLab From 3cfd94ed90ee1eb4854aef1a4f9d8a3334654671 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 26 Jun 2020 13:34:37 +0300 Subject: [PATCH 0375/1304] drm/vmwgfx: Use correct vmw_legacy_display_unit pointer [ Upstream commit 1d2c0c565bc0da25f5e899a862fb58e612b222df ] The "entry" pointer is an offset from the list head and it doesn't point to a valid vmw_legacy_display_unit struct. Presumably the intent was to point to the last entry. Also the "i++" wasn't used so I have removed that as well. 
Fixes: d7e1958dbe4a ("drm/vmwgfx: Support older hardware.") Signed-off-by: Dan Carpenter Reviewed-by: Roland Scheidegger Signed-off-by: Roland Scheidegger Signed-off-by: Sasha Levin --- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 723578117191..0743a7311700 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -79,7 +79,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) struct vmw_legacy_display_unit *entry; struct drm_framebuffer *fb = NULL; struct drm_crtc *crtc = NULL; - int i = 0; + int i; /* If there is no display topology the host just assumes * that the guest will set the same layout as the host. @@ -90,12 +90,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) crtc = &entry->base.crtc; w = max(w, crtc->x + crtc->mode.hdisplay); h = max(h, crtc->y + crtc->mode.vdisplay); - i++; } if (crtc == NULL) return 0; - fb = entry->base.crtc.primary->state->fb; + fb = crtc->primary->state->fb; return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], fb->format->cpp[0] * 8, -- GitLab From ae71f731f061272806651be14f355cafd9560462 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 26 Jun 2020 13:39:59 +0300 Subject: [PATCH 0376/1304] drm/vmwgfx: Fix two list_for_each loop exit tests [ Upstream commit 4437c1152ce0e57ab8f401aa696ea6291cc07ab1 ] These if statements are supposed to be true if we ended the list_for_each_entry() loops without hitting a break statement but they don't work. In the first loop, we increment "i" after the "if (i == unit)" condition so we don't necessarily know that "i" is not equal to unit at the end of the loop. In the second loop we exit when mode is not pointing to a valid drm_display_mode struct so it doesn't make sense to check "mode->type". 
Fixes: a278724aa23c ("drm/vmwgfx: Implement fbdev on kms v2") Signed-off-by: Dan Carpenter Reviewed-by: Roland Scheidegger Signed-off-by: Roland Scheidegger Signed-off-by: Sasha Levin --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 6a712a8d59e9..e486b6517ac5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2861,7 +2861,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, ++i; } - if (i != unit) { + if (&con->head == &dev_priv->dev->mode_config.connector_list) { DRM_ERROR("Could not find initial display unit.\n"); ret = -EINVAL; goto out_unlock; @@ -2885,13 +2885,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, break; } - if (mode->type & DRM_MODE_TYPE_PREFERRED) - *p_mode = mode; - else { + if (&mode->head == &con->modes) { WARN_ONCE(true, "Could not find initial preferred mode.\n"); *p_mode = list_first_entry(&con->modes, struct drm_display_mode, head); + } else { + *p_mode = mode; } out_unlock: -- GitLab From c08be09b2c1c5389f1d24677b595822ac7a75a21 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Mon, 10 Aug 2020 10:57:05 +0800 Subject: [PATCH 0377/1304] net: qcom/emac: add missed clk_disable_unprepare in error path of emac_clks_phase1_init [ Upstream commit 50caa777a3a24d7027748e96265728ce748b41ef ] Fix the missing clk_disable_unprepare() before return from emac_clks_phase1_init() in the error handling case. Fixes: b9b17debc69d ("net: emac: emac gigabit ethernet controller driver") Reported-by: Hulk Robot Signed-off-by: Wang Hai Acked-by: Timur Tabi Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/qualcomm/emac/emac.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 2a0cbc535a2e..19673ed929e6 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -493,13 +493,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev, ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]); if (ret) - return ret; + goto disable_clk_axi; ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000); if (ret) - return ret; + goto disable_clk_cfg_ahb; + + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + if (ret) + goto disable_clk_cfg_ahb; - return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + return 0; + +disable_clk_cfg_ahb: + clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]); +disable_clk_axi: + clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]); + + return ret; } /* Enable clocks; needs emac_clks_phase1_init to be called before */ -- GitLab From a906b868953a9c9bba44649a8fe760e818dd7224 Mon Sep 17 00:00:00 2001 From: Jeffrey Mitchell Date: Wed, 5 Aug 2020 12:23:19 -0500 Subject: [PATCH 0378/1304] nfs: Fix getxattr kernel panic and memory overflow [ Upstream commit b4487b93545214a9db8cbf32e86411677b0cca21 ] Move the buffer size check to decode_attr_security_label() before memcpy() Only call memcpy() if the buffer is large enough Fixes: aa9c2669626c ("NFS: Client implementation of Labeled-NFS") Signed-off-by: Jeffrey Mitchell [Trond: clean up duplicate test of label->len != 0] Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin --- fs/nfs/nfs4proc.c | 2 -- fs/nfs/nfs4xdr.c | 6 +++++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 05cb68ca1ba1..1ef75b1deffa 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5603,8 +5603,6 @@ static int 
_nfs4_get_security_label(struct inode *inode, void *buf, return ret; if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) return -ENOENT; - if (buflen < label.len) - return -ERANGE; return 0; } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index c4cf0192d7bb..0a5cae8f8aff 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4280,7 +4280,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, goto out_overflow; if (len < NFS4_MAXLABELLEN) { if (label) { - memcpy(label->label, p, len); + if (label->len) { + if (label->len < len) + return -ERANGE; + memcpy(label->label, p, len); + } label->len = len; label->pi = pi; label->lfs = lfs; -- GitLab From 95fc841ea8b681eea44e766c182c18f17818acbf Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 11 Aug 2020 18:35:33 -0700 Subject: [PATCH 0379/1304] fs/minix: set s_maxbytes correctly [ Upstream commit 32ac86efff91a3e4ef8c3d1cadd4559e23c8e73a ] The minix filesystem leaves super_block::s_maxbytes at MAX_NON_LFS rather than setting it to the actual filesystem-specific limit. This is broken because it means userspace doesn't see the standard behavior like getting EFBIG and SIGXFSZ when exceeding the maximum file size. Fix this by setting s_maxbytes correctly. 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Eric Biggers Signed-off-by: Andrew Morton Cc: Alexander Viro Cc: Qiujun Huang Link: http://lkml.kernel.org/r/20200628060846.682158-5-ebiggers@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- fs/minix/inode.c | 12 +++++++----- fs/minix/itree_v1.c | 2 +- fs/minix/itree_v2.c | 3 +-- fs/minix/minix.h | 1 - 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 4f994de46e6b..03fe8bac36cf 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -155,8 +155,10 @@ static int minix_remount (struct super_block * sb, int * flags, char * data) return 0; } -static bool minix_check_superblock(struct minix_sb_info *sbi) +static bool minix_check_superblock(struct super_block *sb) { + struct minix_sb_info *sbi = minix_sb(sb); + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) return false; @@ -166,7 +168,7 @@ static bool minix_check_superblock(struct minix_sb_info *sbi) * of indirect blocks which places the limit well above U32_MAX. 
*/ if (sbi->s_version == MINIX_V1 && - sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE) + sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE) return false; return true; @@ -207,7 +209,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = ms->s_zmap_blocks; sbi->s_firstdatazone = ms->s_firstdatazone; sbi->s_log_zone_size = ms->s_log_zone_size; - sbi->s_max_size = ms->s_max_size; + s->s_maxbytes = ms->s_max_size; s->s_magic = ms->s_magic; if (s->s_magic == MINIX_SUPER_MAGIC) { sbi->s_version = MINIX_V1; @@ -238,7 +240,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = m3s->s_zmap_blocks; sbi->s_firstdatazone = m3s->s_firstdatazone; sbi->s_log_zone_size = m3s->s_log_zone_size; - sbi->s_max_size = m3s->s_max_size; + s->s_maxbytes = m3s->s_max_size; sbi->s_ninodes = m3s->s_ninodes; sbi->s_nzones = m3s->s_zones; sbi->s_dirsize = 64; @@ -250,7 +252,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) } else goto out_no_fs; - if (!minix_check_superblock(sbi)) + if (!minix_check_superblock(s)) goto out_illegal_sb; /* diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c index 046cc96ee7ad..c0d418209ead 100644 --- a/fs/minix/itree_v1.c +++ b/fs/minix/itree_v1.c @@ -29,7 +29,7 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, inode->i_sb->s_bdev); - } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) { + } else if (block >= inode->i_sb->s_maxbytes/BLOCK_SIZE) { if (printk_ratelimit()) printk("MINIX-fs: block_to_path: " "block %ld too big on dev %pg\n", diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c index f7fc7ecccccc..ee8af2f9e282 100644 --- a/fs/minix/itree_v2.c +++ b/fs/minix/itree_v2.c @@ -32,8 +32,7 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { 
printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, sb->s_bdev); - } else if ((u64)block * (u64)sb->s_blocksize >= - minix_sb(sb)->s_max_size) { + } else if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes) { if (printk_ratelimit()) printk("MINIX-fs: block_to_path: " "block %ld too big on dev %pg\n", diff --git a/fs/minix/minix.h b/fs/minix/minix.h index df081e8afcc3..168d45d3de73 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h @@ -32,7 +32,6 @@ struct minix_sb_info { unsigned long s_zmap_blocks; unsigned long s_firstdatazone; unsigned long s_log_zone_size; - unsigned long s_max_size; int s_dirsize; int s_namelen; struct buffer_head ** s_imap; -- GitLab From c7ac366be04e49147498f0837bca5c020c3fb994 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 11 Aug 2020 18:35:36 -0700 Subject: [PATCH 0380/1304] fs/minix: fix block limit check for V1 filesystems [ Upstream commit 0a12c4a8069607247cb8edc3b035a664e636fd9a ] The minix filesystem reads its maximum file size from its on-disk superblock. This value isn't necessarily a multiple of the block size. When it's not, the V1 block mapping code doesn't allow mapping the last possible block. Commit 6ed6a722f9ab ("minixfs: fix block limit check") fixed this in the V2 mapping code. Fix it in the V1 mapping code too. 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Eric Biggers Signed-off-by: Andrew Morton Cc: Alexander Viro Cc: Qiujun Huang Link: http://lkml.kernel.org/r/20200628060846.682158-6-ebiggers@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- fs/minix/itree_v1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c index c0d418209ead..405573a79aab 100644 --- a/fs/minix/itree_v1.c +++ b/fs/minix/itree_v1.c @@ -29,7 +29,7 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, inode->i_sb->s_bdev); - } else if (block >= inode->i_sb->s_maxbytes/BLOCK_SIZE) { + } else if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes) { if (printk_ratelimit()) printk("MINIX-fs: block_to_path: " "block %ld too big on dev %pg\n", -- GitLab From 9f3fb90d30db4969c41cc22d72c0f6ca2f29954c Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 11 Aug 2020 18:35:39 -0700 Subject: [PATCH 0381/1304] fs/minix: remove expected error message in block_to_path() [ Upstream commit f666f9fb9a36f1c833b9d18923572f0e4d304754 ] When truncating a file to a size within the last allowed logical block, block_to_path() is called with the *next* block. This exceeds the limit, causing the "block %ld too big" error message to be printed. This case isn't actually an error; there are just no more blocks past that point. So, remove this error message. 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Eric Biggers Signed-off-by: Andrew Morton Cc: Alexander Viro Cc: Qiujun Huang Link: http://lkml.kernel.org/r/20200628060846.682158-7-ebiggers@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- fs/minix/itree_v1.c | 12 ++++++------ fs/minix/itree_v2.c | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c index 405573a79aab..1fed906042aa 100644 --- a/fs/minix/itree_v1.c +++ b/fs/minix/itree_v1.c @@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, inode->i_sb->s_bdev); - } else if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, inode->i_sb->s_bdev); - } else if (block < 7) { + return 0; + } + if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes) + return 0; + + if (block < 7) { offsets[n++] = block; } else if ((block -= 7) < 512) { offsets[n++] = 7; diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c index ee8af2f9e282..9d00f31a2d9d 100644 --- a/fs/minix/itree_v2.c +++ b/fs/minix/itree_v2.c @@ -32,12 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, sb->s_bdev); - } else if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, sb->s_bdev); - } else if (block < DIRCOUNT) { + return 0; + } + if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes) + return 0; + + if (block < DIRCOUNT) { offsets[n++] = block; } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) { offsets[n++] = DIRCOUNT; -- GitLab From aefe207d95d02a1b4711bee17cde0014d7b8223f Mon Sep 
17 00:00:00 2001 From: Colin Ian King Date: Tue, 11 Aug 2020 18:35:53 -0700 Subject: [PATCH 0382/1304] fs/ufs: avoid potential u32 multiplication overflow [ Upstream commit 88b2e9b06381551b707d980627ad0591191f7a2d ] The 64 bit ino is being compared to the product of two u32 values, however, the multiplication is being performed using a 32 bit multiply so there is a potential of an overflow. To be fully safe, cast uspi->s_ncg to a u64 to ensure a 64 bit multiplication occurs to avoid any chance of overflow. Fixes: f3e2a520f5fb ("ufs: NFS support") Signed-off-by: Colin Ian King Signed-off-by: Andrew Morton Cc: Evgeniy Dushistov Cc: Alexey Dobriyan Link: http://lkml.kernel.org/r/20200715170355.1081713-1-colin.king@canonical.com Addresses-Coverity: ("Unintentional integer overflow") Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- fs/ufs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ufs/super.c b/fs/ufs/super.c index a4e07e910f1b..6e59e45d7bfb 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -100,7 +100,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct inode *inode; - if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg) + if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg) return ERR_PTR(-ESTALE); inode = ufs_iget(sb, ino); -- GitLab From 8e69ac04403c0f3d721a552d7987fd8cd1daa517 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 11 Aug 2020 18:36:16 -0700 Subject: [PATCH 0383/1304] test_kmod: avoid potential double free in trigger_config_run_type() [ Upstream commit 0776d1231bec0c7ab43baf440a3f5ef5f49dd795 ] Reset the member "test_fs" of the test configuration after a call of the function "kfree_const" to a null pointer so that a double memory release will not be performed. 
Fixes: d9c6a72d6fa2 ("kmod: add test driver to stress test the module loader") Signed-off-by: Tiezhu Yang Signed-off-by: Luis Chamberlain Signed-off-by: Andrew Morton Acked-by: Luis Chamberlain Cc: Alexei Starovoitov Cc: Al Viro Cc: Christian Brauner Cc: Chuck Lever Cc: David Howells Cc: David S. Miller Cc: Greg Kroah-Hartman Cc: Jakub Kicinski Cc: James Morris Cc: Jarkko Sakkinen Cc: J. Bruce Fields Cc: Jens Axboe Cc: Josh Triplett Cc: Kees Cook Cc: Lars Ellenberg Cc: Nikolay Aleksandrov Cc: Philipp Reisner Cc: Roopa Prabhu Cc: "Serge E. Hallyn" Cc: Sergei Trofimovich Cc: Sergey Kvachonok Cc: Shuah Khan Cc: Tony Vroon Cc: Christoph Hellwig Link: http://lkml.kernel.org/r/20200610154923.27510-4-mcgrof@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/test_kmod.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/test_kmod.c b/lib/test_kmod.c index 9cf77628fc91..87a0cc750ea2 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -745,7 +745,7 @@ static int trigger_config_run_type(struct kmod_test_device *test_dev, break; case TEST_KMOD_FS_TYPE: kfree_const(config->test_fs); - config->test_driver = NULL; + config->test_fs = NULL; copied = config_copy_test_fs(config, test_str, strlen(test_str)); break; -- GitLab From 59547851489e5168b7291eac5ecc5bb24a71d7f5 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 23 Jul 2020 16:02:46 +0300 Subject: [PATCH 0384/1304] mfd: dln2: Run event handler loop under spinlock [ Upstream commit 3d858942250820b9adc35f963a257481d6d4c81d ] The event handler loop must be run with interrupts disabled. Otherwise we will have a warning: [ 1970.785649] irq 31 handler lineevent_irq_handler+0x0/0x20 enabled interrupts [ 1970.792739] WARNING: CPU: 0 PID: 0 at kernel/irq/handle.c:159 __handle_irq_event_percpu+0x162/0x170 [ 1970.860732] RIP: 0010:__handle_irq_event_percpu+0x162/0x170 ... 
[ 1970.946994] Call Trace: [ 1970.949446] [ 1970.951471] handle_irq_event_percpu+0x2c/0x80 [ 1970.955921] handle_irq_event+0x23/0x43 [ 1970.959766] handle_simple_irq+0x57/0x70 [ 1970.963695] generic_handle_irq+0x42/0x50 [ 1970.967717] dln2_rx+0xc1/0x210 [dln2] [ 1970.971479] ? usb_hcd_unmap_urb_for_dma+0xa6/0x1c0 [ 1970.976362] __usb_hcd_giveback_urb+0x77/0xe0 [ 1970.980727] usb_giveback_urb_bh+0x8e/0xe0 [ 1970.984837] tasklet_action_common.isra.0+0x4a/0xe0 ... Recently xHCI driver switched to tasklets in the commit 36dc01657b49 ("usb: host: xhci: Support running urb giveback in tasklet context"). The handle_irq_event_* functions are expected to be called with interrupts disabled and they rightfully complain here because we run in tasklet context with interrupts enabled. Use a event spinlock to protect event handler from being interrupted. Note, that there are only two users of this GPIO and ADC drivers and both of them are using generic_handle_irq() which makes above happen. Fixes: 338a12814297 ("mfd: Add support for Diolan DLN-2 devices") Signed-off-by: Andy Shevchenko Signed-off-by: Lee Jones Signed-off-by: Sasha Levin --- drivers/mfd/dln2.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 6ea0dd37b453..fe614ba5fec9 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -290,7 +290,11 @@ static void dln2_rx(struct urb *urb) len = urb->actual_length - sizeof(struct dln2_header); if (handle == DLN2_HANDLE_EVENT) { + unsigned long flags; + + spin_lock_irqsave(&dln2->event_cb_lock, flags); dln2_run_event_callbacks(dln2, id, echo, data, len); + spin_unlock_irqrestore(&dln2->event_cb_lock, flags); } else { /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */ if (dln2_transfer_complete(dln2, urb, handle, echo)) -- GitLab From 9a1b7ced0fd9305d25708ff7fd9d7d4fb794f7a1 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Thu, 13 Aug 2020 15:46:30 +0800 Subject: [PATCH 0385/1304] ALSA: echoaudio: Fix potential 
Oops in snd_echo_resume() [ Upstream commit 5a25de6df789cc805a9b8ba7ab5deef5067af47e ] Freeing chip on error may lead to an Oops at the next time the system goes to resume. Fix this by removing all snd_echo_free() calls on error. Fixes: 47b5d028fdce8 ("ALSA: Echoaudio - Add suspend support #2") Signed-off-by: Dinghao Liu Link: https://lore.kernel.org/r/20200813074632.17022-1-dinghao.liu@zju.edu.cn Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/pci/echoaudio/echoaudio.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 3ef2b27ebbe8..f32c55ffffc7 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c @@ -2216,7 +2216,6 @@ static int snd_echo_resume(struct device *dev) if (err < 0) { kfree(commpage_bak); dev_err(dev, "resume init_hw err=%d\n", err); - snd_echo_free(chip); return err; } @@ -2243,7 +2242,6 @@ static int snd_echo_resume(struct device *dev) if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { dev_err(chip->card->dev, "cannot grab irq\n"); - snd_echo_free(chip); return -EBUSY; } chip->irq = pci->irq; -- GitLab From 3b87dc3e0ba624cc2a9df08a14d1653122d383f9 Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Mon, 10 Aug 2020 15:34:04 +0200 Subject: [PATCH 0386/1304] perf bench mem: Always memset source before memcpy [ Upstream commit 1beaef29c34154ccdcb3f1ae557f6883eda18840 ] For memcpy, the source pages are memset to zero only when --cycles is used. This leads to wildly different results with or without --cycles, since all sources pages are likely to be mapped to the same zero page without explicit writes. 
Before this fix: $ export cmd="./perf stat -e LLC-loads -- ./perf bench \ mem memcpy -s 1024MB -l 100 -f default" $ $cmd 2,935,826 LLC-loads 3.821677452 seconds time elapsed $ $cmd --cycles 217,533,436 LLC-loads 8.616725985 seconds time elapsed After this fix: $ $cmd 214,459,686 LLC-loads 8.674301124 seconds time elapsed $ $cmd --cycles 214,758,651 LLC-loads 8.644480006 seconds time elapsed Fixes: 47b5757bac03c338 ("perf bench mem: Move boilerplate memory allocation to the infrastructure") Signed-off-by: Vincent Whitchurch Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: kernel@axis.com Link: http://lore.kernel.org/lkml/20200810133404.30829-1-vincent.whitchurch@axis.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/bench/mem-functions.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c index 0251dd348124..4864fc67d01b 100644 --- a/tools/perf/bench/mem-functions.c +++ b/tools/perf/bench/mem-functions.c @@ -222,12 +222,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info * return 0; } -static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst) +static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst) { - u64 cycle_start = 0ULL, cycle_end = 0ULL; - memcpy_t fn = r->fn.memcpy; - int i; - /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */ memset(src, 0, size); @@ -236,6 +232,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo * to not measure page fault overhead: */ fn(dst, src, size); +} + +static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst) +{ + u64 cycle_start = 0ULL, cycle_end = 0ULL; + memcpy_t fn = r->fn.memcpy; + int i; + + memcpy_prefault(fn, size, src, dst); cycle_start = get_cycles(); for (i = 0; i 
< nr_loops; ++i) @@ -251,11 +256,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void memcpy_t fn = r->fn.memcpy; int i; - /* - * We prefault the freshly allocated memory range here, - * to not measure page fault overhead: - */ - fn(dst, src, size); + memcpy_prefault(fn, size, src, dst); BUG_ON(gettimeofday(&tv_start, NULL)); for (i = 0; i < nr_loops; ++i) -- GitLab From df8caaf9ef87cbc050bff4d9569d2c1c87d23389 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20D=C3=ADaz?= Date: Wed, 12 Aug 2020 17:15:17 -0500 Subject: [PATCH 0387/1304] tools build feature: Quote CC and CXX for their arguments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit fa5c893181ed2ca2f96552f50073786d2cfce6c0 ] When using a cross-compilation environment, such as OpenEmbedded, the CC an CXX variables are set to something more than just a command: there are arguments (such as --sysroot) that need to be passed on to the compiler so that the right set of headers and libraries are used. For the particular case that our systems detected, CC is set to the following: export CC="aarch64-linaro-linux-gcc --sysroot=/oe/build/tmp/work/machine/perf/1.0-r9/recipe-sysroot" Without quotes, detection is as follows: Auto-detecting system features: ... dwarf: [ OFF ] ... dwarf_getlocations: [ OFF ] ... glibc: [ OFF ] ... gtk2: [ OFF ] ... libbfd: [ OFF ] ... libcap: [ OFF ] ... libelf: [ OFF ] ... libnuma: [ OFF ] ... numa_num_possible_cpus: [ OFF ] ... libperl: [ OFF ] ... libpython: [ OFF ] ... libcrypto: [ OFF ] ... libunwind: [ OFF ] ... libdw-dwarf-unwind: [ OFF ] ... zlib: [ OFF ] ... lzma: [ OFF ] ... get_cpuid: [ OFF ] ... bpf: [ OFF ] ... libaio: [ OFF ] ... libzstd: [ OFF ] ... disassembler-four-args: [ OFF ] Makefile.config:414: *** No gnu/libc-version.h found, please install glibc-dev[el]. Stop. 
Makefile.perf:230: recipe for target 'sub-make' failed make[1]: *** [sub-make] Error 2 Makefile:69: recipe for target 'all' failed make: *** [all] Error 2 With CC and CXX quoted, some of those features are now detected. Fixes: e3232c2f39ac ("tools build feature: Use CC and CXX from parent") Signed-off-by: Daniel Díaz Reviewed-by: Thomas Hebb Cc: Alexei Starovoitov Cc: Andrii Nakryiko Cc: Daniel Borkmann Cc: Jiri Olsa Cc: John Fastabend Cc: KP Singh Cc: Martin KaFai Lau Cc: Namhyung Kim Cc: Song Liu Cc: Stephane Eranian Cc: Yonghong Song Link: http://lore.kernel.org/lkml/20200812221518.2869003-1-daniel.diaz@linaro.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/build/Makefile.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 7d9d70c0b380..7c17f17ea2cd 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -7,7 +7,7 @@ endif feature_check = $(eval $(feature_check_code)) define feature_check_code - feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC=$(CC) CXX=$(CXX) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) + feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0) endef feature_set = $(eval $(feature_set_code)) -- GitLab From 014ec97717f4794a1c085aa4fdc924018c4fd999 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 14 Aug 2020 14:42:45 +0200 Subject: [PATCH 0388/1304] sh: landisk: Add missing initialization of 
sh_io_port_base [ Upstream commit 0c64a0dce51faa9c706fdf1f957d6f19878f4b81 ] The Landisk setup code maps the CF IDE area using ioremap_prot(), and passes the resulting virtual addresses to the pata_platform driver, disguising them as I/O port addresses. Hence the pata_platform driver translates them again using ioport_map(). As CONFIG_GENERIC_IOMAP=n, and CONFIG_HAS_IOPORT_MAP=y, the SuperH-specific mapping code in arch/sh/kernel/ioport.c translates I/O port addresses to virtual addresses by adding sh_io_port_base, which defaults to -1, thus breaking the assumption of an identity mapping. Fix this by setting sh_io_port_base to zero. Fixes: 37b7a97884ba64bf ("sh: machvec IO death.") Signed-off-by: Geert Uytterhoeven Signed-off-by: Rich Felker Signed-off-by: Sasha Levin --- arch/sh/boards/mach-landisk/setup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/sh/boards/mach-landisk/setup.c b/arch/sh/boards/mach-landisk/setup.c index f1147caebacf..af69fb7fef7c 100644 --- a/arch/sh/boards/mach-landisk/setup.c +++ b/arch/sh/boards/mach-landisk/setup.c @@ -85,6 +85,9 @@ device_initcall(landisk_devices_setup); static void __init landisk_setup(char **cmdline_p) { + /* I/O port identity mapping */ + __set_io_port_base(0); + /* LED ON */ __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED); -- GitLab From 2406c45db3de8da8052eccb1dcfde69ea5988b19 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 6 Aug 2020 23:26:22 -0700 Subject: [PATCH 0389/1304] khugepaged: retract_page_tables() remember to test exit commit 18e77600f7a1ed69f8ce46c9e11cad0985712dfa upstream. Only once have I seen this scenario (and forgot even to notice what forced the eventual crash): a sequence of "BUG: Bad page map" alerts from vm_normal_page(), from zap_pte_range() servicing exit_mmap(); pmd:00000000, pte values corresponding to data in physical page 0. 
The pte mappings being zapped in this case were supposed to be from a huge page of ext4 text (but could as well have been shmem): my belief is that it was racing with collapse_file()'s retract_page_tables(), found *pmd pointing to a page table, locked it, but *pmd had become 0 by the time start_pte was decided. In most cases, that possibility is excluded by holding mmap lock; but exit_mmap() proceeds without mmap lock. Most of what's run by khugepaged checks khugepaged_test_exit() after acquiring mmap lock: khugepaged_collapse_pte_mapped_thps() and hugepage_vma_revalidate() do so, for example. But retract_page_tables() did not: fix that. The fix is for retract_page_tables() to check khugepaged_test_exit(), after acquiring mmap lock, before doing anything to the page table. Getting the mmap lock serializes with __mmput(), which briefly takes and drops it in __khugepaged_exit(); then the khugepaged_test_exit() check on mm_users makes sure we don't touch the page table once exit_mmap() might reach it, since exit_mmap() will be proceeding without mmap lock, not expecting anyone to be racing with it. Fixes: f3f0e1d2150b ("khugepaged: add support of collapse for tmpfs/shmem pages") Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Acked-by: Kirill A. 
Shutemov Cc: Andrea Arcangeli Cc: Mike Kravetz Cc: Song Liu Cc: [4.8+] Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008021215400.27773@eggly.anvils Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/khugepaged.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index ecefdba4b0dd..483c4573695a 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1251,6 +1251,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot) static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) { struct vm_area_struct *vma; + struct mm_struct *mm; unsigned long addr; pmd_t *pmd, _pmd; @@ -1264,7 +1265,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) continue; if (vma->vm_end < addr + HPAGE_PMD_SIZE) continue; - pmd = mm_find_pmd(vma->vm_mm, addr); + mm = vma->vm_mm; + pmd = mm_find_pmd(mm, addr); if (!pmd) continue; /* @@ -1273,14 +1275,16 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * re-fault. Not ideal, but it's more important to not disturb * the system too much. 
*/ - if (down_write_trylock(&vma->vm_mm->mmap_sem)) { - spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); - /* assume page table is clear */ - _pmd = pmdp_collapse_flush(vma, addr, pmd); - spin_unlock(ptl); - up_write(&vma->vm_mm->mmap_sem); - mm_dec_nr_ptes(vma->vm_mm); - pte_free(vma->vm_mm, pmd_pgtable(_pmd)); + if (down_write_trylock(&mm->mmap_sem)) { + if (!khugepaged_test_exit(mm)) { + spinlock_t *ptl = pmd_lock(mm, pmd); + /* assume page table is clear */ + _pmd = pmdp_collapse_flush(vma, addr, pmd); + spin_unlock(ptl); + mm_dec_nr_ptes(mm); + pte_free(mm, pmd_pgtable(_pmd)); + } + up_write(&mm->mmap_sem); } } i_mmap_unlock_write(mapping); -- GitLab From c4368ab3ef78880ea27f5400e84e424184e8cee6 Mon Sep 17 00:00:00 2001 From: Tomasz Maciej Nowak Date: Thu, 27 Feb 2020 17:52:32 +0100 Subject: [PATCH 0390/1304] arm64: dts: marvell: espressobin: add ethernet alias MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5253cb8c00a6f4356760efb38bca0e0393aa06de upstream. The maker of this board and its variants, stores MAC address in U-Boot environment. Add alias for bootloader to recognise, to which ethernet node inject the factory MAC address. 
Signed-off-by: Tomasz Maciej Nowak Signed-off-by: Gregory CLEMENT [pali: Backported to 5.4 and older versions] Signed-off-by: Pali Rohár Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts index 3ab25ad402b9..6cbdd66921aa 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts @@ -19,6 +19,12 @@ model = "Globalscale Marvell ESPRESSOBin Board"; compatible = "globalscale,espressobin", "marvell,armada3720", "marvell,armada3710"; + aliases { + ethernet0 = ð0; + serial0 = &uart0; + serial1 = &uart1; + }; + chosen { stdout-path = "serial0:115200n8"; }; -- GitLab From bfdd8596994870126a62e673b82e6208f96727cd Mon Sep 17 00:00:00 2001 From: Marius Iacob Date: Sat, 1 Aug 2020 15:34:46 +0300 Subject: [PATCH 0391/1304] drm: Added orientation quirk for ASUS tablet model T103HAF commit b5ac98cbb8e5e30c34ebc837d1e5a3982d2b5f5c upstream. 
Signed-off-by: Marius Iacob Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20200801123445.1514567-1-themariusus@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_panel_orientation_quirks.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index fa5c25d36d3d..652de972c3ae 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -107,6 +107,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Asus T103HAF */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* GPD MicroPC (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), -- GitLab From 2c1684171564a7e91afce2897469c81bf7d6085a Mon Sep 17 00:00:00 2001 From: Sandeep Raghuraman Date: Thu, 6 Aug 2020 22:52:20 +0530 Subject: [PATCH 0392/1304] drm/amdgpu: Fix bug where DPM is not enabled after hibernate and resume commit f87812284172a9809820d10143b573d833cd3f75 upstream. Reproducing bug report here: After hibernating and resuming, DPM is not enabled. This remains the case even if you test hibernate using the steps here: https://www.kernel.org/doc/html/latest/power/basic-pm-debugging.html I debugged the problem, and figured out that in the file hardwaremanager.c, in the function, phm_enable_dynamic_state_management(), the check 'if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev) && adev->in_suspend)' returns true for the hibernate case, and false for the suspend case. 
This means that for the hibernate case, the AMDGPU driver doesn't enable DPM (even though it should) and simply returns from that function. In the suspend case, it goes ahead and enables DPM, even though it doesn't need to. I debugged further, and found out that in the case of suspend, for the CIK/Hawaii GPUs, smum_is_dpm_running(hwmgr) returns false, while in the case of hibernate, smum_is_dpm_running(hwmgr) returns true. For CIK, the ci_is_dpm_running() function calls the ci_is_smc_ram_running() function, which is ultimately used to determine if DPM is currently enabled or not, and this seems to provide the wrong answer. I've changed the ci_is_dpm_running() function to instead use the same method that some other AMD GPU chips do (e.g Fiji), which seems to read the voltage controller. I've tested on my R9 390 and it seems to work correctly for both suspend and hibernate use cases, and has been stable so far. Bug: https://bugzilla.kernel.org/show_bug.cgi?id=208839 Signed-off-by: Sandeep Raghuraman Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 083aa71487e8..db87cb8930d2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -2723,7 +2723,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) { - return ci_is_smc_ram_running(hwmgr); + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, + VOLTAGE_CONTROLLER_ON)) + ? 
true : false; } static int ci_smu_init(struct pp_hwmgr *hwmgr) -- GitLab From d18b78abc0c6e7d3119367c931c583e02d466495 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 21 Aug 2020 11:05:39 +0200 Subject: [PATCH 0393/1304] Linux 4.19.141 Tested-by: Guenter Roeck Tested-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c5ee1c10a39c..5b64e1141984 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 140 +SUBLEVEL = 141 EXTRAVERSION = NAME = "People's Front" -- GitLab From 7bce156524ee7f2be727cce84eae205c76922a98 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 21 Aug 2020 14:42:26 +0200 Subject: [PATCH 0394/1304] ANDROID: Revert "PCI: Probe bridge window attributes once at enumeration-time" This reverts commit 54a7a9d75c0727433feb634b1025c84589949e02 which is commit 51c48b310183ab6ba5419edfc6a8de889cc04521 as it breaks the api for struct pci_dev for an issue that should not be relevant for Android systems. 
Bug: 161946584 Signed-off-by: Greg Kroah-Hartman Change-Id: I3fdf5ce243cbc2c4956decf773e057b7ce9a480a --- drivers/pci/probe.c | 52 ----------------------------------------- drivers/pci/setup-bus.c | 45 +++++++++++++++++++++++++++++++---- include/linux/pci.h | 3 --- 3 files changed, 41 insertions(+), 59 deletions(-) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 9a5b6a8e2502..cbc0d8da7483 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -348,57 +348,6 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) } } -static void pci_read_bridge_windows(struct pci_dev *bridge) -{ - u16 io; - u32 pmem, tmp; - - pci_read_config_word(bridge, PCI_IO_BASE, &io); - if (!io) { - pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); - pci_read_config_word(bridge, PCI_IO_BASE, &io); - pci_write_config_word(bridge, PCI_IO_BASE, 0x0); - } - if (io) - bridge->io_window = 1; - - /* - * DECchip 21050 pass 2 errata: the bridge may miss an address - * disconnect boundary by one PCI data phase. Workaround: do not - * use prefetching on this device. - */ - if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) - return; - - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - if (!pmem) { - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, - 0xffe0fff0); - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); - } - if (!pmem) - return; - - bridge->pref_window = 1; - - if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { - - /* - * Bridge claims to have a 64-bit prefetchable memory - * window; verify that the upper bits are actually - * writable. 
- */ - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, - 0xffffffff); - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); - if (tmp) - bridge->pref_64_window = 1; - } -} - static void pci_read_bridge_io(struct pci_bus *child) { struct pci_dev *dev = child->self; @@ -1763,7 +1712,6 @@ int pci_setup_device(struct pci_dev *dev) pci_read_irq(dev); dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); - pci_read_bridge_windows(dev); set_pcie_hotplug_bridge(dev); pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); if (pos) { diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 87c8190de622..8e5b00a420a5 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -735,21 +735,58 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i) base/limit registers must be read-only and read as 0. */ static void pci_bridge_check_ranges(struct pci_bus *bus) { + u16 io; + u32 pmem; struct pci_dev *bridge = bus->self; - struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; + struct resource *b_res; + b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; b_res[1].flags |= IORESOURCE_MEM; - if (bridge->io_window) + pci_read_config_word(bridge, PCI_IO_BASE, &io); + if (!io) { + pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); + pci_read_config_word(bridge, PCI_IO_BASE, &io); + pci_write_config_word(bridge, PCI_IO_BASE, 0x0); + } + if (io) b_res[0].flags |= IORESOURCE_IO; - if (bridge->pref_window) { + /* DECchip 21050 pass 2 errata: the bridge may miss an address + disconnect boundary by one PCI data phase. + Workaround: do not use prefetching on this device. 
*/ + if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) + return; + + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + if (!pmem) { + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, + 0xffe0fff0); + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); + } + if (pmem) { b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; - if (bridge->pref_64_window) { + if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == + PCI_PREF_RANGE_TYPE_64) { b_res[2].flags |= IORESOURCE_MEM_64; b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; } } + + /* double check if bridge does support 64 bit pref */ + if (b_res[2].flags & IORESOURCE_MEM_64) { + u32 mem_base_hi, tmp; + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, + &mem_base_hi); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, + 0xffffffff); + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); + if (!tmp) + b_res[2].flags &= ~IORESOURCE_MEM_64; + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, + mem_base_hi); + } } /* Helper function for sizing routines: find first available diff --git a/include/linux/pci.h b/include/linux/pci.h index bb31ed3e0978..01dff69a3a21 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -375,9 +375,6 @@ struct pci_dev { bool match_driver; /* Skip attaching driver */ unsigned int transparent:1; /* Subtractive decode bridge */ - unsigned int io_window:1; /* Bridge has I/O window */ - unsigned int pref_window:1; /* Bridge has pref mem window */ - unsigned int pref_64_window:1; /* Pref mem window is 64-bit */ unsigned int multifunction:1; /* Multi-function device */ unsigned int is_busmaster:1; /* Is busmaster */ -- GitLab From 5473d45de03644eef58c95d5f67ca1821cbfd122 Mon Sep 17 00:00:00 2001 From: Chanho Park Date: Tue, 4 Aug 2020 17:57:58 +0900 Subject: [PATCH 0395/1304] ANDROID: tty: fix tty name overflow The tty name can be up to 8 chars if id is greater than 10 such as ttyUSB11. 
In that case, the name will be overflowed. To prevent this, this patch removes snprintf and adds comparison the idx value of pdev_tty_port only if pdev_tty_port is specified. Bug: 157525691 Bug: 161501868 Fixes: 21d085e1cc2c ("ANDROID: serdev: restrict claim of platform devices") Change-Id: I2a766c9a83a09a1d386686638d8e9c529eeeb735 Signed-off-by: Chanho Park --- drivers/tty/serdev/serdev-ttyport.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index b6b7ea6b5b82..10704522c353 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -301,12 +301,16 @@ struct device *serdev_tty_port_register(struct tty_port *port, * be ignored. */ if (parent->bus == &platform_bus_type) { - char tty_port_name[7]; - - sprintf(tty_port_name, "%s%d", drv->name, idx); - if (pdev_tty_port && - !strcmp(pdev_tty_port, tty_port_name)) { - platform = true; + if (pdev_tty_port) { + unsigned long pdev_idx; + int tty_len = strlen(drv->name); + + if (!strncmp(pdev_tty_port, drv->name, tty_len)) { + if (!kstrtoul(pdev_tty_port + tty_len, 10, + &pdev_idx) && pdev_idx == idx) { + platform = true; + } + } } } -- GitLab From b93a3871edf5126531f4a80df740c1d13b50a6f8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Jul 2020 16:49:11 +0100 Subject: [PATCH 0396/1304] drm/vgem: Replace opencoded version of drm_gem_dumb_map_offset() [ Upstream commit 119c53d2d4044c59c450c4f5a568d80b9d861856 ] drm_gem_dumb_map_offset() now exists and does everything vgem_gem_dump_map does and *ought* to do. In particular, vgem_gem_dumb_map() was trying to reject mmapping an imported dmabuf by checking the existence of obj->filp. Unfortunately, we always allocated an obj->filp, even if unused for an imported dmabuf. 
Instead, the drm_gem_dumb_map_offset(), since commit 90378e589192 ("drm/gem: drm_gem_dumb_map_offset(): reject dma-buf"), uses the obj->import_attach to reject such invalid mmaps. This prevents vgem from allowing userspace mmapping the dumb handle and attempting to incorrectly fault in remote pages belonging to another device, where there may not even be a struct page. v2: Use the default drm_gem_dumb_map_offset() callback Fixes: af33a9190d02 ("drm/vgem: Enable dmabuf import interfaces") Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter Cc: # v4.13+ Link: https://patchwork.freedesktop.org/patch/msgid/20200708154911.21236-1-chris@chris-wilson.co.uk Signed-off-by: Sasha Levin --- drivers/gpu/drm/vgem/vgem_drv.c | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 4709f08f39e4..1c1a435d354b 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -219,32 +219,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, return 0; } -static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset) -{ - struct drm_gem_object *obj; - int ret; - - obj = drm_gem_object_lookup(file, handle); - if (!obj) - return -ENOENT; - - if (!obj->filp) { - ret = -EINVAL; - goto unref; - } - - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto unref; - - *offset = drm_vma_node_offset_addr(&obj->vma_node); -unref: - drm_gem_object_put_unlocked(obj); - - return ret; -} - static struct drm_ioctl_desc vgem_ioctls[] = { DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), @@ -438,7 +412,6 @@ static struct drm_driver vgem_driver = { .fops = &vgem_driver_fops, .dumb_create = vgem_gem_dumb_create, - .dumb_map_offset = vgem_gem_dumb_map, .prime_handle_to_fd = 
drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, -- GitLab From 6cb22ed4f00f73cb5e36581d42542ce1192c99b3 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 10 Jul 2020 22:11:23 +0900 Subject: [PATCH 0397/1304] perf probe: Fix memory leakage when the probe point is not found [ Upstream commit 12d572e785b15bc764e956caaa8a4c846fd15694 ] Fix the memory leakage in debuginfo__find_trace_events() when the probe point is not found in the debuginfo. If there is no probe point found in the debuginfo, debuginfo__find_probes() will NOT return -ENOENT, but 0. Thus the caller of debuginfo__find_probes() must check the tf.ntevs and release the allocated memory for the array of struct probe_trace_event. The current code releases the memory only if the debuginfo__find_probes() hits an error but not checks tf.ntevs. In the result, the memory allocated on *tevs are not released if tf.ntevs == 0. This fixes the memory leakage by checking tf.ntevs == 0 in addition to ret < 0. 
Fixes: ff741783506c ("perf probe: Introduce debuginfo to encapsulate dwarf information") Signed-off-by: Masami Hiramatsu Reviewed-by: Srikar Dronamraju Cc: Andi Kleen Cc: Oleg Nesterov Cc: stable@vger.kernel.org Link: http://lore.kernel.org/lkml/159438668346.62703.10887420400718492503.stgit@devnote2 Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/util/probe-finder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 60169196b948..4da4ec255246 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1351,7 +1351,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, tf.ntevs = 0; ret = debuginfo__find_probes(dbg, &tf.pf); - if (ret < 0) { + if (ret < 0 || tf.ntevs == 0) { for (i = 0; i < tf.ntevs; i++) clear_probe_trace_event(&tf.tevs[i]); zfree(tevs); -- GitLab From 17c08ee00b59067ed0537d91fd89873b3bba6491 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 6 Aug 2020 23:26:25 -0700 Subject: [PATCH 0398/1304] khugepaged: khugepaged_test_exit() check mmget_still_valid() [ Upstream commit bbe98f9cadff58cdd6a4acaeba0efa8565dabe65 ] Move collapse_huge_page()'s mmget_still_valid() check into khugepaged_test_exit() itself. collapse_huge_page() is used for anon THP only, and earned its mmget_still_valid() check because it inserts a huge pmd entry in place of the page table's pmd entry; whereas collapse_file()'s retract_page_tables() or collapse_pte_mapped_thp() merely clears the page table's pmd entry. But core dumping without mmap lock must have been as open to mistaking a racily cleared pmd entry for a page table at physical page 0, as exit_mmap() was. And we certainly have no interest in mapping as a THP once dumping core. 
Fixes: 59ea6d06cfa9 ("coredump: fix race condition between collapse_huge_page() and core dumping") Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Cc: Andrea Arcangeli Cc: Song Liu Cc: Mike Kravetz Cc: Kirill A. Shutemov Cc: [4.8+] Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008021217020.27773@eggly.anvils Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/khugepaged.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 483c4573695a..fbb3ac9ce086 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -394,7 +394,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm, static inline int khugepaged_test_exit(struct mm_struct *mm) { - return atomic_read(&mm->mm_users) == 0; + return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm); } static bool hugepage_vma_check(struct vm_area_struct *vma, @@ -1005,9 +1005,6 @@ static void collapse_huge_page(struct mm_struct *mm, * handled by the anon_vma lock + PG_lock. */ down_write(&mm->mmap_sem); - result = SCAN_ANY_PROCESS; - if (!mmget_still_valid(mm)) - goto out; result = hugepage_vma_revalidate(mm, address, &vma); if (result) goto out; -- GitLab From 2ef7ebb143705147bc74db2bc1bd0214c212e6bc Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 20 Aug 2020 17:42:02 -0700 Subject: [PATCH 0399/1304] khugepaged: adjust VM_BUG_ON_MM() in __khugepaged_enter() [ Upstream commit f3f99d63a8156c7a4a6b20aac22b53c5579c7dc1 ] syzbot crashes on the VM_BUG_ON_MM(khugepaged_test_exit(mm), mm) in __khugepaged_enter(): yes, when one thread is about to dump core, has set core_state, and is waiting for others, another might do something calling __khugepaged_enter(), which now crashes because I lumped the core_state test (known as "mmget_still_valid") into khugepaged_test_exit(). I still think it's best to lump them together, so just in this exceptional case, check mm->mm_users directly instead of khugepaged_test_exit(). 
Fixes: bbe98f9cadff ("khugepaged: khugepaged_test_exit() check mmget_still_valid()") Reported-by: syzbot Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton Acked-by: Yang Shi Cc: "Kirill A. Shutemov" Cc: Andrea Arcangeli Cc: Song Liu Cc: Mike Kravetz Cc: Eric Dumazet Cc: [4.8+] Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008141503370.18085@eggly.anvils Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/khugepaged.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index fbb3ac9ce086..f37be43f8cae 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -427,7 +427,7 @@ int __khugepaged_enter(struct mm_struct *mm) return -ENOMEM; /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); + VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { free_mm_slot(mm_slot); return 0; -- GitLab From dd39b6f6c29b354f14f835c8dcda82058b428229 Mon Sep 17 00:00:00 2001 From: Marcos Paulo de Souza Date: Fri, 21 Feb 2020 14:56:12 +0100 Subject: [PATCH 0400/1304] btrfs: export helpers for subvolume name/id resolution [ Upstream commit c0c907a47dccf2cf26251a8fb4a8e7a3bf79ce84 ] The functions will be used outside of export.c and super.c to allow resolving subvolume name from a given id, eg. for subvolume deletion by id ioctl. 
Signed-off-by: Marcos Paulo de Souza Reviewed-by: David Sterba [ split from the next patch ] Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/ctree.h | 2 ++ fs/btrfs/export.c | 8 ++++---- fs/btrfs/export.h | 5 +++++ fs/btrfs/super.c | 8 ++++---- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 15cb96ad15d8..554727d82d43 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -3271,6 +3271,8 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); int btrfs_parse_options(struct btrfs_fs_info *info, char *options, unsigned long new_flags); int btrfs_sync_fs(struct super_block *sb, int wait); +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid); static inline __printf(2, 3) __cold void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 1f3755b3a37a..665ec85cb09b 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -57,9 +57,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, return type; } -static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, - u64 root_objectid, u32 generation, - int check_generation) +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root; @@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1); } -static struct dentry *btrfs_get_parent(struct dentry *child) +struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = d_inode(child); struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h index 57488ecd7d4e..f32f4113c976 100644 --- a/fs/btrfs/export.h +++ 
b/fs/btrfs/export.h @@ -18,4 +18,9 @@ struct btrfs_fid { u64 parent_root_objectid; } __attribute__ ((packed)); +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, + u64 root_objectid, u32 generation, + int check_generation); +struct dentry *btrfs_get_parent(struct dentry *child); + #endif diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index ed539496089f..3e6e21a7c5e6 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1000,8 +1000,8 @@ static int btrfs_parse_subvol_options(const char *options, char **subvol_name, return error; } -static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, - u64 subvol_objectid) +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, + u64 subvol_objectid) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_root *fs_root; @@ -1412,8 +1412,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, goto out; } } - subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb), - subvol_objectid); + subvol_name = btrfs_get_subvol_name_from_objectid( + btrfs_sb(mnt->mnt_sb), subvol_objectid); if (IS_ERR(subvol_name)) { root = ERR_CAST(subvol_name); subvol_name = NULL; -- GitLab From f50a0abaf3333cf6d967f56e0b5fa6bcb8ccc8f2 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 22 Jul 2020 11:12:46 -0400 Subject: [PATCH 0401/1304] btrfs: don't show full path of bind mounts in subvol= [ Upstream commit 3ef3959b29c4a5bd65526ab310a1a18ae533172a ] Chris Murphy reported a problem where rpm ostree will bind mount a bunch of things for whatever voodoo it's doing. But when it does this /proc/mounts shows something like /dev/sda /mnt/test btrfs rw,relatime,subvolid=256,subvol=/foo 0 0 /dev/sda /mnt/test/baz btrfs rw,relatime,subvolid=256,subvol=/foo/bar 0 0 Despite subvolid=256 being subvol=/foo. This is because we're just spitting out the dentry of the mount point, which in the case of bind mounts is the source path for the mountpoint. 
Instead we should spit out the path to the actual subvol. Fix this by looking up the name for the subvolid we have mounted. With this fix the same test looks like this /dev/sda /mnt/test btrfs rw,relatime,subvolid=256,subvol=/foo 0 0 /dev/sda /mnt/test/baz btrfs rw,relatime,subvolid=256,subvol=/foo 0 0 Reported-by: Chris Murphy CC: stable@vger.kernel.org # 4.4+ Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/super.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 3e6e21a7c5e6..4d2810a32b4a 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1282,6 +1282,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) { struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); const char *compress_type; + const char *subvol_name; if (btrfs_test_opt(info, DEGRADED)) seq_puts(seq, ",degraded"); @@ -1366,8 +1367,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) seq_puts(seq, ",ref_verify"); seq_printf(seq, ",subvolid=%llu", BTRFS_I(d_inode(dentry))->root->root_key.objectid); - seq_puts(seq, ",subvol="); - seq_dentry(seq, dentry, " \t\n\\"); + subvol_name = btrfs_get_subvol_name_from_objectid(info, + BTRFS_I(d_inode(dentry))->root->root_key.objectid); + if (!IS_ERR(subvol_name)) { + seq_puts(seq, ",subvol="); + seq_escape(seq, subvol_name, " \t\n\\"); + kfree(subvol_name); + } return 0; } -- GitLab From 2a3d84f1c2654c7210eab41e5894db4745db5029 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 17 Jul 2019 14:41:45 +0300 Subject: [PATCH 0402/1304] btrfs: Move free_pages_out label in inline extent handling branch in compress_file_range [ Upstream commit cecc8d9038d164eda61fbcd72520975a554ea63e ] This label is only executed if compress_file_range fails to create an inline extent. So move its code in the semantically related inline extent handling branch. No functional changes. 
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/inode.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 1656ef0e959f..8507192cd644 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -628,7 +628,14 @@ static noinline void compress_file_range(struct inode *inode, PAGE_SET_WRITEBACK | page_error_op | PAGE_END_WRITEBACK); - goto free_pages_out; + + for (i = 0; i < nr_pages; i++) { + WARN_ON(pages[i]->mapping); + put_page(pages[i]); + } + kfree(pages); + + return; } } @@ -706,13 +713,6 @@ static noinline void compress_file_range(struct inode *inode, *num_added += 1; return; - -free_pages_out: - for (i = 0; i < nr_pages; i++) { - WARN_ON(pages[i]->mapping); - put_page(pages[i]); - } - kfree(pages); } static void free_async_extent_pages(struct async_extent *async_extent) -- GitLab From 35c1576814711791ecde4677bf5ef11254a0a940 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 28 Jul 2020 16:39:26 +0800 Subject: [PATCH 0403/1304] btrfs: inode: fix NULL pointer dereference if inode doesn't need compression [ Upstream commit 1e6e238c3002ea3611465ce5f32777ddd6a40126 ] [BUG] There is a bug report of NULL pointer dereference caused in compress_file_extent(): Oops: Kernel access of bad area, sig: 11 [#1] LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries Workqueue: btrfs-delalloc btrfs_delalloc_helper [btrfs] NIP [c008000006dd4d34] compress_file_range.constprop.41+0x75c/0x8a0 [btrfs] LR [c008000006dd4d1c] compress_file_range.constprop.41+0x744/0x8a0 [btrfs] Call Trace: [c000000c69093b00] [c008000006dd4d1c] compress_file_range.constprop.41+0x744/0x8a0 [btrfs] (unreliable) [c000000c69093bd0] [c008000006dd4ebc] async_cow_start+0x44/0xa0 [btrfs] [c000000c69093c10] [c008000006e14824] normal_work_helper+0xdc/0x598 [btrfs] [c000000c69093c80] [c0000000001608c0] process_one_work+0x2c0/0x5b0 [c000000c69093d10] 
[c000000000160c38] worker_thread+0x88/0x660 [c000000c69093db0] [c00000000016b55c] kthread+0x1ac/0x1c0 [c000000c69093e20] [c00000000000b660] ret_from_kernel_thread+0x5c/0x7c ---[ end trace f16954aa20d822f6 ]--- [CAUSE] For the following execution route of compress_file_range(), it's possible to hit NULL pointer dereference: compress_file_extent() |- pages = NULL; |- start = async_chunk->start = 0; |- end = async_chunk = 4095; |- nr_pages = 1; |- inode_need_compress() == false; <<< Possible, see later explanation | Now, we have nr_pages = 1, pages = NULL |- cont: |- ret = cow_file_range_inline(); |- if (ret <= 0) { |- for (i = 0; i < nr_pages; i++) { |- WARN_ON(pages[i]->mapping); <<< Crash To enter above call execution branch, we need the following race: Thread 1 (chattr) | Thread 2 (writeback) --------------------------+------------------------------ | btrfs_run_delalloc_range | |- inode_need_compress = true | |- cow_file_range_async() btrfs_ioctl_set_flag() | |- binode_flags |= | BTRFS_INODE_NOCOMPRESS | | compress_file_range() | |- inode_need_compress = false | |- nr_page = 1 while pages = NULL | | Then hit the crash [FIX] This patch will fix it by checking @pages before doing accessing it. This patch is only designed as a hot fix and easy to backport. More elegant fix may make btrfs only check inode_need_compress() once to avoid such race, but that would be another story. 
Reported-by: Luciano Chavez Fixes: 4d3a800ebb12 ("btrfs: merge nr_pages input and output parameter in compress_pages") CC: stable@vger.kernel.org # 4.14.x: cecc8d9038d16: btrfs: Move free_pages_out label in inline extent handling branch in compress_file_range CC: stable@vger.kernel.org # 4.14+ Signed-off-by: Qu Wenruo Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/inode.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8507192cd644..bdfe159a60da 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -629,11 +629,18 @@ static noinline void compress_file_range(struct inode *inode, page_error_op | PAGE_END_WRITEBACK); - for (i = 0; i < nr_pages; i++) { - WARN_ON(pages[i]->mapping); - put_page(pages[i]); + /* + * Ensure we only free the compressed pages if we have + * them allocated, as we can still reach here with + * inode_need_compress() == false. + */ + if (pages) { + for (i = 0; i < nr_pages; i++) { + WARN_ON(pages[i]->mapping); + put_page(pages[i]); + } + kfree(pages); } - kfree(pages); return; } -- GitLab From 76c38196391b9a33894e0af8465dcef65e8deeab Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 21 Jul 2020 10:17:50 -0400 Subject: [PATCH 0404/1304] btrfs: sysfs: use NOFS for device creation [ Upstream commit a47bd78d0c44621efb98b525d04d60dc4d1a79b0 ] Dave hit this splat during testing btrfs/078: ====================================================== WARNING: possible circular locking dependency detected 5.8.0-rc6-default+ #1191 Not tainted ------------------------------------------------------ kswapd0/75 is trying to acquire lock: ffffa040e9d04ff8 (&delayed_node->mutex){+.+.}-{3:3}, at: __btrfs_release_delayed_node.part.0+0x3f/0x310 [btrfs] but task is already holding lock: ffffffff8b0c8040 (fs_reclaim){+.+.}-{0:0}, at: __fs_reclaim_acquire+0x5/0x30 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #2 (fs_reclaim){+.+.}-{0:0}: __lock_acquire+0x56f/0xaa0 lock_acquire+0xa3/0x440 fs_reclaim_acquire.part.0+0x25/0x30 __kmalloc_track_caller+0x49/0x330 kstrdup+0x2e/0x60 __kernfs_new_node.constprop.0+0x44/0x250 kernfs_new_node+0x25/0x50 kernfs_create_link+0x34/0xa0 sysfs_do_create_link_sd+0x5e/0xd0 btrfs_sysfs_add_devices_dir+0x65/0x100 [btrfs] btrfs_init_new_device+0x44c/0x12b0 [btrfs] btrfs_ioctl+0xc3c/0x25c0 [btrfs] ksys_ioctl+0x68/0xa0 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x50/0xe0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #1 (&fs_info->chunk_mutex){+.+.}-{3:3}: __lock_acquire+0x56f/0xaa0 lock_acquire+0xa3/0x440 __mutex_lock+0xa0/0xaf0 btrfs_chunk_alloc+0x137/0x3e0 [btrfs] find_free_extent+0xb44/0xfb0 [btrfs] btrfs_reserve_extent+0x9b/0x180 [btrfs] btrfs_alloc_tree_block+0xc1/0x350 [btrfs] alloc_tree_block_no_bg_flush+0x4a/0x60 [btrfs] __btrfs_cow_block+0x143/0x7a0 [btrfs] btrfs_cow_block+0x15f/0x310 [btrfs] push_leaf_right+0x150/0x240 [btrfs] split_leaf+0x3cd/0x6d0 [btrfs] btrfs_search_slot+0xd14/0xf70 [btrfs] btrfs_insert_empty_items+0x64/0xc0 [btrfs] __btrfs_commit_inode_delayed_items+0xb2/0x840 [btrfs] btrfs_async_run_delayed_root+0x10e/0x1d0 [btrfs] btrfs_work_helper+0x2f9/0x650 [btrfs] process_one_work+0x22c/0x600 worker_thread+0x50/0x3b0 kthread+0x137/0x150 ret_from_fork+0x1f/0x30 -> #0 (&delayed_node->mutex){+.+.}-{3:3}: check_prev_add+0x98/0xa20 validate_chain+0xa8c/0x2a00 __lock_acquire+0x56f/0xaa0 lock_acquire+0xa3/0x440 __mutex_lock+0xa0/0xaf0 __btrfs_release_delayed_node.part.0+0x3f/0x310 [btrfs] btrfs_evict_inode+0x3bf/0x560 [btrfs] evict+0xd6/0x1c0 dispose_list+0x48/0x70 prune_icache_sb+0x54/0x80 super_cache_scan+0x121/0x1a0 do_shrink_slab+0x175/0x420 shrink_slab+0xb1/0x2e0 shrink_node+0x192/0x600 balance_pgdat+0x31f/0x750 kswapd+0x206/0x510 kthread+0x137/0x150 ret_from_fork+0x1f/0x30 other info that might help us debug this: Chain exists of: &delayed_node->mutex --> &fs_info->chunk_mutex 
--> fs_reclaim Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(fs_reclaim); lock(&fs_info->chunk_mutex); lock(fs_reclaim); lock(&delayed_node->mutex); *** DEADLOCK *** 3 locks held by kswapd0/75: #0: ffffffff8b0c8040 (fs_reclaim){+.+.}-{0:0}, at: __fs_reclaim_acquire+0x5/0x30 #1: ffffffff8b0b50b8 (shrinker_rwsem){++++}-{3:3}, at: shrink_slab+0x54/0x2e0 #2: ffffa040e057c0e8 (&type->s_umount_key#26){++++}-{3:3}, at: trylock_super+0x16/0x50 stack backtrace: CPU: 2 PID: 75 Comm: kswapd0 Not tainted 5.8.0-rc6-default+ #1191 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba527-rebuilt.opensuse.org 04/01/2014 Call Trace: dump_stack+0x78/0xa0 check_noncircular+0x16f/0x190 check_prev_add+0x98/0xa20 validate_chain+0xa8c/0x2a00 __lock_acquire+0x56f/0xaa0 lock_acquire+0xa3/0x440 ? __btrfs_release_delayed_node.part.0+0x3f/0x310 [btrfs] __mutex_lock+0xa0/0xaf0 ? __btrfs_release_delayed_node.part.0+0x3f/0x310 [btrfs] ? __lock_acquire+0x56f/0xaa0 ? __btrfs_release_delayed_node.part.0+0x3f/0x310 [btrfs] ? lock_acquire+0xa3/0x440 ? btrfs_evict_inode+0x138/0x560 [btrfs] ? btrfs_evict_inode+0x2fe/0x560 [btrfs] ? __btrfs_release_delayed_node.part.0+0x3f/0x310 [btrfs] __btrfs_release_delayed_node.part.0+0x3f/0x310 [btrfs] btrfs_evict_inode+0x3bf/0x560 [btrfs] evict+0xd6/0x1c0 dispose_list+0x48/0x70 prune_icache_sb+0x54/0x80 super_cache_scan+0x121/0x1a0 do_shrink_slab+0x175/0x420 shrink_slab+0xb1/0x2e0 shrink_node+0x192/0x600 balance_pgdat+0x31f/0x750 kswapd+0x206/0x510 ? _raw_spin_unlock_irqrestore+0x3e/0x50 ? finish_wait+0x90/0x90 ? balance_pgdat+0x750/0x750 kthread+0x137/0x150 ? kthread_stop+0x2a0/0x2a0 ret_from_fork+0x1f/0x30 This is because we're holding the chunk_mutex while adding this device and adding its sysfs entries. We actually hold different locks in different places when calling this function, the dev_replace semaphore for instance in dev replace, so instead of moving this call around simply wrap it's operations in NOFS. 
CC: stable@vger.kernel.org # 4.14+ Reported-by: David Sterba Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/sysfs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index aefb0169d46d..afec808a763b 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" @@ -766,7 +767,9 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, { int error = 0; struct btrfs_device *dev; + unsigned int nofs_flag; + nofs_flag = memalloc_nofs_save(); list_for_each_entry(dev, &fs_devices->devices, dev_list) { struct hd_struct *disk; struct kobject *disk_kobj; @@ -785,6 +788,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices, if (error) break; } + memalloc_nofs_restore(nofs_flag); return error; } -- GitLab From 9660983738399465fd0e3b1977a61bbd29b2e5be Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Thu, 20 Aug 2020 17:42:11 -0700 Subject: [PATCH 0405/1304] romfs: fix uninitialized memory leak in romfs_dev_read() commit bcf85fcedfdd17911982a3e3564fcfec7b01eebd upstream. romfs has a superblock field that limits the size of the filesystem; data beyond that limit is never accessed. romfs_dev_read() fetches a caller-supplied number of bytes from the backing device. It returns 0 on success or an error code on failure; therefore, its API can't represent short reads, it's all-or-nothing. However, when romfs_dev_read() detects that the requested operation would cross the filesystem size limit, it currently silently truncates the requested number of bytes. This e.g. means that when the content of a file with size 0x1000 starts one byte before the filesystem size limit, ->readpage() will only fill a single byte of the supplied page while leaving the rest uninitialized, leaking that uninitialized memory to userspace. 
Fix it by returning an error code instead of truncating the read when the requested read operation would go beyond the end of the filesystem. Fixes: da4458bda237 ("NOMMU: Make it possible for RomFS to use MTD devices directly") Signed-off-by: Jann Horn Signed-off-by: Andrew Morton Reviewed-by: Greg Kroah-Hartman Cc: David Howells Cc: Link: http://lkml.kernel.org/r/20200818013202.2246365-1-jannh@google.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/romfs/storage.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c index f86f51f99ace..1dcadd22b440 100644 --- a/fs/romfs/storage.c +++ b/fs/romfs/storage.c @@ -221,10 +221,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos, size_t limit; limit = romfs_maxsize(sb); - if (pos >= limit) + if (pos >= limit || buflen > limit - pos) return -EIO; - if (buflen > limit - pos) - buflen = limit - pos; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) -- GitLab From 3a54b901fd77e6f44a6ed6af2961671287e77fdf Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 20 Aug 2020 17:42:14 -0700 Subject: [PATCH 0406/1304] kernel/relay.c: fix memleak on destroy relay channel commit 71e843295c680898959b22dc877ae3839cc22470 upstream. kmemleak report memory leak as follows: unreferenced object 0x607ee4e5f948 (size 8): comm "syz-executor.1", pid 2098, jiffies 4295031601 (age 288.468s) hex dump (first 8 bytes): 00 00 00 00 00 00 00 00 ........ 
backtrace: relay_open kernel/relay.c:583 [inline] relay_open+0xb6/0x970 kernel/relay.c:563 do_blk_trace_setup+0x4a8/0xb20 kernel/trace/blktrace.c:557 __blk_trace_setup+0xb6/0x150 kernel/trace/blktrace.c:597 blk_trace_ioctl+0x146/0x280 kernel/trace/blktrace.c:738 blkdev_ioctl+0xb2/0x6a0 block/ioctl.c:613 block_ioctl+0xe5/0x120 fs/block_dev.c:1871 vfs_ioctl fs/ioctl.c:48 [inline] __do_sys_ioctl fs/ioctl.c:753 [inline] __se_sys_ioctl fs/ioctl.c:739 [inline] __x64_sys_ioctl+0x170/0x1ce fs/ioctl.c:739 do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 'chan->buf' is malloced in relay_open() by alloc_percpu() but not free while destroy the relay channel. Fix it by adding free_percpu() before return from relay_destroy_channel(). Fixes: 017c59c042d0 ("relay: Use per CPU constructs for the relay channel buffer pointers") Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Signed-off-by: Andrew Morton Reviewed-by: Chris Wilson Cc: Al Viro Cc: Michael Ellerman Cc: David Rientjes Cc: Michel Lespinasse Cc: Daniel Axtens Cc: Thomas Gleixner Cc: Akash Goel Cc: Link: http://lkml.kernel.org/r/20200817122826.48518-1-weiyongjun1@huawei.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/relay.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/relay.c b/kernel/relay.c index 13c19f39e31e..735cb208f023 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -197,6 +197,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan) static void relay_destroy_channel(struct kref *kref) { struct rchan *chan = container_of(kref, struct rchan, kref); + free_percpu(chan->buf); kfree(chan); } -- GitLab From 84b8dc232afadf3aab425a104def45a1e7346a58 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Thu, 20 Aug 2020 17:42:24 -0700 Subject: [PATCH 0407/1304] mm: include CMA pages in lowmem_reserve at boot commit e08d3fdfe2dafa0331843f70ce1ff6c1c4900bf4 upstream. 
The lowmem_reserve arrays provide a means of applying pressure against allocations from lower zones that were targeted at higher zones. Its values are a function of the number of pages managed by higher zones and are assigned by a call to the setup_per_zone_lowmem_reserve() function. The function is initially called at boot time by the function init_per_zone_wmark_min() and may be called later by accesses of the /proc/sys/vm/lowmem_reserve_ratio sysctl file. The function init_per_zone_wmark_min() was moved up from a module_init to a core_initcall to resolve a sequencing issue with khugepaged. Unfortunately this created a sequencing issue with CMA page accounting. The CMA pages are added to the managed page count of a zone when cma_init_reserved_areas() is called at boot also as a core_initcall. This makes it uncertain whether the CMA pages will be added to the managed page counts of their zones before or after the call to init_per_zone_wmark_min() as it becomes dependent on link order. With the current link order the pages are added to the managed count after the lowmem_reserve arrays are initialized at boot. This means the lowmem_reserve values at boot may be lower than the values used later if /proc/sys/vm/lowmem_reserve_ratio is accessed even if the ratio values are unchanged. In many cases the difference is not significant, but for example an ARM platform with 1GB of memory and the following memory layout cma: Reserved 256 MiB at 0x0000000030000000 Zone ranges: DMA [mem 0x0000000000000000-0x000000002fffffff] Normal empty HighMem [mem 0x0000000030000000-0x000000003fffffff] would result in 0 lowmem_reserve for the DMA zone. This would allow userspace to deplete the DMA zone easily. Funnily enough $ cat /proc/sys/vm/lowmem_reserve_ratio would fix up the situation because as a side effect it forces setup_per_zone_lowmem_reserve. 
This commit breaks the link order dependency by invoking init_per_zone_wmark_min() as a postcore_initcall so that the CMA pages have the chance to be properly accounted in their zone(s) and allowing the lowmem_reserve arrays to receive consistent values. Fixes: bc22af74f271 ("mm: update min_free_kbytes from khugepaged after core initialization") Signed-off-by: Doug Berger Signed-off-by: Andrew Morton Acked-by: Michal Hocko Cc: Jason Baron Cc: David Rientjes Cc: "Kirill A. Shutemov" Cc: Link: http://lkml.kernel.org/r/1597423766-27849-1-git-send-email-opendmb@gmail.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7181dfe76440..b5c7b63a3102 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7395,7 +7395,7 @@ int __meminit init_per_zone_wmark_min(void) return 0; } -core_initcall(init_per_zone_wmark_min) +postcore_initcall(init_per_zone_wmark_min) /* * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so -- GitLab From c666936d8d8b0ace4f3260d71a4eedefd53011d9 Mon Sep 17 00:00:00 2001 From: Charan Teja Reddy Date: Thu, 20 Aug 2020 17:42:27 -0700 Subject: [PATCH 0408/1304] mm, page_alloc: fix core hung in free_pcppages_bulk() commit 88e8ac11d2ea3acc003cf01bb5a38c8aa76c3cfd upstream. The following race is observed with the repeated online, offline and a delay between two successive online of memory blocks of movable zone. P1 P2 Online the first memory block in the movable zone. The pcp struct values are initialized to default values,i.e., pcp->high = 0 & pcp->batch = 1. Allocate the pages from the movable zone. Try to Online the second memory block in the movable zone thus it entered the online_pages() but yet to call zone_pcp_update(). This process is entered into the exit path thus it tries to release the order-0 pages to pcp lists through free_unref_page_commit(). 
As pcp->high = 0, pcp->count = 1 proceed to call the function free_pcppages_bulk(). Update the pcp values thus the new pcp values are like, say, pcp->high = 378, pcp->batch = 63. Read the pcp's batch value using READ_ONCE() and pass the same to free_pcppages_bulk(), pcp values passed here are, batch = 63, count = 1. Since num of pages in the pcp lists are less than ->batch, then it will stuck in while(list_empty(list)) loop with interrupts disabled thus a core hung. Avoid this by ensuring free_pcppages_bulk() is called with proper count of pcp list pages. The mentioned race is some what easily reproducible without [1] because pcp's are not updated for the first memory block online and thus there is a enough race window for P2 between alloc+free and pcp struct values update through onlining of second memory block. With [1], the race still exists but it is very narrow as we update the pcp struct values for the first memory block online itself. This is not limited to the movable zone, it could also happen in cases with the normal zone (e.g., hotplug to a node that only has DMA memory, or no other memory yet). 
[1]: https://patchwork.kernel.org/patch/11696389/ Fixes: 5f8dcc21211a ("page-allocator: split per-cpu list into one-list-per-migrate-type") Signed-off-by: Charan Teja Reddy Signed-off-by: Andrew Morton Acked-by: David Hildenbrand Acked-by: David Rientjes Acked-by: Michal Hocko Cc: Michal Hocko Cc: Vlastimil Babka Cc: Vinayak Menon Cc: [2.6+] Link: http://lkml.kernel.org/r/1597150703-19003-1-git-send-email-charante@codeaurora.org Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/page_alloc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b5c7b63a3102..5717ee66c8b3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1115,6 +1115,11 @@ static void free_pcppages_bulk(struct zone *zone, int count, struct page *page, *tmp; LIST_HEAD(head); + /* + * Ensure proper count is passed which otherwise would stuck in the + * below while (list_empty(list)) loop. + */ + count = min(pcp->count, count); while (count) { struct list_head *list; -- GitLab From a5d3f789b272d76dadca4d35365a39ef516b31bf Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 31 Jul 2020 18:21:35 +0200 Subject: [PATCH 0409/1304] ext4: fix checking of directory entry validity for inline directories commit 7303cb5bfe845f7d43cd9b2dbd37dbb266efda9b upstream. ext4_search_dir() and ext4_generic_delete_entry() can be called both for standard director blocks and for inline directories stored inside inode or inline xattr space. For the second case we didn't call ext4_check_dir_entry() with proper constraints that could result in accepting corrupted directory entry as well as false positive filesystem errors like: EXT4-fs error (device dm-0): ext4_search_dir:1395: inode #28320400: block 113246792: comm dockerd: bad entry in directory: directory entry too close to block end - offset=0, inode=28320403, rec_len=32, name_len=8, size=4096 Fix the arguments passed to ext4_check_dir_entry(). 
Fixes: 109ba779d6cc ("ext4: check for directory entries too close to block end") CC: stable@vger.kernel.org Signed-off-by: Jan Kara Link: https://lore.kernel.org/r/20200731162135.8080-1-jack@suse.cz Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/ext4/namei.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a8f2e3549bb9..a2425e2d439c 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1309,8 +1309,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, ext4_match(fname, de)) { /* found a match - just to be sure, do * a full check */ - if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, - bh->b_size, offset)) + if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf, + buf_size, offset)) return -1; *res_dir = de; return 1; @@ -2344,7 +2344,7 @@ int ext4_generic_delete_entry(handle_t *handle, de = (struct ext4_dir_entry_2 *)entry_buf; while (i < buf_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, - bh->b_data, bh->b_size, i)) + entry_buf, buf_size, i)) return -EFSCORRUPTED; if (de == de_del) { if (pde) -- GitLab From 402ff143b90b48c1d1b29127fa538a2d227c2161 Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Sat, 20 Jun 2020 14:19:48 +0800 Subject: [PATCH 0410/1304] jbd2: add the missing unlock_buffer() in the error path of jbd2_write_superblock() commit ef3f5830b859604eda8723c26d90ab23edc027a4 upstream. jbd2_write_superblock() is under the buffer lock of journal superblock before ending that superblock write, so add a missing unlock_buffer() in in the error path before submitting buffer. 
Fixes: 742b06b5628f ("jbd2: check superblock mapped prior to committing") Signed-off-by: zhangyi (F) Reviewed-by: Ritesh Harjani Cc: stable@kernel.org Link: https://lore.kernel.org/r/20200620061948.2049579-1-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o Signed-off-by: Greg Kroah-Hartman --- fs/jbd2/journal.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index a15a22d20909..8a50722bca29 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1370,8 +1370,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags) int ret; /* Buffer got discarded which means block device got invalidated */ - if (!buffer_mapped(bh)) + if (!buffer_mapped(bh)) { + unlock_buffer(bh); return -EIO; + } trace_jbd2_write_superblock(journal, write_flags); if (!(journal->j_flags & JBD2_BARRIER)) -- GitLab From d25a2b92cf4e168dc8ab8ace143173bd7f5aa77e Mon Sep 17 00:00:00 2001 From: Steffen Maier Date: Thu, 13 Aug 2020 17:28:56 +0200 Subject: [PATCH 0411/1304] scsi: zfcp: Fix use-after-free in request timeout handlers commit 2d9a2c5f581be3991ba67fa9e7497c711220ea8e upstream. Before v4.15 commit 75492a51568b ("s390/scsi: Convert timers to use timer_setup()"), we intentionally only passed zfcp_adapter as context argument to zfcp_fsf_request_timeout_handler(). Since we only trigger adapter recovery, it was unnecessary to sync against races between timeout and (late) completion. Likewise, we only passed zfcp_erp_action as context argument to zfcp_erp_timeout_handler(). Since we only wakeup an ERP action, it was unnecessary to sync against races between timeout and (late) completion. Meanwhile the timeout handlers get timer_list as context argument and do a timer-specific container-of to zfcp_fsf_req which can have been freed. Fix it by making sure that any request timeout handlers, that might just have started before del_timer(), are completed by using del_timer_sync() instead. 
This ensures the request free happens afterwards. Space time diagram of potential use-after-free: Basic idea is to have 2 or more pending requests whose timeouts run out at almost the same time. req 1 timeout ERP thread req 2 timeout ---------------- ---------------- --------------------------------------- zfcp_fsf_request_timeout_handler fsf_req = from_timer(fsf_req, t, timer) adapter = fsf_req->adapter zfcp_qdio_siosl(adapter) zfcp_erp_adapter_reopen(adapter,...) zfcp_erp_strategy ... zfcp_fsf_req_dismiss_all list_for_each_entry_safe zfcp_fsf_req_complete 1 del_timer 1 zfcp_fsf_req_free 1 zfcp_fsf_req_complete 2 zfcp_fsf_request_timeout_handler del_timer 2 fsf_req = from_timer(fsf_req, t, timer) zfcp_fsf_req_free 2 adapter = fsf_req->adapter ^^^^^^^ already freed Link: https://lore.kernel.org/r/20200813152856.50088-1-maier@linux.ibm.com Fixes: 75492a51568b ("s390/scsi: Convert timers to use timer_setup()") Cc: #4.15+ Suggested-by: Julian Wiedmann Reviewed-by: Julian Wiedmann Signed-off-by: Steffen Maier Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/s390/scsi/zfcp_fsf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 91aa4bfcf8d6..5bb278a604ed 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -403,7 +403,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) return; } - del_timer(&req->timer); + del_timer_sync(&req->timer); zfcp_fsf_protstatus_eval(req); zfcp_fsf_fsfstatus_eval(req); req->handler(req); @@ -758,7 +758,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); req->issued = get_tod_clock(); if (zfcp_qdio_send(qdio, &req->qdio_req)) { - del_timer(&req->timer); + del_timer_sync(&req->timer); /* lookup request again, list might have changed */ zfcp_reqlist_find_rm(adapter->req_list, req_id); zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); -- GitLab From e05c6786abadeda99b8bf3099643fe96b4d16977 Mon Sep 17 00:00:00 2001 From: Krunoslav Kovac Date: Thu, 6 Aug 2020 17:54:47 -0400 Subject: [PATCH 0412/1304] drm/amd/display: fix pow() crashing when given base 0 commit d2e59d0ff4c44d1f6f8ed884a5bea7d1bb7fd98c upstream. [Why&How] pow(a,x) is implemented as exp(x*log(a)). log(0) will crash. So return 0^x = 0, unless x=0, convention seems to be 0^0 = 1. 
Cc: stable@vger.kernel.org Signed-off-by: Krunoslav Kovac Reviewed-by: Anthony Koo Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/display/include/fixed31_32.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 52a73332befb..343f869c5277 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg); */ static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2) { + if (arg1.value == 0) + return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero; + return dc_fixpt_exp( dc_fixpt_mul( dc_fixpt_log(arg1), -- GitLab From 1c263d0e54f4348df126e4c3c1011253d7651544 Mon Sep 17 00:00:00 2001 From: Liang Chen Date: Fri, 6 Mar 2020 15:01:33 +0800 Subject: [PATCH 0413/1304] kthread: Do not preempt current task if it is going to call schedule() commit 26c7295be0c5e6da3fa45970e9748be983175b1b upstream. when we create a kthread with ktrhead_create_on_cpu(),the child thread entry is ktread.c:ktrhead() which will be preempted by the parent after call complete(done) while schedule() is not called yet,then the parent will call wait_task_inactive(child) but the child is still on the runqueue, so the parent will schedule_hrtimeout() for 1 jiffy,it will waste a lot of time,especially on startup. parent child ktrhead_create_on_cpu() wait_fo_completion(&done) -----> ktread.c:ktrhead() |----- complete(done);--wakeup and preempted by parent kthread_bind() <------------| |-> schedule();--dequeue here wait_task_inactive(child) | schedule_hrtimeout(1 jiffy) -| So we hope the child just wakeup parent but not preempted by parent, and the child is going to call schedule() soon,then the parent will not call schedule_hrtimeout(1 jiffy) as the child is already dequeue. 
The same issue for ktrhead_park()&&kthread_parkme(). This patch can save 120ms on rk312x startup with CONFIG_HZ=300. Signed-off-by: Liang Chen Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Steven Rostedt (VMware) Link: https://lkml.kernel.org/r/20200306070133.18335-2-cl@rock-chips.com Signed-off-by: Chanho Park Signed-off-by: Greg Kroah-Hartman --- kernel/kthread.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/kernel/kthread.c b/kernel/kthread.c index 087d18d771b5..b786eda90bb5 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -190,8 +190,15 @@ static void __kthread_parkme(struct kthread *self) if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) break; + /* + * Thread is going to call schedule(), do not preempt it, + * or the caller of kthread_park() may spend more time in + * wait_task_inactive(). + */ + preempt_disable(); complete(&self->parked); - schedule(); + schedule_preempt_disabled(); + preempt_enable(); } __set_current_state(TASK_RUNNING); } @@ -236,8 +243,14 @@ static int kthread(void *_create) /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); create->result = current; + /* + * Thread is going to call schedule(), do not preempt it, + * or the creator may spend more time in wait_task_inactive(). + */ + preempt_disable(); complete(done); - schedule(); + schedule_preempt_disabled(); + preempt_enable(); ret = -EINTR; if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { -- GitLab From 30872ac7dbf038869a2489d64bb8923cdb27d756 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Mon, 3 Aug 2020 13:09:01 +0200 Subject: [PATCH 0414/1304] spi: Prevent adding devices below an unregistering controller [ Upstream commit ddf75be47ca748f8b12d28ac64d624354fddf189 ] CONFIG_OF_DYNAMIC and CONFIG_ACPI allow adding SPI devices at runtime using a DeviceTree overlay or DSDT patch. CONFIG_SPI_SLAVE allows the same via sysfs. 
But there are no precautions to prevent adding a device below a controller that's being removed. Such a device is unusable and may not even be able to unbind cleanly as it becomes inaccessible once the controller has been torn down. E.g. it is then impossible to quiesce the device's interrupt. of_spi_notify() and acpi_spi_notify() do hold a ref on the controller, but otherwise run lockless against spi_unregister_controller(). Fix by holding the spi_add_lock in spi_unregister_controller() and bailing out of spi_add_device() if the controller has been unregistered concurrently. Fixes: ce79d54ae447 ("spi/of: Add OF notifier handler") Signed-off-by: Lukas Wunner Cc: stable@vger.kernel.org # v3.19+ Cc: Geert Uytterhoeven Cc: Octavian Purdila Cc: Pantelis Antoniou Link: https://lore.kernel.org/r/a8c3205088a969dc8410eec1eba9aface60f36af.1596451035.git.lukas@wunner.de Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/spi/Kconfig | 3 +++ drivers/spi/spi.c | 21 ++++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 671d078349cc..0a7fd56c1ed9 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -817,4 +817,7 @@ config SPI_SLAVE_SYSTEM_CONTROL endif # SPI_SLAVE +config SPI_DYNAMIC + def_bool ACPI || OF_DYNAMIC || SPI_SLAVE + endif # SPI diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index f589d8100e95..92e6b6774d98 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -432,6 +432,12 @@ static LIST_HEAD(spi_controller_list); */ static DEFINE_MUTEX(board_lock); +/* + * Prevents addition of devices with same chip select and + * addition of devices below an unregistering controller. 
+ */ +static DEFINE_MUTEX(spi_add_lock); + /** * spi_alloc_device - Allocate a new SPI device * @ctlr: Controller to which device is connected @@ -510,7 +516,6 @@ static int spi_dev_check(struct device *dev, void *data) */ int spi_add_device(struct spi_device *spi) { - static DEFINE_MUTEX(spi_add_lock); struct spi_controller *ctlr = spi->controller; struct device *dev = ctlr->dev.parent; int status; @@ -538,6 +543,13 @@ int spi_add_device(struct spi_device *spi) goto done; } + /* Controller may unregister concurrently */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && + !device_is_registered(&ctlr->dev)) { + status = -ENODEV; + goto done; + } + if (ctlr->cs_gpios) spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; @@ -2306,6 +2318,10 @@ void spi_unregister_controller(struct spi_controller *ctlr) struct spi_controller *found; int id = ctlr->bus_num; + /* Prevent addition of new devices, unregister existing ones */ + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_lock(&spi_add_lock); + device_for_each_child(&ctlr->dev, NULL, __unregister); /* First make sure that this controller was ever added */ @@ -2326,6 +2342,9 @@ void spi_unregister_controller(struct spi_controller *ctlr) if (found == ctlr) idr_remove(&spi_master_idr, id); mutex_unlock(&board_lock); + + if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) + mutex_unlock(&spi_add_lock); } EXPORT_SYMBOL_GPL(spi_unregister_controller); -- GitLab From 2ba7c21c03c4c73d9afe568e9b824682226b097f Mon Sep 17 00:00:00 2001 From: Stanley Chu Date: Fri, 12 Jun 2020 09:26:24 +0800 Subject: [PATCH 0415/1304] scsi: ufs: Add DELAY_BEFORE_LPM quirk for Micron devices [ Upstream commit c0a18ee0ce78d7957ec1a53be35b1b3beba80668 ] It is confirmed that Micron device needs DELAY_BEFORE_LPM quirk to have a delay before VCC is powered off. Sdd Micron vendor ID and this quirk for Micron devices. 
Link: https://lore.kernel.org/r/20200612012625.6615-2-stanley.chu@mediatek.com Reviewed-by: Bean Huo Reviewed-by: Alim Akhtar Signed-off-by: Stanley Chu Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/ufs/ufs_quirks.h | 1 + drivers/scsi/ufs/ufshcd.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index 5d2dfdb41a6f..758d3a67047d 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -21,6 +21,7 @@ #define UFS_ANY_VENDOR 0xFFFF #define UFS_ANY_MODEL "ANY_MODEL" +#define UFS_VENDOR_MICRON 0x12C #define UFS_VENDOR_TOSHIBA 0x198 #define UFS_VENDOR_SAMSUNG 0x1CE #define UFS_VENDOR_SKHYNIX 0x1AD diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index bd21c9cdf818..ab628fd37e02 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -218,6 +218,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, static struct ufs_dev_fix ufs_fixups[] = { /* UFS cards deviations table */ + UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL, + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), -- GitLab From 7d057ec39676bcb5fbf8177dad5f781f86e7d316 Mon Sep 17 00:00:00 2001 From: Bodo Stroesser Date: Thu, 18 Jun 2020 15:16:32 +0200 Subject: [PATCH 0416/1304] scsi: target: tcmu: Fix crash in tcmu_flush_dcache_range on ARM [ Upstream commit 3145550a7f8b08356c8ff29feaa6c56aca12901d ] This patch fixes the following crash (see https://bugzilla.kernel.org/show_bug.cgi?id=208045) Process iscsi_trx (pid: 7496, stack limit = 0x0000000010dd111a) CPU: 0 PID: 7496 Comm: iscsi_trx Not tainted 4.19.118-0419118-generic #202004230533 Hardware name: Greatwall QingTian DF720/F601, BIOS 601FBE20 Sep 26 2019 pstate: 80400005 (Nzcv daif +PAN -UAO) pc : flush_dcache_page+0x18/0x40 lr : 
is_ring_space_avail+0x68/0x2f8 [target_core_user] sp : ffff000015123a80 x29: ffff000015123a80 x28: 0000000000000000 x27: 0000000000001000 x26: ffff000023ea5000 x25: ffffcfa25bbe08b8 x24: 0000000000000078 x23: ffff7e0000000000 x22: ffff000023ea5001 x21: ffffcfa24b79c000 x20: 0000000000000fff x19: ffff7e00008fa940 x18: 0000000000000000 x17: 0000000000000000 x16: ffff2d047e709138 x15: 0000000000000000 x14: 0000000000000000 x13: 0000000000000000 x12: ffff2d047fbd0a40 x11: 0000000000000000 x10: 0000000000000030 x9 : 0000000000000000 x8 : ffffc9a254820a00 x7 : 00000000000013b0 x6 : 000000000000003f x5 : 0000000000000040 x4 : ffffcfa25bbe08e8 x3 : 0000000000001000 x2 : 0000000000000078 x1 : ffffcfa25bbe08b8 x0 : ffff2d040bc88a18 Call trace: flush_dcache_page+0x18/0x40 is_ring_space_avail+0x68/0x2f8 [target_core_user] queue_cmd_ring+0x1f8/0x680 [target_core_user] tcmu_queue_cmd+0xe4/0x158 [target_core_user] __target_execute_cmd+0x30/0xf0 [target_core_mod] target_execute_cmd+0x294/0x390 [target_core_mod] transport_generic_new_cmd+0x1e8/0x358 [target_core_mod] transport_handle_cdb_direct+0x50/0xb0 [target_core_mod] iscsit_execute_cmd+0x2b4/0x350 [iscsi_target_mod] iscsit_sequence_cmd+0xd8/0x1d8 [iscsi_target_mod] iscsit_process_scsi_cmd+0xac/0xf8 [iscsi_target_mod] iscsit_get_rx_pdu+0x404/0xd00 [iscsi_target_mod] iscsi_target_rx_thread+0xb8/0x130 [iscsi_target_mod] kthread+0x130/0x138 ret_from_fork+0x10/0x18 Code: f9000bf3 aa0003f3 aa1e03e0 d503201f (f9400260) ---[ end trace 1e451c73f4266776 ]--- The solution is based on patch: "scsi: target: tcmu: Optimize use of flush_dcache_page" which restricts the use of tcmu_flush_dcache_range() to addresses from vmalloc'ed areas only. This patch now replaces the virt_to_page() call in tcmu_flush_dcache_range() - which is wrong for vmalloced addrs - by vmalloc_to_page(). 
The patch was tested on ARM with kernel 4.19.118 and 5.7.2 Link: https://lore.kernel.org/r/20200618131632.32748-3-bstroesser@ts.fujitsu.com Tested-by: JiangYu Tested-by: Daniel Meyerholt Acked-by: Mike Christie Signed-off-by: Bodo Stroesser Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/target/target_core_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 8da89925a874..9c05e820857a 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -612,7 +612,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) size = round_up(size+offset, PAGE_SIZE); while (size) { - flush_dcache_page(virt_to_page(start)); + flush_dcache_page(vmalloc_to_page(start)); start += PAGE_SIZE; size -= PAGE_SIZE; } -- GitLab From b07b9521991cca961cc552213741afa7cf5906b5 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Fri, 5 Jun 2020 18:17:28 +0200 Subject: [PATCH 0417/1304] media: budget-core: Improve exception handling in budget_register() [ Upstream commit fc0456458df8b3421dba2a5508cd817fbc20ea71 ] budget_register() has no error handling after its failure. Add the missed undo functions for error handling to fix it. 
Signed-off-by: Chuhong Yuan Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/pci/ttpci/budget-core.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c index b3dc45b91101..9b545c743168 100644 --- a/drivers/media/pci/ttpci/budget-core.c +++ b/drivers/media/pci/ttpci/budget-core.c @@ -383,20 +383,25 @@ static int budget_register(struct budget *budget) ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend); if (ret < 0) - return ret; + goto err_release_dmx; budget->mem_frontend.source = DMX_MEMORY_FE; ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend); if (ret < 0) - return ret; + goto err_release_dmx; ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend); if (ret < 0) - return ret; + goto err_release_dmx; dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx); return 0; + +err_release_dmx: + dvb_dmxdev_release(&budget->dmxdev); + dvb_dmx_release(&budget->demux); + return ret; } static void budget_unregister(struct budget *budget) -- GitLab From b5a5b21f34866d8a0d432e0d9274e5dc7a8b6de1 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 20 Jun 2020 20:04:43 +0800 Subject: [PATCH 0418/1304] rtc: goldfish: Enable interrupt in set_alarm() when necessary [ Upstream commit 22f8d5a1bf230cf8567a4121fc3789babb46336d ] When use goldfish rtc, the "hwclock" command fails with "select() to /dev/rtc to wait for clock tick timed out". This is because "hwclock" need the set_alarm() hook to enable interrupt when alrm->enabled is true. This operation is missing in goldfish rtc (but other rtc drivers, such as cmos rtc, enable interrupt here), so add it. 
Signed-off-by: Huacai Chen Signed-off-by: Jiaxun Yang Signed-off-by: Alexandre Belloni Link: https://lore.kernel.org/r/1592654683-31314-1-git-send-email-chenhc@lemote.com Signed-off-by: Sasha Levin --- drivers/rtc/rtc-goldfish.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c index a1c44d0c8557..30cbe22c57a8 100644 --- a/drivers/rtc/rtc-goldfish.c +++ b/drivers/rtc/rtc-goldfish.c @@ -87,6 +87,7 @@ static int goldfish_rtc_set_alarm(struct device *dev, rtc_alarm64 = rtc_alarm * NSEC_PER_SEC; writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH); writel(rtc_alarm64, base + TIMER_ALARM_LOW); + writel(1, base + TIMER_IRQ_ENABLED); } else { /* * if this function was called with enabled=0 -- GitLab From 773ae06f9c40d57c74f2d3e80e52290acc1064fa Mon Sep 17 00:00:00 2001 From: Evgeny Novikov Date: Fri, 10 Jul 2020 11:02:23 +0200 Subject: [PATCH 0419/1304] media: vpss: clean up resources in init [ Upstream commit 9c487b0b0ea7ff22127fe99a7f67657d8730ff94 ] If platform_driver_register() fails within vpss_init() resources are not cleaned up. The patch fixes this issue by introducing the corresponding error handling. Found by Linux Driver Verification project (linuxtesting.org). 
Signed-off-by: Evgeny Novikov Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/platform/davinci/vpss.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c index 89a86c19579b..50fc71d0cb9f 100644 --- a/drivers/media/platform/davinci/vpss.c +++ b/drivers/media/platform/davinci/vpss.c @@ -514,19 +514,31 @@ static void vpss_exit(void) static int __init vpss_init(void) { + int ret; + if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control")) return -EBUSY; oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4); if (unlikely(!oper_cfg.vpss_regs_base2)) { - release_mem_region(VPSS_CLK_CTRL, 4); - return -ENOMEM; + ret = -ENOMEM; + goto err_ioremap; } writel(VPSS_CLK_CTRL_VENCCLKEN | - VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); + VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2); + + ret = platform_driver_register(&vpss_driver); + if (ret) + goto err_pd_register; + + return 0; - return platform_driver_register(&vpss_driver); +err_pd_register: + iounmap(oper_cfg.vpss_regs_base2); +err_ioremap: + release_mem_region(VPSS_CLK_CTRL, 4); + return ret; } subsys_initcall(vpss_init); module_exit(vpss_exit); -- GitLab From c4af8c25bd9b16ecffbeb5c1a70c4cfec2f0c2ef Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 21 Jul 2020 22:24:07 -0700 Subject: [PATCH 0420/1304] Input: psmouse - add a newline when printing 'proto' by sysfs [ Upstream commit 4aec14de3a15cf9789a0e19c847f164776f49473 ] When I cat parameter 'proto' by sysfs, it displays as follows. It's better to add a newline for easy reading. 
root@syzkaller:~# cat /sys/module/psmouse/parameters/proto autoroot@syzkaller:~# Signed-off-by: Xiongfeng Wang Link: https://lore.kernel.org/r/20200720073846.120724-1-wangxiongfeng2@huawei.com Signed-off-by: Dmitry Torokhov Signed-off-by: Sasha Levin --- drivers/input/mouse/psmouse-base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index d3ff1fc09af7..a9040c0fb4c3 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -2044,7 +2044,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) { int type = *((unsigned int *)kp->arg); - return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name); + return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); } static int __init psmouse_init(void) -- GitLab From 465bc917d164313baec14b0965e4e70db5e29ade Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Sat, 13 Jun 2020 17:17:52 +1000 Subject: [PATCH 0421/1304] m68knommu: fix overwriting of bits in ColdFire V3 cache control [ Upstream commit bdee0e793cea10c516ff48bf3ebb4ef1820a116b ] The Cache Control Register (CACR) of the ColdFire V3 has bits that control high level caching functions, and also enable/disable the use of the alternate stack pointer register (the EUSP bit) to provide separate supervisor and user stack pointer registers. The code as it is today will blindly clear the EUSP bit on cache actions like invalidation. So it is broken for this case - and that will result in failed booting (interrupt entry and exit processing will be completely hosed). This only affects ColdFire V3 parts that support the alternate stack register (like the 5329 for example) - generally speaking new parts do, older parts don't. It has no impact on ColdFire V3 parts with the single stack pointer, like the 5307 for example. 
Fix the cache bit defines used, so they maintain the EUSP bit when carrying out cache actions through the CACR register. Signed-off-by: Greg Ungerer Signed-off-by: Sasha Levin --- arch/m68k/include/asm/m53xxacr.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h index 9138a624c5c8..692f90e7fecc 100644 --- a/arch/m68k/include/asm/m53xxacr.h +++ b/arch/m68k/include/asm/m53xxacr.h @@ -89,9 +89,9 @@ * coherency though in all cases. And for copyback caches we will need * to push cached data as well. */ -#define CACHE_INIT CACR_CINVA -#define CACHE_INVALIDATE CACR_CINVA -#define CACHE_INVALIDATED CACR_CINVA +#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC) +#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA) +#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA) #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \ (0x000f0000) + \ -- GitLab From 88f7857c90e457e44ff45872d012e37cb3e3a41e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 30 Jun 2020 15:55:45 -0400 Subject: [PATCH 0422/1304] svcrdma: Fix another Receive buffer leak [ Upstream commit 64d26422516b2e347b32e6d9b1d40b3c19a62aae ] During a connection tear down, the Receive queue is flushed before the device resources are freed. Typically, all the Receives flush with IB_WR_FLUSH_ERR. However, any pending successful Receives flush with IB_WR_SUCCESS, and the server automatically posts a fresh Receive to replace the completing one. This happens even after the connection has closed and the RQ is drained. Receives that are posted after the RQ is drained appear never to complete, causing a Receive resource leak. The leaked Receive buffer is left DMA-mapped. To prevent these late-posted recv_ctxt's from leaking, block new Receive posting after XPT_CLOSE is set. 
Signed-off-by: Chuck Lever Signed-off-by: Sasha Levin --- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 16c8174658fd..252495ff9010 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -268,6 +268,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma) { struct svc_rdma_recv_ctxt *ctxt; + if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags)) + return 0; ctxt = svc_rdma_recv_ctxt_get(rdma); if (!ctxt) return -ENOMEM; -- GitLab From 1bc31e520faf8af5555030e7662c0577f25a1be0 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 14 Jul 2020 10:36:09 -0700 Subject: [PATCH 0423/1304] xfs: fix inode quota reservation checks [ Upstream commit f959b5d037e71a4d69b5bf71faffa065d9269b4a ] xfs_trans_dqresv is the function that we use to make reservations against resource quotas. Each resource contains two counters: the q_core counter, which tracks resources allocated on disk; and the dquot reservation counter, which tracks how much of that resource has either been allocated or reserved by threads that are working on metadata updates. For disk blocks, we compare the proposed reservation counter against the hard and soft limits to decide if we're going to fail the operation. However, for inodes we inexplicably compare against the q_core counter, not the incore reservation count. Since the q_core counter is always lower than the reservation count and we unlock the dquot between reservation and transaction commit, this means that multiple threads can reserve the last inode count before we hit the hard limit, and when they commit, we'll be well over the hard limit. Fix this by checking against the incore inode reservation counter, since we would appear to maintain that correctly (and that's what we report in GETQUOTA). Signed-off-by: Darrick J. 
Wong Reviewed-by: Allison Collins Reviewed-by: Chandan Babu R Reviewed-by: Christoph Hellwig Signed-off-by: Sasha Levin --- fs/xfs/xfs_trans_dquot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index c23257a26c2b..b8f05d5909b5 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -657,7 +657,7 @@ xfs_trans_dqresv( } } if (ninos > 0) { - total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; + total_count = dqp->q_res_icount + ninos; timer = be32_to_cpu(dqp->q_core.d_itimer); warns = be16_to_cpu(dqp->q_core.d_iwarns); warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; -- GitLab From 96de3dbf27ae663e0c276ff8ac2dfed58d87a9ab Mon Sep 17 00:00:00 2001 From: Zhe Li Date: Fri, 19 Jun 2020 17:06:35 +0800 Subject: [PATCH 0424/1304] jffs2: fix UAF problem [ Upstream commit 798b7347e4f29553db4b996393caf12f5b233daf ] The log of UAF problem is listed below. BUG: KASAN: use-after-free in jffs2_rmdir+0xa4/0x1cc [jffs2] at addr c1f165fc Read of size 4 by task rm/8283 ============================================================================= BUG kmalloc-32 (Tainted: P B O ): kasan: bad access detected ----------------------------------------------------------------------------- INFO: Allocated in 0xbbbbbbbb age=3054364 cpu=0 pid=0 0xb0bba6ef jffs2_write_dirent+0x11c/0x9c8 [jffs2] __slab_alloc.isra.21.constprop.25+0x2c/0x44 __kmalloc+0x1dc/0x370 jffs2_write_dirent+0x11c/0x9c8 [jffs2] jffs2_do_unlink+0x328/0x5fc [jffs2] jffs2_rmdir+0x110/0x1cc [jffs2] vfs_rmdir+0x180/0x268 do_rmdir+0x2cc/0x300 ret_from_syscall+0x0/0x3c INFO: Freed in 0x205b age=3054364 cpu=0 pid=0 0x2e9173 jffs2_add_fd_to_list+0x138/0x1dc [jffs2] jffs2_add_fd_to_list+0x138/0x1dc [jffs2] jffs2_garbage_collect_dirent.isra.3+0x21c/0x288 [jffs2] jffs2_garbage_collect_live+0x16bc/0x1800 [jffs2] jffs2_garbage_collect_pass+0x678/0x11d4 [jffs2] jffs2_garbage_collect_thread+0x1e8/0x3b0 [jffs2] kthread+0x1a8/0x1b0 
ret_from_kernel_thread+0x5c/0x64 Call Trace: [c17ddd20] [c02452d4] kasan_report.part.0+0x298/0x72c (unreliable) [c17ddda0] [d2509680] jffs2_rmdir+0xa4/0x1cc [jffs2] [c17dddd0] [c026da04] vfs_rmdir+0x180/0x268 [c17dde00] [c026f4e4] do_rmdir+0x2cc/0x300 [c17ddf40] [c001a658] ret_from_syscall+0x0/0x3c The root cause is that we don't get "jffs2_inode_info.sem" before we scan list "jffs2_inode_info.dents" in function jffs2_rmdir. This patch adds code to get "jffs2_inode_info.sem" before we scan "jffs2_inode_info.dents" to solve the UAF problem. Signed-off-by: Zhe Li Reviewed-by: Hou Tao Signed-off-by: Richard Weinberger Signed-off-by: Sasha Levin --- fs/jffs2/dir.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index f20cff1194bb..776493713153 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) int ret; uint32_t now = JFFS2_NOW(); + mutex_lock(&f->sem); for (fd = f->dents ; fd; fd = fd->next) { - if (fd->ino) + if (fd->ino) { + mutex_unlock(&f->sem); return -ENOTEMPTY; + } } + mutex_unlock(&f->sem); ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, dentry->d_name.len, f, now); -- GitLab From f831c6c95d02d667b450b579bbfa781e96661120 Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Thu, 23 Jul 2020 15:32:25 +0800 Subject: [PATCH 0425/1304] ceph: fix use-after-free for fsc->mdsc [ Upstream commit a7caa88f8b72c136f9a401f498471b8a8e35370d ] If the ceph_mdsc_init() fails, it will free the mdsc already. 
Reported-by: syzbot+b57f46d8d6ea51960b8c@syzkaller.appspotmail.com Signed-off-by: Xiubo Li Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov Signed-off-by: Sasha Levin --- fs/ceph/mds_client.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index a2e903203bf9..0fa14d8b9c64 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3682,7 +3682,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) return -ENOMEM; } - fsc->mdsc = mdsc; init_completion(&mdsc->safe_umount_waiters); init_waitqueue_head(&mdsc->session_close_wq); INIT_LIST_HEAD(&mdsc->waiting_for_map); @@ -3723,6 +3722,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) strscpy(mdsc->nodename, utsname()->nodename, sizeof(mdsc->nodename)); + + fsc->mdsc = mdsc; return 0; } -- GitLab From 62646cb9ab6603423a51b3f4704ba1746b83182d Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 3 Aug 2020 11:37:20 -0700 Subject: [PATCH 0426/1304] cpufreq: intel_pstate: Fix cpuinfo_max_freq when MSR_TURBO_RATIO_LIMIT is 0 [ Upstream commit 4daca379c703ff55edc065e8e5173dcfeecf0148 ] The MSR_TURBO_RATIO_LIMIT can be 0. This is not an error. User can update this MSR via BIOS settings on some systems or can use msr tools to update. Also some systems boot with value = 0. This results in display of cpufreq/cpuinfo_max_freq wrong. This value will be equal to cpufreq/base_frequency, even though turbo is enabled. But platform will still function normally in HWP mode as we get max 1-core frequency from the MSR_HWP_CAPABILITIES. This MSR is already used to calculate cpu->pstate.turbo_freq, which is used for to set policy->cpuinfo.max_freq. But some other places cpu->pstate.turbo_pstate is used. For example to set policy->max. To fix this, also update cpu->pstate.turbo_pstate when updating cpu->pstate.turbo_freq. Signed-off-by: Srinivas Pandruvada Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Sasha Levin --- drivers/cpufreq/intel_pstate.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e7b3d4ed8eff..99166000ffb7 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1431,6 +1431,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max); cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling; + cpu->pstate.turbo_pstate = phy_max; } else { cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; } -- GitLab From 4f419fd2f86d7db1d7fcb6f2fb9fde2c9fdd8bbe Mon Sep 17 00:00:00 2001 From: Javed Hasan Date: Wed, 29 Jul 2020 01:18:23 -0700 Subject: [PATCH 0427/1304] scsi: libfc: Free skb in fc_disc_gpn_id_resp() for valid cases [ Upstream commit ec007ef40abb6a164d148b0dc19789a7a2de2cc8 ] In fc_disc_gpn_id_resp(), skb is supposed to get freed in all cases except for PTR_ERR. However, in some cases it didn't. This fix is to call fc_frame_free(fp) before function returns. Link: https://lore.kernel.org/r/20200729081824.30996-2-jhasan@marvell.com Reviewed-by: Girish Basrur Reviewed-by: Santosh Vernekar Reviewed-by: Saurav Kashyap Reviewed-by: Shyam Sundar Signed-off-by: Javed Hasan Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/libfc/fc_disc.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8839f509b19a..78cf5b32bca6 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -593,8 +593,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, if (PTR_ERR(fp) == -FC_EX_CLOSED) goto out; - if (IS_ERR(fp)) - goto redisc; + if (IS_ERR(fp)) { + mutex_lock(&disc->disc_mutex); + fc_disc_restart(disc); + mutex_unlock(&disc->disc_mutex); + goto out; + } cp = fc_frame_payload_get(fp, sizeof(*cp)); if (!cp) @@ -621,7 +625,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, new_rdata->disc_id = disc->disc_id; fc_rport_login(new_rdata); } - goto out; + goto free_fp; } rdata->disc_id = disc->disc_id; mutex_unlock(&rdata->rp_mutex); @@ -638,6 +642,8 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, fc_disc_restart(disc); mutex_unlock(&disc->disc_mutex); } +free_fp: + fc_frame_free(fp); out: kref_put(&rdata->kref, fc_rport_destroy); if (!IS_ERR(fp)) -- GitLab From 9f489d30979bd352ffbd555020b6ca79efb93b89 Mon Sep 17 00:00:00 2001 From: Mao Wenan Date: Sun, 2 Aug 2020 15:44:09 +0800 Subject: [PATCH 0428/1304] virtio_ring: Avoid loop when vq is broken in virtqueue_poll [ Upstream commit 481a0d7422db26fb63e2d64f0652667a5c6d0f3e ] The loop may exist if vq->broken is true, virtqueue_get_buf_ctx_packed or virtqueue_get_buf_ctx_split will return NULL, so virtnet_poll will reschedule napi to receive packet, it will lead cpu usage(si) to 100%. call trace as below: virtnet_poll virtnet_receive virtqueue_get_buf_ctx virtqueue_get_buf_ctx_packed virtqueue_get_buf_ctx_split virtqueue_napi_complete virtqueue_poll //return true virtqueue_napi_schedule //it will reschedule napi to fix this, return false if vq is broken in virtqueue_poll. Signed-off-by: Mao Wenan Acked-by: Michael S. 
Tsirkin Link: https://lore.kernel.org/r/1596354249-96204-1-git-send-email-wenan.mao@linux.alibaba.com Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang Signed-off-by: Sasha Levin --- drivers/virtio/virtio_ring.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 6228b48d1e12..df7980aef927 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -828,6 +828,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); + if (unlikely(vq->broken)) + return false; + virtio_mb(vq->weak_barriers); return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx); } -- GitLab From c4e7716b534941ed1bfe7410ccaf531471cd042e Mon Sep 17 00:00:00 2001 From: Gaurav Singh Date: Thu, 6 Aug 2020 23:17:25 -0700 Subject: [PATCH 0429/1304] tools/testing/selftests/cgroup/cgroup_util.c: cg_read_strcmp: fix null pointer dereference [ Upstream commit d830020656c5b68ced962ed3cb51a90e0a89d4c4 ] Haven't reproduced this issue. This PR does a minor code cleanup. 
Signed-off-by: Gaurav Singh Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Cc: Shuah Khan Cc: Tejun Heo Cc: Michal Koutn Cc: Roman Gushchin Cc: Christian Brauner Cc: Chris Down Link: http://lkml.kernel.org/r/20200726013808.22242-1-gaurav1086@gmail.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- tools/testing/selftests/cgroup/cgroup_util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 075cb0c73014..90418d79ef67 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -95,7 +95,7 @@ int cg_read_strcmp(const char *cgroup, const char *control, /* Handle the case of comparing against empty string */ if (!expected) - size = 32; + return -1; else size = strlen(expected) + 1; -- GitLab From c90652abae15465210207418ac8c1ecb230e3b1e Mon Sep 17 00:00:00 2001 From: Eiichi Tsukata Date: Thu, 6 Aug 2020 15:18:48 -0700 Subject: [PATCH 0430/1304] xfs: Fix UBSAN null-ptr-deref in xfs_sysfs_init [ Upstream commit 96cf2a2c75567ff56195fe3126d497a2e7e4379f ] If xfs_sysfs_init is called with parent_kobj == NULL, UBSAN shows the following warning: UBSAN: null-ptr-deref in ./fs/xfs/xfs_sysfs.h:37:23 member access within null pointer of type 'struct xfs_kobj' Call Trace: dump_stack+0x10e/0x195 ubsan_type_mismatch_common+0x241/0x280 __ubsan_handle_type_mismatch_v1+0x32/0x40 init_xfs_fs+0x12b/0x28f do_one_initcall+0xdd/0x1d0 do_initcall_level+0x151/0x1b6 do_initcalls+0x50/0x8f do_basic_setup+0x29/0x2b kernel_init_freeable+0x19f/0x20b kernel_init+0x11/0x1e0 ret_from_fork+0x22/0x30 Fix it by checking parent_kobj before the code accesses its member. Signed-off-by: Eiichi Tsukata Reviewed-by: Darrick J. Wong [darrick: minor whitespace edits] Signed-off-by: Darrick J. 
Wong Signed-off-by: Sasha Levin --- fs/xfs/xfs_sysfs.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h index e9f810fc6731..43585850f154 100644 --- a/fs/xfs/xfs_sysfs.h +++ b/fs/xfs/xfs_sysfs.h @@ -32,9 +32,11 @@ xfs_sysfs_init( struct xfs_kobj *parent_kobj, const char *name) { + struct kobject *parent; + + parent = parent_kobj ? &parent_kobj->kobject : NULL; init_completion(&kobj->complete); - return kobject_init_and_add(&kobj->kobject, ktype, - &parent_kobj->kobject, "%s", name); + return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name); } static inline void -- GitLab From 6afcb8b93400b774f3d74df7e1fc63805cbc92b3 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 11 Aug 2020 18:33:54 -0700 Subject: [PATCH 0431/1304] alpha: fix annotation of io{read,write}{16,32}be() [ Upstream commit bd72866b8da499e60633ff28f8a4f6e09ca78efe ] These accessors must be used to read/write a big-endian bus. The value returned or written is native-endian. However, these accessors are defined using be{16,32}_to_cpu() or cpu_to_be{16,32}() to make the endian conversion but these expect a __be{16,32} when none is present. Keeping them would need a force cast that would solve nothing at all. So, do the conversion using swab{16,32}, like done in asm-generic for similar situations. 
Reported-by: kernel test robot Signed-off-by: Luc Van Oostenryck Signed-off-by: Andrew Morton Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Stephen Boyd Cc: Arnd Bergmann Link: http://lkml.kernel.org/r/20200622114232.80039-1-luc.vanoostenryck@gmail.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- arch/alpha/include/asm/io.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index eb09d5aee910..0bba9e991189 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -507,10 +507,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) } #endif -#define ioread16be(p) be16_to_cpu(ioread16(p)) -#define ioread32be(p) be32_to_cpu(ioread32(p)) -#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p)) -#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p)) +#define ioread16be(p) swab16(ioread16(p)) +#define ioread32be(p) swab32(ioread32(p)) +#define iowrite16be(v,p) iowrite16(swab16(v), (p)) +#define iowrite32be(v,p) iowrite32(swab32(v), (p)) #define inb_p inb #define inw_p inw -- GitLab From 7c89e40ede2b0f1e292fb54d2e73fe952b4d44d8 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 11 Aug 2020 18:36:04 -0700 Subject: [PATCH 0432/1304] fs/signalfd.c: fix inconsistent return codes for signalfd4 [ Upstream commit a089e3fd5a82aea20f3d9ec4caa5f4c65cc2cfcc ] The kernel signalfd4() syscall returns different error codes when called either in compat or native mode. This behaviour makes correct emulation in qemu and testing programs like LTP more complicated. Fix the code to always return -in both modes- EFAULT for unaccessible user memory, and EINVAL when called with an invalid signal mask. 
Signed-off-by: Helge Deller Signed-off-by: Andrew Morton Cc: Alexander Viro Cc: Laurent Vivier Link: http://lkml.kernel.org/r/20200530100707.GA10159@ls3530.fritz.box Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- fs/signalfd.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/fs/signalfd.c b/fs/signalfd.c index 4fcd1498acf5..3c40a3bf772c 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -313,9 +313,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, flags); } @@ -324,9 +325,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, 0); } -- GitLab From b3ddf6ba5e28a57729fff1605ae08e21be5c92e3 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Wed, 17 Jun 2020 14:19:04 -0500 Subject: [PATCH 0433/1304] ext4: fix potential negative array index in do_split() [ Upstream commit 5872331b3d91820e14716632ebb56b1399b34fe1 ] If for any reason a directory passed to do_split() does not have enough active entries to exceed half the size of the block, we can end up iterating over all "count" entries without finding a split point. In this case, count == move, and split will be zero, and we will attempt a negative index into map[]. Guard against this by detecting this case, and falling back to split-to-half-of-count instead; in this case we will still have plenty of space (> half blocksize) in each split block. 
Fixes: ef2b02d3e617 ("ext34: ensure do_split leaves enough free space in both blocks") Signed-off-by: Eric Sandeen Reviewed-by: Andreas Dilger Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/f53e246b-647c-64bb-16ec-135383c70ad7@redhat.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/namei.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index a2425e2d439c..186a2dd05bd8 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1732,7 +1732,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, blocksize, hinfo, map); map -= count; dx_sort_map(map, count); - /* Split the existing block in the middle, size-wise */ + /* Ensure that neither split block is over half full */ size = 0; move = 0; for (i = count-1; i >= 0; i--) { @@ -1742,8 +1742,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, size += map[i].size; move++; } - /* map index at which we will split */ - split = count - move; + /* + * map index at which we will split + * + * If the sum of active entries didn't exceed half the block size, just + * split it in half by count; each resulting block will have at least + * half the space free. + */ + if (i > 0) + split = count - move; + else + split = count/2; + hash2 = map[split].hash; continued = hash2 == map[split - 1].hash; dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", -- GitLab From 1373f884a081d336aafd9ed742229fb2cd7a3613 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 28 Jul 2020 15:04:33 +0200 Subject: [PATCH 0434/1304] ext4: don't allow overlapping system zones [ Upstream commit bf9a379d0980e7413d94cb18dac73db2bfc5f470 ] Currently, add_system_zone() just silently merges two added system zones that overlap. However the overlap should not happen and it generally suggests that some unrelated metadata overlap which indicates the fs is corrupted. 
We should have caught such problems earlier (e.g. in ext4_check_descriptors()) but add this check as another line of defense. In later patch we also use this for stricter checking of journal inode extent tree. Reviewed-by: Lukas Czerner Signed-off-by: Jan Kara Link: https://lore.kernel.org/r/20200728130437.7804-3-jack@suse.cz Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/block_validity.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index d203cc935ff8..552164034d34 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -68,7 +68,7 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, ext4_fsblk_t start_blk, unsigned int count) { - struct ext4_system_zone *new_entry = NULL, *entry; + struct ext4_system_zone *new_entry, *entry; struct rb_node **n = &system_blks->root.rb_node, *node; struct rb_node *parent = NULL, *new_node = NULL; @@ -79,30 +79,20 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, n = &(*n)->rb_left; else if (start_blk >= (entry->start_blk + entry->count)) n = &(*n)->rb_right; - else { - if (start_blk + count > (entry->start_blk + - entry->count)) - entry->count = (start_blk + count - - entry->start_blk); - new_node = *n; - new_entry = rb_entry(new_node, struct ext4_system_zone, - node); - break; - } + else /* Unexpected overlap of system zones. 
*/ + return -EFSCORRUPTED; } - if (!new_entry) { - new_entry = kmem_cache_alloc(ext4_system_zone_cachep, - GFP_KERNEL); - if (!new_entry) - return -ENOMEM; - new_entry->start_blk = start_blk; - new_entry->count = count; - new_node = &new_entry->node; - - rb_link_node(new_node, parent, n); - rb_insert_color(new_node, &system_blks->root); - } + new_entry = kmem_cache_alloc(ext4_system_zone_cachep, + GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + new_entry->start_blk = start_blk; + new_entry->count = count; + new_node = &new_entry->node; + + rb_link_node(new_node, parent, n); + rb_insert_color(new_node, &system_blks->root); /* Can we merge to the left? */ node = rb_prev(new_node); -- GitLab From b9d4cab67f7e2fe7c444f82cefef5660c079ad71 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Tue, 11 Aug 2020 13:02:05 +0100 Subject: [PATCH 0435/1304] ASoC: q6routing: add dummy register read/write function [ Upstream commit 796a58fe2b8c9b6668db00d92512ec84be663027 ] Most of the DAPM widgets for DSP ASoC components reuse reg field of the widgets for its internal calculations, however these are not real registers. So read/writes to these numbers are not really valid. However ASoC core will read these registers to get default state during startup. With recent changes to ASoC core, every register read/write failures are reported very verbosely. Prior to this fails to reads are totally ignored, so we never saw any error messages. To fix this add dummy read/write function to return default value. 
Fixes: e3a33673e845 ("ASoC: qdsp6: q6routing: Add q6routing driver") Reported-by: John Stultz Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20200811120205.21805-2-srinivas.kandagatla@linaro.org Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/qcom/qdsp6/q6routing.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c index c6b51571be94..44eee18c658a 100644 --- a/sound/soc/qcom/qdsp6/q6routing.c +++ b/sound/soc/qcom/qdsp6/q6routing.c @@ -968,6 +968,20 @@ static int msm_routing_probe(struct snd_soc_component *c) return 0; } +static unsigned int q6routing_reg_read(struct snd_soc_component *component, + unsigned int reg) +{ + /* default value */ + return 0; +} + +static int q6routing_reg_write(struct snd_soc_component *component, + unsigned int reg, unsigned int val) +{ + /* dummy */ + return 0; +} + static const struct snd_soc_component_driver msm_soc_routing_component = { .ops = &q6pcm_routing_ops, .probe = msm_routing_probe, @@ -976,6 +990,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = { .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets), .dapm_routes = intercon, .num_dapm_routes = ARRAY_SIZE(intercon), + .read = q6routing_reg_read, + .write = q6routing_reg_write, }; static int q6pcm_routing_probe(struct platform_device *pdev) -- GitLab From 04efb368bc0bd90cbb07b8e9fa79dcea11f5b8ce Mon Sep 17 00:00:00 2001 From: Przemyslaw Patynowski Date: Thu, 6 Aug 2020 13:40:59 +0000 Subject: [PATCH 0436/1304] i40e: Set RX_ONLY mode for unicast promiscuous on VLAN [ Upstream commit 4bd5e02a2ed1575c2f65bd3c557a077dd399f0e8 ] Trusted VF with unicast promiscuous mode set, could listen to TX traffic of other VFs. Set unicast promiscuous mode to RX traffic, if VSI has port VLAN configured. Rename misleading I40E_AQC_SET_VSI_PROMISC_TX bit to I40E_AQC_SET_VSI_PROMISC_RX_ONLY. 
Aligned unicast promiscuous with VLAN to the one without VLAN. Fixes: 6c41a7606967 ("i40e: Add promiscuous on VLAN support") Fixes: 3b1200891b7f ("i40e: When in promisc mode apply promisc mode to Tx Traffic as well") Signed-off-by: Przemyslaw Patynowski Signed-off-by: Aleksandr Loktionov Signed-off-by: Arkadiusz Kubalewski Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- .../net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_common.c | 35 ++++++++++++++----- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 80e3eec6134e..a5e5e7e14e6c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1206,7 +1206,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 +#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index eb0ae6ab01e2..e75b4c4872c0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1970,6 +1970,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, return status; } +/** + * i40e_is_aq_api_ver_ge + * @aq: pointer to AdminQ info containing HW API version to compare + * @maj: API major value + * @min: API minor value + * + * Assert whether current HW API version is greater/equal than provided. 
+ **/ +static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj, + u16 min) +{ + return (aq->api_maj_ver > maj || + (aq->api_maj_ver == maj && aq->api_min_ver >= min)); +} + /** * i40e_aq_add_vsi * @hw: pointer to the hw struct @@ -2095,18 +2110,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (rx_only_promisc && - (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1))) - flags |= I40E_AQC_SET_VSI_PROMISC_TX; + if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); - if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1)) - cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -2203,11 +2216,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); - if (enable) + if (enable) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY; + } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5)) + cmd->valid_flags |= + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY); cmd->seid = cpu_to_le16(seid); cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); -- GitLab From 46858b2f609ffd81d6f437a6099d4782fa2e33a8 Mon Sep 17 00:00:00 2001 From: Grzegorz Szczurek Date: Tue, 11 Aug 2020 10:56:49 +0000 Subject: [PATCH 0437/1304] i40e: Fix crash during 
removing i40e driver [ Upstream commit 5b6d4a7f20b09c47ca598760f6dafd554af8b6d5 ] Fix the reason of crashing system by add waiting time to finish reset recovery process before starting remove driver procedure. Now VSI is releasing if VSI is not in reset recovery mode. Without this fix it was possible to start remove driver if other processing command need reset recovery procedure which resulted in null pointer dereference. VSI used by the ethtool process has been cleared by remove driver process. [ 6731.508665] BUG: kernel NULL pointer dereference, address: 0000000000000000 [ 6731.508668] #PF: supervisor read access in kernel mode [ 6731.508670] #PF: error_code(0x0000) - not-present page [ 6731.508671] PGD 0 P4D 0 [ 6731.508674] Oops: 0000 [#1] SMP PTI [ 6731.508679] Hardware name: Intel Corporation S2600WT2R/S2600WT2R, BIOS SE5C610.86B.01.01.0021.032120170601 03/21/2017 [ 6731.508694] RIP: 0010:i40e_down+0x252/0x310 [i40e] [ 6731.508696] Code: c7 78 de fa c0 e8 61 02 3a c1 66 83 bb f6 0c 00 00 00 0f 84 bf 00 00 00 45 31 e4 45 31 ff eb 03 41 89 c7 48 8b 83 98 0c 00 00 <4a> 8b 3c 20 e8 a5 79 02 00 48 83 bb d0 0c 00 00 00 74 10 48 8b 83 [ 6731.508698] RSP: 0018:ffffb75ac7b3faf0 EFLAGS: 00010246 [ 6731.508700] RAX: 0000000000000000 RBX: ffff9c9874bd5000 RCX: 0000000000000007 [ 6731.508701] RDX: 0000000000000000 RSI: 0000000000000096 RDI: ffff9c987f4d9780 [ 6731.508703] RBP: ffffb75ac7b3fb30 R08: 0000000000005b60 R09: 0000000000000004 [ 6731.508704] R10: ffffb75ac64fbd90 R11: 0000000000000001 R12: 0000000000000000 [ 6731.508706] R13: ffff9c97a08e0000 R14: ffff9c97a08e0a68 R15: 0000000000000000 [ 6731.508708] FS: 00007f2617cd2740(0000) GS:ffff9c987f4c0000(0000) knlGS:0000000000000000 [ 6731.508710] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 6731.508711] CR2: 0000000000000000 CR3: 0000001e765c4006 CR4: 00000000003606e0 [ 6731.508713] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 6731.508714] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 
0000000000000400 [ 6731.508715] Call Trace: [ 6731.508734] i40e_vsi_close+0x84/0x90 [i40e] [ 6731.508742] i40e_quiesce_vsi.part.98+0x3c/0x40 [i40e] [ 6731.508749] i40e_pf_quiesce_all_vsi+0x55/0x60 [i40e] [ 6731.508757] i40e_prep_for_reset+0x59/0x130 [i40e] [ 6731.508765] i40e_reconfig_rss_queues+0x5a/0x120 [i40e] [ 6731.508774] i40e_set_channels+0xda/0x170 [i40e] [ 6731.508778] ethtool_set_channels+0xe9/0x150 [ 6731.508781] dev_ethtool+0x1b94/0x2920 [ 6731.508805] dev_ioctl+0xc2/0x590 [ 6731.508811] sock_do_ioctl+0xae/0x150 [ 6731.508813] sock_ioctl+0x34f/0x3c0 [ 6731.508821] ksys_ioctl+0x98/0xb0 [ 6731.508828] __x64_sys_ioctl+0x1a/0x20 [ 6731.508831] do_syscall_64+0x57/0x1c0 [ 6731.508835] entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: 4b8164467b85 ("i40e: Add common function for finding VSI by type") Signed-off-by: Grzegorz Szczurek Signed-off-by: Arkadiusz Kubalewski Tested-by: Aaron Brown Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/i40e/i40e_main.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a74b01bf581e..3200c75b9ed2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -14152,6 +14152,9 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); + while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + usleep_range(1000, 2000); + /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); -- GitLab From 99728347a5351990818805246f84847b760cbc3f Mon Sep 17 00:00:00 2001 From: Fugang Duan Date: Thu, 13 Aug 2020 15:13:14 +0800 Subject: [PATCH 0438/1304] net: fec: correct the error path for regulator disable in probe [ Upstream commit c6165cf0dbb82ded90163dce3ac183fc7a913dc4 ] Correct the error path for regulator disable. 
Fixes: 9269e5560b26 ("net: fec: add phy-reset-gpios PROBE_DEFER check") Signed-off-by: Fugang Duan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/freescale/fec_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 48c58f93b124..3b6da228140e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3659,11 +3659,11 @@ fec_probe(struct platform_device *pdev) failed_irq: failed_init: fec_ptp_stop(pdev); - if (fep->reg_phy) - regulator_disable(fep->reg_phy); failed_reset: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); failed_regulator: clk_disable_unprepare(fep->clk_ahb); failed_clk_ahb: -- GitLab From a74506f2d91c7395e353202e616aa8712a606bff Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Thu, 13 Aug 2020 10:09:00 -0400 Subject: [PATCH 0439/1304] bonding: show saner speed for broadcast mode [ Upstream commit 4ca0d9ac3fd8f9f90b72a15d8da2aca3ffb58418 ] Broadcast mode bonds transmit a copy of all traffic simultaneously out of all interfaces, so the "speed" of the bond isn't really the aggregate of all interfaces, but rather, the speed of the slowest active interface. Also, the type of the speed field is u32, not unsigned long, so adjust that accordingly, as required to make min() function here without complaining about mismatching types. Fixes: bb5b052f751b ("bond: add support to read speed and duplex via ethtool") CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: netdev@vger.kernel.org Acked-by: Jay Vosburgh Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/bonding/bond_main.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 11429df74306..76fd5fc437eb 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4200,13 +4200,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) +{ + if (speed == 0 || speed == SPEED_UNKNOWN) + speed = slave->speed; + else + speed = min(speed, slave->speed); + + return speed; +} + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, struct ethtool_link_ksettings *cmd) { struct bonding *bond = netdev_priv(bond_dev); - unsigned long speed = 0; struct list_head *iter; struct slave *slave; + u32 speed = 0; cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; @@ -4218,8 +4228,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, */ bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { - if (slave->speed != SPEED_UNKNOWN) - speed += slave->speed; + if (slave->speed != SPEED_UNKNOWN) { + if (BOND_MODE(bond) == BOND_MODE_BROADCAST) + speed = bond_mode_bcast_speed(slave, + speed); + else + speed += slave->speed; + } if (cmd->base.duplex == DUPLEX_UNKNOWN && slave->duplex != DUPLEX_UNKNOWN) cmd->base.duplex = slave->duplex; -- GitLab From 8dcf188973549e89ed751a10911eab5ff91a98e8 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 14 Aug 2020 20:05:58 -0700 Subject: [PATCH 0440/1304] bonding: fix a potential double-unregister [ Upstream commit 832707021666411d04795c564a4adea5d6b94f17 ] When we tear down a network namespace, we unregister all the netdevices within it. So we may queue a slave device and a bonding device together in the same unregister queue. 
If the only slave device is non-ethernet, it would automatically unregister the bonding device as well. Thus, we may end up unregistering the bonding device twice. Workaround this special case by checking reg_state. Fixes: 9b5e383c11b0 ("net: Introduce unregister_netdevice_many()") Reported-by: syzbot+af23e7f3e0a7e10c8b67@syzkaller.appspotmail.com Cc: Eric Dumazet Cc: Andy Gospodarek Cc: Jay Vosburgh Signed-off-by: Cong Wang Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/bonding/bond_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 76fd5fc437eb..ee7138a92d5e 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2029,7 +2029,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev, int ret; ret = __bond_release_one(bond_dev, slave_dev, false, true); - if (ret == 0 && !bond_has_slaves(bond)) { + if (ret == 0 && !bond_has_slaves(bond) && + bond_dev->reg_state != NETREG_UNREGISTERING) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond %s\n", bond_dev->name); -- GitLab From 5a5120b162798fdf07147dff03b23b69f147d311 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 12 Aug 2020 18:55:41 +0200 Subject: [PATCH 0441/1304] s390/runtime_instrumentation: fix storage key handling [ Upstream commit 9eaba29c7985236e16468f4e6a49cc18cf01443e ] The key member of the runtime instrumentation control block contains only the access key, not the complete storage key. Therefore the value must be shifted by four bits. Note: this is only relevant for debugging purposes in case somebody compiles a kernel with a default storage access key set to a value not equal to zero. 
Fixes: e4b8b3f33fca ("s390: add support for runtime instrumentation") Reported-by: Claudio Imbrenda Signed-off-by: Heiko Carstens Signed-off-by: Sasha Levin --- arch/s390/kernel/runtime_instr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 125c7f6e8715..1788a5454b6f 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) cb->k = 1; cb->ps = 1; cb->pc = 1; - cb->key = PAGE_DEFAULT_KEY; + cb->key = PAGE_DEFAULT_KEY >> 4; cb->v = 1; } -- GitLab From c5599b901df07d2a91e8b13f35f0393a25b2b832 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 12 Aug 2020 18:56:28 +0200 Subject: [PATCH 0442/1304] s390/ptrace: fix storage key handling [ Upstream commit fd78c59446b8d050ecf3e0897c5a486c7de7c595 ] The key member of the runtime instrumentation control block contains only the access key, not the complete storage key. Therefore the value must be shifted by four bits. Since existing user space does not necessarily query and set the access key correctly, just ignore the user space provided key and use the correct one. Note: this is only relevant for debugging purposes in case somebody compiles a kernel with a default storage access key set to a value not equal to zero. 
Fixes: 262832bc5acd ("s390/ptrace: add runtime instrumention register get/set") Reported-by: Claudio Imbrenda Signed-off-by: Heiko Carstens Signed-off-by: Sasha Levin --- arch/s390/kernel/ptrace.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 65fefbf61e1c..3ffa2847c110 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -1286,7 +1286,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb) cb->pc == 1 && cb->qc == 0 && cb->reserved2 == 0 && - cb->key == PAGE_DEFAULT_KEY && cb->reserved3 == 0 && cb->reserved4 == 0 && cb->reserved5 == 0 && @@ -1350,7 +1349,11 @@ static int s390_runtime_instr_set(struct task_struct *target, kfree(data); return -EINVAL; } - + /* + * Override access key in any case, since user space should + * not be able to set it, nor should it care about it. + */ + ri_cb.key = PAGE_DEFAULT_KEY >> 4; preempt_disable(); if (!target->thread.ri_cb) target->thread.ri_cb = data; -- GitLab From bd79b3b960f26209bf3c06067d8909bf93831564 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Tue, 11 Aug 2020 11:34:52 +0100 Subject: [PATCH 0443/1304] ASoC: msm8916-wcd-analog: fix register Interrupt offset [ Upstream commit ff69c97ef84c9f7795adb49e9f07c9adcdd0c288 ] For some reason interrupt set and clear register offsets are not set correctly. This patch corrects them! 
Fixes: 585e881e5b9e ("ASoC: codecs: Add msm8916-wcd analog codec") Signed-off-by: Srinivas Kandagatla Tested-by: Stephan Gerhold Reviewed-by: Stephan Gerhold Link: https://lore.kernel.org/r/20200811103452.20448-1-srinivas.kandagatla@linaro.org Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/codecs/msm8916-wcd-analog.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index cbdb6d4bb91e..f4aba065c925 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -16,8 +16,8 @@ #define CDC_D_REVISION1 (0xf000) #define CDC_D_PERPH_SUBTYPE (0xf005) -#define CDC_D_INT_EN_SET (0x015) -#define CDC_D_INT_EN_CLR (0x016) +#define CDC_D_INT_EN_SET (0xf015) +#define CDC_D_INT_EN_CLR (0xf016) #define MBHC_SWITCH_INT BIT(7) #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6) #define MBHC_BUTTON_PRESS_DET BIT(5) -- GitLab From 6b2dd0c04bb889a88fbf92f2480f295d36e61ec9 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Thu, 13 Aug 2020 16:41:10 +0800 Subject: [PATCH 0444/1304] ASoC: intel: Fix memleak in sst_media_open [ Upstream commit 062fa09f44f4fb3776a23184d5d296b0c8872eb9 ] When power_up_sst() fails, stream needs to be freed just like when try_module_get() fails. However, current code is returning directly and ends up leaking memory. 
Fixes: 0121327c1a68b ("ASoC: Intel: mfld-pcm: add control for powering up/down dsp") Signed-off-by: Dinghao Liu Acked-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20200813084112.26205-1-dinghao.liu@zju.edu.cn Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/intel/atom/sst-mfld-platform-pcm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 6868e71e3a3f..0572c3c96450 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -339,7 +339,7 @@ static int sst_media_open(struct snd_pcm_substream *substream, ret_val = power_up_sst(stream); if (ret_val < 0) - return ret_val; + goto out_power_up; /* Make sure, that the period size is always even */ snd_pcm_hw_constraint_step(substream->runtime, 0, @@ -348,8 +348,9 @@ static int sst_media_open(struct snd_pcm_substream *substream, return snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); out_ops: - kfree(stream); mutex_unlock(&sst_lock); +out_power_up: + kfree(stream); return ret_val; } -- GitLab From d200964e4deaa72f2a7d2eaefc9953d3df8784ce Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 17 Aug 2020 11:09:13 -0600 Subject: [PATCH 0445/1304] vfio/type1: Add proper error unwind for vfio_iommu_replay() [ Upstream commit aae7a75a821a793ed6b8ad502a5890fb8e8f172d ] The vfio_iommu_replay() function does not currently unwind on error, yet it does pin pages, perform IOMMU mapping, and modify the vfio_dma structure to indicate IOMMU mapping. The IOMMU mappings are torn down when the domain is destroyed, but the other actions go on to cause trouble later. For example, the iommu->domain_list can be empty if we only have a non-IOMMU backed mdev attached. We don't currently check if the list is empty before getting the first entry in the list, which leads to a bogus domain pointer. 
If a vfio_dma entry is erroneously marked as iommu_mapped, we'll attempt to use that bogus pointer to retrieve the existing physical page addresses. This is the scenario that uncovered this issue, attempting to hot-add a vfio-pci device to a container with an existing mdev device and DMA mappings, one of which could not be pinned, causing a failure adding the new group to the existing container and setting the conditions for a subsequent attempt to explode. To resolve this, we can first check if the domain_list is empty so that we can reject replay of a bogus domain, should we ever encounter this inconsistent state again in the future. The real fix though is to add the necessary unwind support, which means cleaning up the current pinning if an IOMMU mapping fails, then walking back through the r-b tree of DMA entries, reading from the IOMMU which ranges are mapped, and unmapping and unpinning those ranges. To be able to do this, we also defer marking the DMA entry as IOMMU mapped until all entries are processed, in order to allow the unwind to know the disposition of each entry. 
Fixes: a54eb55045ae ("vfio iommu type1: Add support for mediated devices") Reported-by: Zhiyi Guo Tested-by: Zhiyi Guo Reviewed-by: Cornelia Huck Signed-off-by: Alex Williamson Signed-off-by: Sasha Levin --- drivers/vfio/vfio_iommu_type1.c | 71 ++++++++++++++++++++++++++++++--- 1 file changed, 66 insertions(+), 5 deletions(-) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 6dbdadb936a8..52083b710b87 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -1193,13 +1193,16 @@ static int vfio_bus_type(struct device *dev, void *data) static int vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) { - struct vfio_domain *d; + struct vfio_domain *d = NULL; struct rb_node *n; unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; int ret; /* Arbitrarily pick the first domain in the list for lookups */ - d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); + if (!list_empty(&iommu->domain_list)) + d = list_first_entry(&iommu->domain_list, + struct vfio_domain, next); + n = rb_first(&iommu->dma_list); for (; n; n = rb_next(n)) { @@ -1217,6 +1220,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, phys_addr_t p; dma_addr_t i; + if (WARN_ON(!d)) { /* mapped w/o a domain?! 
*/ + ret = -EINVAL; + goto unwind; + } + phys = iommu_iova_to_phys(d->domain, iova); if (WARN_ON(!phys)) { @@ -1246,7 +1254,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, if (npage <= 0) { WARN_ON(!npage); ret = (int)npage; - return ret; + goto unwind; } phys = pfn << PAGE_SHIFT; @@ -1255,14 +1263,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, ret = iommu_map(domain->domain, iova, phys, size, dma->prot | domain->prot); - if (ret) - return ret; + if (ret) { + if (!dma->iommu_mapped) + vfio_unpin_pages_remote(dma, iova, + phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, + true); + goto unwind; + } iova += size; } + } + + /* All dmas are now mapped, defer to second tree walk for unwind */ + for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma->iommu_mapped = true; } + return 0; + +unwind: + for (; n; n = rb_prev(n)) { + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node); + dma_addr_t iova; + + if (dma->iommu_mapped) { + iommu_unmap(domain->domain, dma->iova, dma->size); + continue; + } + + iova = dma->iova; + while (iova < dma->iova + dma->size) { + phys_addr_t phys, p; + size_t size; + dma_addr_t i; + + phys = iommu_iova_to_phys(domain->domain, iova); + if (!phys) { + iova += PAGE_SIZE; + continue; + } + + size = PAGE_SIZE; + p = phys + size; + i = iova + size; + while (i < dma->iova + dma->size && + p == iommu_iova_to_phys(domain->domain, i)) { + size += PAGE_SIZE; + p += PAGE_SIZE; + i += PAGE_SIZE; + } + + iommu_unmap(domain->domain, iova, size); + vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT, + size >> PAGE_SHIFT, true); + } + } + + return ret; } /* -- GitLab From ec877b0e7cb26c9cd87361ea2c50c5255b36d56e Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Mon, 17 Aug 2020 11:16:55 -0700 Subject: [PATCH 0446/1304] kvm: x86: Toggling CR4.SMAP does not load PDPTEs in PAE mode [ Upstream commit 427890aff8558eb4326e723835e0eae0e6fe3102 ] See the SDM, volume 3, 
section 4.4.1: If PAE paging would be in use following an execution of MOV to CR0 or MOV to CR4 (see Section 4.1.1) and the instruction is modifying any of CR0.CD, CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the PDPTEs are loaded from the address in CR3. Fixes: 0be0226f07d14 ("KVM: MMU: fix SMAP virtualization") Cc: Xiao Guangrong Signed-off-by: Jim Mattson Reviewed-by: Peter Shier Reviewed-by: Oliver Upton Message-Id: <20200817181655.3716509-2-jmattson@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5b2440e591fc..ff1f764c4709 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -857,7 +857,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE; + X86_CR4_SMEP | X86_CR4_PKE; if (kvm_valid_cr4(vcpu, cr4)) return 1; -- GitLab From e9701d5dd61067d16010388e843b1cc7ab403753 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Mon, 17 Aug 2020 11:16:54 -0700 Subject: [PATCH 0447/1304] kvm: x86: Toggling CR4.PKE does not load PDPTEs in PAE mode [ Upstream commit cb957adb4ea422bd758568df5b2478ea3bb34f35 ] See the SDM, volume 3, section 4.4.1: If PAE paging would be in use following an execution of MOV to CR0 or MOV to CR4 (see Section 4.1.1) and the instruction is modifying any of CR0.CD, CR0.NW, CR0.PG, CR4.PAE, CR4.PGE, CR4.PSE, or CR4.SMEP; then the PDPTEs are loaded from the address in CR3. 
Fixes: b9baba8614890 ("KVM, pkeys: expose CPUID/CR4 to guest") Cc: Huaitong Han Signed-off-by: Jim Mattson Reviewed-by: Peter Shier Reviewed-by: Oliver Upton Message-Id: <20200817181655.3716509-1-jmattson@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ff1f764c4709..430a4bc66f60 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -857,7 +857,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | - X86_CR4_SMEP | X86_CR4_PKE; + X86_CR4_SMEP; if (kvm_valid_cr4(vcpu, cr4)) return 1; -- GitLab From f0a40332820b52fce33c2fe14eb647af02952f6d Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 7 Aug 2020 18:19:08 +0900 Subject: [PATCH 0448/1304] kconfig: qconf: do not limit the pop-up menu to the first row [ Upstream commit fa8de0a3bf3c02e6f00b7746e7e934db522cdda9 ] If you right-click the first row in the option tree, the pop-up menu shows up, but if you right-click the second row or below, the event is ignored due to the following check: if (e->y() <= header()->geometry().bottom()) { Perhaps, the intention was to show the pop-menu only when the tree header was right-clicked, but this handler is not called in that case. Since the origin of e->y() starts from the bottom of the header, this check is odd. Going forward, you can right-click anywhere in the tree to get the pop-up menu. 
Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin --- scripts/kconfig/qconf.cc | 68 ++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index 8f004db6f603..294d4329f481 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc @@ -869,40 +869,40 @@ void ConfigList::focusInEvent(QFocusEvent *e) void ConfigList::contextMenuEvent(QContextMenuEvent *e) { - if (e->y() <= header()->geometry().bottom()) { - if (!headerPopup) { - QAction *action; - - headerPopup = new QMenu(this); - action = new QAction("Show Name", this); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), - parent(), SLOT(setShowName(bool))); - connect(parent(), SIGNAL(showNameChanged(bool)), - action, SLOT(setOn(bool))); - action->setChecked(showName); - headerPopup->addAction(action); - action = new QAction("Show Range", this); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), - parent(), SLOT(setShowRange(bool))); - connect(parent(), SIGNAL(showRangeChanged(bool)), - action, SLOT(setOn(bool))); - action->setChecked(showRange); - headerPopup->addAction(action); - action = new QAction("Show Data", this); - action->setCheckable(true); - connect(action, SIGNAL(toggled(bool)), - parent(), SLOT(setShowData(bool))); - connect(parent(), SIGNAL(showDataChanged(bool)), - action, SLOT(setOn(bool))); - action->setChecked(showData); - headerPopup->addAction(action); - } - headerPopup->exec(e->globalPos()); - e->accept(); - } else - e->ignore(); + if (!headerPopup) { + QAction *action; + + headerPopup = new QMenu(this); + action = new QAction("Show Name", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowName(bool))); + connect(parent(), SIGNAL(showNameChanged(bool)), + action, SLOT(setOn(bool))); + action->setChecked(showName); + headerPopup->addAction(action); + + action = new QAction("Show Range", 
this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowRange(bool))); + connect(parent(), SIGNAL(showRangeChanged(bool)), + action, SLOT(setOn(bool))); + action->setChecked(showRange); + headerPopup->addAction(action); + + action = new QAction("Show Data", this); + action->setCheckable(true); + connect(action, SIGNAL(toggled(bool)), + parent(), SLOT(setShowData(bool))); + connect(parent(), SIGNAL(showDataChanged(bool)), + action, SLOT(setOn(bool))); + action->setChecked(showData); + headerPopup->addAction(action); + } + + headerPopup->exec(e->globalPos()); + e->accept(); } ConfigView*ConfigView::viewList; -- GitLab From a2152780222402f071071f4fd83da184c879bb80 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 18 Aug 2020 01:36:29 +0900 Subject: [PATCH 0449/1304] kconfig: qconf: fix signal connection to invalid slots [ Upstream commit d85de3399f97467baa2026fbbbe587850d01ba8a ] If you right-click in the ConfigList window, you will see the following messages in the console: QObject::connect: No such slot QAction::setOn(bool) in scripts/kconfig/qconf.cc:888 QObject::connect: (sender name: 'config') QObject::connect: No such slot QAction::setOn(bool) in scripts/kconfig/qconf.cc:897 QObject::connect: (sender name: 'config') QObject::connect: No such slot QAction::setOn(bool) in scripts/kconfig/qconf.cc:906 QObject::connect: (sender name: 'config') Right, there is no such slot in QAction. I think this is a typo of setChecked. Due to this bug, when you toggled the menu "Option->Show Name/Range/Data" the state of the context menu was not previously updated. Fix this. 
Fixes: d5d973c3f8a9 ("Port xconfig to Qt5 - Put back some of the old implementation(part 2)") Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin --- scripts/kconfig/qconf.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc index 294d4329f481..1ee33d2e15bf 100644 --- a/scripts/kconfig/qconf.cc +++ b/scripts/kconfig/qconf.cc @@ -878,7 +878,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e) connect(action, SIGNAL(toggled(bool)), parent(), SLOT(setShowName(bool))); connect(parent(), SIGNAL(showNameChanged(bool)), - action, SLOT(setOn(bool))); + action, SLOT(setChecked(bool))); action->setChecked(showName); headerPopup->addAction(action); @@ -887,7 +887,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e) connect(action, SIGNAL(toggled(bool)), parent(), SLOT(setShowRange(bool))); connect(parent(), SIGNAL(showRangeChanged(bool)), - action, SLOT(setOn(bool))); + action, SLOT(setChecked(bool))); action->setChecked(showRange); headerPopup->addAction(action); @@ -896,7 +896,7 @@ void ConfigList::contextMenuEvent(QContextMenuEvent *e) connect(action, SIGNAL(toggled(bool)), parent(), SLOT(setShowData(bool))); connect(parent(), SIGNAL(showDataChanged(bool)), - action, SLOT(setOn(bool))); + action, SLOT(setChecked(bool))); action->setChecked(showData); headerPopup->addAction(action); } @@ -1228,7 +1228,7 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos) action->setCheckable(true); connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool))); - connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool))); + connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool))); action->setChecked(showDebug()); popup->addSeparator(); popup->addAction(action); -- GitLab From 1eb3a9ed6d529814f5061cd92e3c80fa61877000 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Fri, 10 Jul 2020 16:16:51 +0200 Subject: [PATCH 0450/1304] efi: avoid 
error message when booting under Xen [ Upstream commit 6163a985e50cb19d5bdf73f98e45b8af91a77658 ] efifb_probe() will issue an error message in case the kernel is booted as Xen dom0 from UEFI as EFI_MEMMAP won't be set in this case. Avoid that message by calling efi_mem_desc_lookup() only if EFI_MEMMAP is set. Fixes: 38ac0287b7f4 ("fbdev/efifb: Honour UEFI memory map attributes when mapping the FB") Signed-off-by: Juergen Gross Acked-by: Ard Biesheuvel Acked-by: Bartlomiej Zolnierkiewicz Signed-off-by: Juergen Gross Signed-off-by: Sasha Levin --- drivers/video/fbdev/efifb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index cc1006375cac..f50cc1a7c31a 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -449,7 +449,7 @@ static int efifb_probe(struct platform_device *dev) info->apertures->ranges[0].base = efifb_fix.smem_start; info->apertures->ranges[0].size = size_remap; - if (efi_enabled(EFI_BOOT) && + if (efi_enabled(EFI_MEMMAP) && !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) { if ((efifb_fix.smem_start + efifb_fix.smem_len) > (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) { -- GitLab From 32d8e2b4b1774b33a2be126064ad6ded16bad2b9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 20 Aug 2020 06:30:47 +0200 Subject: [PATCH 0451/1304] Fix build error when CONFIG_ACPI is not set/enabled: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit ee87e1557c42dc9c2da11c38e11b87c311569853 ] ../arch/x86/pci/xen.c: In function ‘pci_xen_init’: ../arch/x86/pci/xen.c:410:2: error: implicit declaration of function ‘acpi_noirq_set’; did you mean ‘acpi_irq_get’? 
[-Werror=implicit-function-declaration] acpi_noirq_set(); Fixes: 88e9ca161c13 ("xen/pci: Use acpi_noirq_set() helper to avoid #ifdef") Signed-off-by: Randy Dunlap Reviewed-by: Juergen Gross Cc: Andy Shevchenko Cc: Bjorn Helgaas Cc: Konrad Rzeszutek Wilk Cc: xen-devel@lists.xenproject.org Cc: linux-pci@vger.kernel.org Signed-off-by: Juergen Gross Signed-off-by: Sasha Levin --- arch/x86/pci/xen.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 9112d1cb397b..22da9bfd8a45 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -25,6 +25,7 @@ #include #include #include +#include #include static int xen_pcifront_enable_irq(struct pci_dev *dev) -- GitLab From f0e3b33f47cfe719868e15b3a5d5d25f17312ef6 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Wed, 5 Aug 2020 21:45:48 -0700 Subject: [PATCH 0452/1304] RDMA/bnxt_re: Do not add user qps to flushlist [ Upstream commit a812f2d60a9fb7818f9c81f967180317b52545c0 ] Driver shall add only the kernel qps to the flush list for clean up. During async error events from the HW, driver is adding qps to this list without checking if the qp is kernel qp or not. Add a check to avoid user qp addition to the flush list. 
Fixes: 942c9b6ca8de ("RDMA/bnxt_re: Avoid Hard lockup during error CQE processing") Fixes: c50866e2853a ("bnxt_re: fix the regression due to changes in alloc_pbl") Link: https://lore.kernel.org/r/1596689148-4023-1-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/hw/bnxt_re/main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 589b0d4677d5..f1b666c80f36 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -753,7 +753,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct ib_event event; unsigned int flags; - if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && + rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); -- GitLab From d676b22edb8034427b916a7a17088b54f62156c5 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 21 Aug 2020 10:15:12 +0100 Subject: [PATCH 0453/1304] afs: Fix NULL deref in afs_dynroot_depopulate() [ Upstream commit 5e0b17b026eb7c6de9baa9b0d45a51b05f05abe1 ] If an error occurs during the construction of an afs superblock, it's possible that an error occurs after a superblock is created, but before we've created the root dentry. If the superblock has a dynamic root (ie. what's normally mounted on /afs), the afs_kill_super() will call afs_dynroot_depopulate() to unpin any created dentries - but this will oops if the root hasn't been created yet. Fix this by skipping that bit of code if there is no root dentry. This leads to an oops looking like: general protection fault, ... KASAN: null-ptr-deref in range [0x0000000000000068-0x000000000000006f] ... RIP: 0010:afs_dynroot_depopulate+0x25f/0x529 fs/afs/dynroot.c:385 ... 
Call Trace: afs_kill_super+0x13b/0x180 fs/afs/super.c:535 deactivate_locked_super+0x94/0x160 fs/super.c:335 afs_get_tree+0x1124/0x1460 fs/afs/super.c:598 vfs_get_tree+0x89/0x2f0 fs/super.c:1547 do_new_mount fs/namespace.c:2875 [inline] path_mount+0x1387/0x2070 fs/namespace.c:3192 do_mount fs/namespace.c:3205 [inline] __do_sys_mount fs/namespace.c:3413 [inline] __se_sys_mount fs/namespace.c:3390 [inline] __x64_sys_mount+0x27f/0x300 fs/namespace.c:3390 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 which is oopsing on this line: inode_lock(root->d_inode); presumably because sb->s_root was NULL. Fixes: 0da0b7fd73e4 ("afs: Display manually added cells in dynamic root mount") Reported-by: syzbot+c1eff8205244ae7e11a6@syzkaller.appspotmail.com Signed-off-by: David Howells Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- fs/afs/dynroot.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index 069273a2483f..fc6c42eeb659 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -299,15 +299,17 @@ void afs_dynroot_depopulate(struct super_block *sb) net->dynroot_sb = NULL; mutex_unlock(&net->proc_cells_lock); - inode_lock(root->d_inode); - - /* Remove all the pins for dirs created for manually added cells */ - list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { - if (subdir->d_fsdata) { - subdir->d_fsdata = NULL; - dput(subdir); + if (root) { + inode_lock(root->d_inode); + + /* Remove all the pins for dirs created for manually added cells */ + list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { + if (subdir->d_fsdata) { + subdir->d_fsdata = NULL; + dput(subdir); + } } - } - inode_unlock(root->d_inode); + inode_unlock(root->d_inode); + } } -- GitLab From ae405ea98693f3991192d172cbbcea04c53b0aba Mon Sep 17 00:00:00 2001 From: Jiri Wiesner Date: Sun, 16 Aug 2020 20:52:44 +0200 Subject: [PATCH 0454/1304] bonding: fix 
active-backup failover for current ARP slave [ Upstream commit 0410d07190961ac526f05085765a8d04d926545b ] When the ARP monitor is used for link detection, ARP replies are validated for all slaves (arp_validate=3) and fail_over_mac is set to active, two slaves of an active-backup bond may get stuck in a state where both of them are active and pass packets that they receive to the bond. This state makes IPv6 duplicate address detection fail. The state is reached thus: 1. The current active slave goes down because the ARP target is not reachable. 2. The current ARP slave is chosen and made active. 3. A new slave is enslaved. This new slave becomes the current active slave and can reach the ARP target. As a result, the current ARP slave stays active after the enslave action has finished and the log is littered with "PROBE BAD" messages: > bond0: PROBE: c_arp ens10 && cas ens11 BAD The workaround is to remove the slave with "going back" status from the bond and re-enslave it. This issue was encountered when DPDK PMD interfaces were being enslaved to an active-backup bond. It would be possible to fix the issue in bond_enslave() or bond_change_active_slave() but the ARP monitor was fixed instead to keep most of the actions changing the current ARP slave in the ARP monitor code. The current ARP slave is set as inactive and backup during the commit phase. A new state, BOND_LINK_FAIL, has been introduced for slaves in the context of the ARP monitor. This allows administrators to see how slaves are rotated for sending ARP requests and attempts are made to find a new active slave. Fixes: b2220cad583c9 ("bonding: refactor ARP active-backup monitor") Signed-off-by: Jiri Wiesner Signed-off-by: David S.
Miller Signed-off-by: Sasha Levin --- drivers/net/bonding/bond_main.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index ee7138a92d5e..d32e32e79174 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2773,6 +2773,9 @@ static int bond_ab_arp_inspect(struct bonding *bond) if (bond_time_in_interval(bond, last_rx, 1)) { bond_propose_link_state(slave, BOND_LINK_UP); commit++; + } else if (slave->link == BOND_LINK_BACK) { + bond_propose_link_state(slave, BOND_LINK_FAIL); + commit++; } continue; } @@ -2883,6 +2886,19 @@ static void bond_ab_arp_commit(struct bonding *bond) continue; + case BOND_LINK_FAIL: + bond_set_slave_link_state(slave, BOND_LINK_FAIL, + BOND_SLAVE_NOTIFY_NOW); + bond_set_slave_inactive_flags(slave, + BOND_SLAVE_NOTIFY_NOW); + + /* A slave has just been enslaved and has become + * the current active slave. + */ + if (rtnl_dereference(bond->curr_active_slave)) + RCU_INIT_POINTER(bond->current_arp_slave, NULL); + continue; + default: netdev_err(bond->dev, "impossible: new_link %d on slave %s\n", slave->link_new_state, slave->dev->name); @@ -2932,8 +2948,6 @@ static bool bond_ab_arp_probe(struct bonding *bond) return should_notify_rtnl; } - bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER); - bond_for_each_slave_rcu(bond, slave, iter) { if (!found && !before && bond_slave_is_up(slave)) before = slave; -- GitLab From 676f44ce25afa1edf3f69359bb7c8206a7945311 Mon Sep 17 00:00:00 2001 From: Shay Agroskin Date: Wed, 19 Aug 2020 20:28:36 +0300 Subject: [PATCH 0455/1304] net: ena: Prevent reset after device destruction [ Upstream commit 63d4a4c145cca2e84dc6e62d2ef5cb990c9723c2 ] The reset work is scheduled by the timer routine whenever it detects that a device reset is required (e.g. when a keep_alive signal is missing). 
When releasing device resources in ena_destroy_device() the driver cancels the scheduling of the timer routine without destroying the reset work explicitly. This creates the following bug: The driver is suspended and the ena_suspend() function is called -> This function calls ena_destroy_device() to free the net device resources -> The driver waits for the timer routine to finish its execution and then cancels it, thus preventing it from being called again. If, in its final execution, the timer routine schedules a reset, the reset routine might be called afterwards, and a redundant call to ena_restore_device() would be made. By changing the reset routine we allow it to read the device's state accurately. This is achieved by checking whether ENA_FLAG_TRIGGER_RESET flag is set before resetting the device and making sure both the destruction function and the flag check are under rtnl lock. The ENA_FLAG_TRIGGER_RESET is cleared at the end of the destruction routine. Also surround the flag check with 'likely' because we expect that the reset routine would be called only when ENA_FLAG_TRIGGER_RESET flag is set. The destruction of the timer and reset services in __ena_shutoff() have to stay, even though the timer routine is destroyed in ena_destroy_device(). This is to avoid a case in which the reset routine is scheduled after free_netdev() in __ena_shutoff(), which would create an access to freed memory in adapter->flags. Fixes: 8c5c7abdeb2d ("net: ena: add power management ops to the ENA driver") Signed-off-by: Shay Agroskin Signed-off-by: David S.
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 8736718b1735..55cc70ba5b09 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2647,16 +2647,14 @@ static void ena_fw_reset_device(struct work_struct *work) { struct ena_adapter *adapter = container_of(work, struct ena_adapter, reset_task); - struct pci_dev *pdev = adapter->pdev; - if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { - dev_err(&pdev->dev, - "device reset schedule while reset bit is off\n"); - return; - } rtnl_lock(); - ena_destroy_device(adapter, false); - ena_restore_device(adapter); + + if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + ena_destroy_device(adapter, false); + ena_restore_device(adapter); + } + rtnl_unlock(); } @@ -3392,8 +3390,11 @@ static void ena_remove(struct pci_dev *pdev) netdev->rx_cpu_rmap = NULL; } #endif /* CONFIG_RFS_ACCEL */ - del_timer_sync(&adapter->timer_service); + /* Make sure timer and reset routine won't be called after + * freeing device resources. + */ + del_timer_sync(&adapter->timer_service); cancel_work_sync(&adapter->reset_task); unregister_netdev(netdev); -- GitLab From 391d0ad15088a310a3e6a4c8703793143f57796b Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Wed, 19 Aug 2020 10:33:09 +0800 Subject: [PATCH 0456/1304] net: gemini: Fix missing free_netdev() in error path of gemini_ethernet_port_probe() [ Upstream commit cf96d977381d4a23957bade2ddf1c420b74a26b6 ] Replace alloc_etherdev_mq with devm_alloc_etherdev_mqs. In this way, when probe fails, netdev can be freed automatically. Fixes: 4d5ae32f5e1e ("net: ethernet: Add a driver for Gemini gigabit ethernet") Reported-by: Hulk Robot Signed-off-by: Wang Hai Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/cortina/gemini.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index f402af39da42..16de0fa92ab7 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2392,7 +2392,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) dev_info(dev, "probe %s ID %d\n", dev_name(dev), id); - netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM); + netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM); if (!netdev) { dev_err(dev, "Can't allocate ethernet device #%d\n", id); return -ENOMEM; @@ -2526,7 +2526,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) } port->netdev = NULL; - free_netdev(netdev); return ret; } @@ -2535,7 +2534,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev) struct gemini_ethernet_port *port = platform_get_drvdata(pdev); gemini_port_remove(port); - free_netdev(port->netdev); return 0; } -- GitLab From 1bf1ca93b93a35e54a3f2b6f36f40d026917c679 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Thu, 20 Aug 2020 14:53:15 -0700 Subject: [PATCH 0457/1304] hv_netvsc: Fix the queue_mapping in netvsc_vf_xmit() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit c3d897e01aef8ddc43149e4d661b86f823e3aae7 ] netvsc_vf_xmit() / dev_queue_xmit() will call VF NIC’s ndo_select_queue or netdev_pick_tx() again. They will use skb_get_rx_queue() to get the queue number, so the “skb->queue_mapping - 1” will be used. This may cause the last queue of VF not been used. Use skb_record_rx_queue() here, so that the skb_get_rx_queue() called later will get the correct queue number, and VF will be able to use all queues. Fixes: b3bf5666a510 ("hv_netvsc: defer queue selection to VF") Signed-off-by: Haiyang Zhang Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index e33cbb793b63..4a5d99ecb89d 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -513,7 +513,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, int rc; skb->dev = vf_netdev; - skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); rc = dev_queue_xmit(skb); if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { -- GitLab From 405ef1b43a3d837e50b30b5164a5cab815931d66 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Fri, 21 Aug 2020 06:56:00 -0700 Subject: [PATCH 0458/1304] net: dsa: b53: check for timeout [ Upstream commit 774d977abfd024e6f73484544b9abe5a5cd62de7 ] clang static analysis reports this problem b53_common.c:1583:13: warning: The left expression of the compound assignment is an uninitialized value. The computed value will also be garbage ent.port &= ~BIT(port); ~~~~~~~~ ^ ent is set by a successful call to b53_arl_read(). Unsuccessful calls are caught by an switch statement handling specific returns. b32_arl_read() calls b53_arl_op_wait() which fails with the unhandled -ETIMEDOUT. So add -ETIMEDOUT to the switch statement. Because b53_arl_op_wait() already prints out a message, do not add another one. Fixes: 1da6df85c6fb ("net: dsa: b53: Implement ARL add/del/dump operations") Signed-off-by: Tom Rix Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/dsa/b53/b53_common.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 11f3993ab7f3..294be86420b6 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1335,6 +1335,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, return ret; switch (ret) { + case -ETIMEDOUT: + return ret; case -ENOSPC: dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n", addr, vid); -- GitLab From fa80b284d706f445e353255db0f943e9b4b797cc Mon Sep 17 00:00:00 2001 From: Vasant Hegde Date: Thu, 20 Aug 2020 11:48:44 +0530 Subject: [PATCH 0459/1304] powerpc/pseries: Do not initiate shutdown when system is running on UPS commit 90a9b102eddf6a3f987d15f4454e26a2532c1c98 upstream. As per PAPR we have to look for both EPOW sensor value and event modifier to identify the type of event and take appropriate action. In LoPAPR v1.1 section 10.2.2 includes table 136 "EPOW Action Codes": SYSTEM_SHUTDOWN 3 The system must be shut down. An EPOW-aware OS logs the EPOW error log information, then schedules the system to be shut down to begin after an OS defined delay internal (default is 10 minutes.) Then in section 10.3.2.2.8 there is table 146 "Platform Event Log Format, Version 6, EPOW Section", which includes the "EPOW Event Modifier": For EPOW sensor value = 3 0x01 = Normal system shutdown with no additional delay 0x02 = Loss of utility power, system is running on UPS/Battery 0x03 = Loss of system critical functions, system should be shutdown 0x04 = Ambient temperature too high All other values = reserved We have a user space tool (rtas_errd) on LPAR to monitor for EPOW_SHUTDOWN_ON_UPS. Once it gets an event it initiates shutdown after predefined time. It also starts monitoring for any new EPOW events. If it receives "Power restored" event before predefined time it will cancel the shutdown. 
Otherwise after predefined time it will shutdown the system. Commit 79872e35469b ("powerpc/pseries: All events of EPOW_SYSTEM_SHUTDOWN must initiate shutdown") changed our handling of the "on UPS/Battery" case, to immediately shutdown the system. This breaks existing setups that rely on the userspace tool to delay shutdown and let the system run on the UPS. Fixes: 79872e35469b ("powerpc/pseries: All events of EPOW_SYSTEM_SHUTDOWN must initiate shutdown") Cc: stable@vger.kernel.org # v4.0+ Signed-off-by: Vasant Hegde [mpe: Massage change log and add PAPR references] Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200820061844.306460-1-hegdevasant@linux.vnet.ibm.com Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/pseries/ras.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index e81a285f3a6c..e827108680f2 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -118,7 +118,6 @@ static void handle_system_shutdown(char event_modifier) case EPOW_SHUTDOWN_ON_UPS: pr_emerg("Loss of system power detected. System is running on" " UPS/battery. Check RTAS error log for details\n"); - orderly_poweroff(true); break; case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS: -- GitLab From 2ff3c97b47521d6700cc6485c7935908dcd2c27c Mon Sep 17 00:00:00 2001 From: Li Heng Date: Mon, 20 Jul 2020 15:22:18 +0800 Subject: [PATCH 0460/1304] efi: add missed destroy_workqueue when efisubsys_init fails commit 98086df8b70c06234a8f4290c46064e44dafa0ed upstream. destroy_workqueue() should be called to destroy efi_rts_wq when efisubsys_init() init resources fails. 
Cc: Reported-by: Hulk Robot Signed-off-by: Li Heng Link: https://lore.kernel.org/r/1595229738-10087-1-git-send-email-liheng40@huawei.com Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/efi/efi.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index de1bc38ab39f..a8180f9090fa 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -359,6 +359,7 @@ static int __init efisubsys_init(void) efi_kobj = kobject_create_and_add("efi", firmware_kobj); if (!efi_kobj) { pr_err("efi: Firmware registration failed.\n"); + destroy_workqueue(efi_rts_wq); return -ENOMEM; } @@ -395,6 +396,7 @@ static int __init efisubsys_init(void) generic_ops_unregister(); err_put: kobject_put(efi_kobj); + destroy_workqueue(efi_rts_wq); return error; } -- GitLab From 4957d56414ff5ca8fe62f8f11f0f9814f35b1d11 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 19 Aug 2020 17:12:17 +0100 Subject: [PATCH 0461/1304] epoll: Keep a reference on files added to the check list commit a9ed4a6560b8562b7e2e2bed9527e88001f7b682 upstream. When adding a new fd to an epoll, and this new fd is an epoll fd itself, we recursively scan the fds attached to it to detect cycles, and add non-epoll files to a "check list" that gets subsequently parsed. However, this check list isn't completely safe when deletions can happen concurrently. To sidestep the issue, make sure that a struct file placed on the check list sees its f_count increased, ensuring that a concurrent deletion won't result in the file disappearing from under our feet.
Cc: stable@vger.kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Al Viro Signed-off-by: Marc Zyngier Signed-off-by: Greg Kroah-Hartman --- fs/eventpoll.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 58f48ea0db23..d804ad60be73 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1890,9 +1890,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) * not already there, and calling reverse_path_check() * during ep_insert(). */ - if (list_empty(&epi->ffd.file->f_tfile_llink)) + if (list_empty(&epi->ffd.file->f_tfile_llink)) { + get_file(epi->ffd.file); list_add(&epi->ffd.file->f_tfile_llink, &tfile_check_list); + } } } mutex_unlock(&ep->mtx); @@ -1936,6 +1938,7 @@ static void clear_tfile_check_list(void) file = list_first_entry(&tfile_check_list, struct file, f_tfile_llink); list_del_init(&file->f_tfile_llink); + fput(file); } INIT_LIST_HEAD(&tfile_check_list); } @@ -2095,9 +2098,11 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, clear_tfile_check_list(); goto error_tgt_fput; } - } else + } else { + get_file(tf.file); list_add(&tf.file->f_tfile_llink, &tfile_check_list); + } mutex_lock_nested(&ep->mtx, 0); if (is_file_epoll(tf.file)) { tep = tf.file->private_data; -- GitLab From dcb6e6efb3298e59d90ee05c6ed33de810314892 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 22 Aug 2020 18:25:52 -0400 Subject: [PATCH 0462/1304] do_epoll_ctl(): clean the failure exits up a bit commit 52c479697c9b73f628140dcdfcd39ea302d05482 upstream. 
Signed-off-by: Al Viro Signed-off-by: Marc Zyngier Signed-off-by: Greg Kroah-Hartman --- fs/eventpoll.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index d804ad60be73..f988ccd064a2 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -2094,10 +2094,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_lock(&epmutex); if (is_file_epoll(tf.file)) { error = -ELOOP; - if (ep_loop_check(ep, tf.file) != 0) { - clear_tfile_check_list(); + if (ep_loop_check(ep, tf.file) != 0) goto error_tgt_fput; - } } else { get_file(tf.file); list_add(&tf.file->f_tfile_llink, @@ -2126,8 +2124,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, error = ep_insert(ep, &epds, tf.file, fd, full_check); } else error = -EEXIST; - if (full_check) - clear_tfile_check_list(); break; case EPOLL_CTL_DEL: if (epi) @@ -2150,8 +2146,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_unlock(&ep->mtx); error_tgt_fput: - if (full_check) + if (full_check) { + clear_tfile_check_list(); mutex_unlock(&epmutex); + } fdput(tf); error_fput: -- GitLab From 734654ae7962be55c44ff3fb0bb0652b5149cc17 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Thu, 6 Aug 2020 23:26:11 -0700 Subject: [PATCH 0463/1304] mm/hugetlb: fix calculation of adjust_range_if_pmd_sharing_possible commit 75802ca66354a39ab8e35822747cd08b3384a99a upstream. This is found by code observation only. Firstly, the worst case scenario should assume the whole range was covered by pmd sharing. The old algorithm might not work as expected for ranges like (1g-2m, 1g+2m), where the adjusted range should be (0, 1g+2m) but the expected range should be (0, 2g). Since at it, remove the loop since it should not be required. With that, the new code should be faster too when the invalidating range is huge. Mike said: : With range (1g-2m, 1g+2m) within a vma (0, 2g) the existing code will only : adjust to (0, 1g+2m) which is incorrect. : : We should cc stable. 
The original reason for adjusting the range was to : prevent data corruption (getting wrong page). Since the range is not : always adjusted correctly, the potential for corruption still exists. : : However, I am fairly confident that adjust_range_if_pmd_sharing_possible : is only going to be called in two cases: : : 1) for a single page : 2) for range == entire vma : : In those cases, the current code should produce the correct results. : : To be safe, let's just cc stable. Fixes: 017b1660df89 ("mm: migration: fix migration of huge PMD shared pages") Signed-off-by: Peter Xu Signed-off-by: Andrew Morton Reviewed-by: Mike Kravetz Cc: Andrea Arcangeli Cc: Matthew Wilcox Cc: Link: http://lkml.kernel.org/r/20200730201636.74778-1-peterx@redhat.com Signed-off-by: Linus Torvalds Signed-off-by: Mike Kravetz Signed-off-by: Greg Kroah-Hartman --- mm/hugetlb.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e068c7f75a84..8a5708f31aa0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4650,25 +4650,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { - unsigned long check_addr = *start; + unsigned long a_start, a_end; if (!(vma->vm_flags & VM_MAYSHARE)) return; - for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) { - unsigned long a_start = check_addr & PUD_MASK; - unsigned long a_end = a_start + PUD_SIZE; + /* Extend the range to be PUD aligned for a worst case scenario */ + a_start = ALIGN_DOWN(*start, PUD_SIZE); + a_end = ALIGN(*end, PUD_SIZE); - /* - * If sharing is possible, adjust start/end if necessary.
- */ - if (range_in_vma(vma, a_start, a_end)) { - if (a_start < *start) - *start = a_start; - if (a_end > *end) - *end = a_end; - } - } + /* + * Intersect the range with the vma range, since pmd sharing won't be + * across vma after all + */ + *start = max(vma->vm_start, a_start); + *end = min(vma->vm_end, a_end); } /* -- GitLab From da1754a2fb2d95c355949e282409cccf884ed7a2 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 20 Aug 2020 08:59:08 +0200 Subject: [PATCH 0464/1304] xen: don't reschedule in preemption off sections For support of long running hypercalls xen_maybe_preempt_hcall() is calling cond_resched() in case a hypercall marked as preemptible has been interrupted. Normally this is no problem, as only hypercalls done via some ioctl()s are marked to be preemptible. In rare cases when during such a preemptible hypercall an interrupt occurs and any softirq action is started from irq_exit(), a further hypercall issued by the softirq handler will be regarded to be preemptible, too. This might lead to rescheduling in spite of the softirq handler potentially having set preempt_disable(), leading to splats like: BUG: sleeping function called from invalid context at drivers/xen/preempt.c:37 in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 20775, name: xl INFO: lockdep is turned off. CPU: 1 PID: 20775 Comm: xl Tainted: G D W 5.4.46-1_prgmr_debug.el7.x86_64 #1 Call Trace: dump_stack+0x8f/0xd0 ___might_sleep.cold.76+0xb2/0x103 xen_maybe_preempt_hcall+0x48/0x70 xen_do_hypervisor_callback+0x37/0x40 RIP: e030:xen_hypercall_xen_version+0xa/0x20 Code: ... RSP: e02b:ffffc900400dcc30 EFLAGS: 00000246 RAX: 000000000004000d RBX: 0000000000000200 RCX: ffffffff8100122a RDX: ffff88812e788000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: ffffffff83ee3ad0 R08: 0000000000000001 R09: 0000000000000001 R10: 0000000000000000 R11: 0000000000000246 R12: ffff8881824aa0b0 R13: 0000000865496000 R14: 0000000865496000 R15: ffff88815d040000 ? 
xen_hypercall_xen_version+0xa/0x20 ? xen_force_evtchn_callback+0x9/0x10 ? check_events+0x12/0x20 ? xen_restore_fl_direct+0x1f/0x20 ? _raw_spin_unlock_irqrestore+0x53/0x60 ? debug_dma_sync_single_for_cpu+0x91/0xc0 ? _raw_spin_unlock_irqrestore+0x53/0x60 ? xen_swiotlb_sync_single_for_cpu+0x3d/0x140 ? mlx4_en_process_rx_cq+0x6b6/0x1110 [mlx4_en] ? mlx4_en_poll_rx_cq+0x64/0x100 [mlx4_en] ? net_rx_action+0x151/0x4a0 ? __do_softirq+0xed/0x55b ? irq_exit+0xea/0x100 ? xen_evtchn_do_upcall+0x2c/0x40 ? xen_do_hypervisor_callback+0x29/0x40 ? xen_hypercall_domctl+0xa/0x20 ? xen_hypercall_domctl+0x8/0x20 ? privcmd_ioctl+0x221/0x990 [xen_privcmd] ? do_vfs_ioctl+0xa5/0x6f0 ? ksys_ioctl+0x60/0x90 ? trace_hardirqs_off_thunk+0x1a/0x20 ? __x64_sys_ioctl+0x16/0x20 ? do_syscall_64+0x62/0x250 ? entry_SYSCALL_64_after_hwframe+0x49/0xbe Fix that by testing preempt_count() before calling cond_resched(). In kernel 5.8 this can't happen any more due to the entry code rework (more than 100 patches, so not a candidate for backporting). The issue was introduced in kernel 4.3, so this patch should go into all stable kernels in [4.3 ... 5.7]. Reported-by: Sarah Newman Fixes: 0fa2f5cb2b0ecd8 ("sched/preempt, xen: Use need_resched() instead of should_resched()") Cc: Sarah Newman Cc: stable@vger.kernel.org Signed-off-by: Juergen Gross Tested-by: Chris Brannon Signed-off-by: Greg Kroah-Hartman --- drivers/xen/preempt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c index 5f6b77ea34fb..128375ff80b8 100644 --- a/drivers/xen/preempt.c +++ b/drivers/xen/preempt.c @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); asmlinkage __visible void xen_maybe_preempt_hcall(void) { if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) - && need_resched())) { + && need_resched() && !preempt_count())) { /* * Clear flag as we may be rescheduled on a different * cpu. 
-- GitLab From 903c6bd937ca84ee9e60eca6beb5c2593d0bfd4e Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 28 Aug 2019 11:19:59 -0700 Subject: [PATCH 0465/1304] clk: Evict unregistered clks from parent caches commit bdcf1dc253248542537a742ae1e7ccafdd03f2d3 upstream. We leave a dangling pointer in each clk_core::parents array that has an unregistered clk as a potential parent when that clk_core pointer is freed by clk{_hw}_unregister(). It is impossible for the true parent of a clk to be set with clk_set_parent() once the dangling pointer is left in the cache because we compare parent pointers in clk_fetch_parent_index() instead of checking for a matching clk name or clk_hw pointer. Before commit ede77858473a ("clk: Remove global clk traversal on fetch parent index"), we would check clk_hw pointers, which has a higher chance of being the same between registration and unregistration, but it can still be allocated and freed by the clk provider. In fact, this has been a long standing problem since commit da0f0b2c3ad2 ("clk: Correct lookup logic in clk_fetch_parent_index()") where we stopped trying to compare clk names and skipped over entries in the cache that weren't NULL. There are good (performance) reasons to not do the global tree lookup in cases where the cache holds dangling pointers to parents that have been unregistered. Let's take the performance hit on the uncommon registration path instead. Loop through all the clk_core::parents arrays when a clk is unregistered and set the entry to NULL when the parent cache entry and clk being unregistered are the same pointer. This will fix this problem and avoid the overhead for the "normal" case. Based on a patch by Bjorn Andersson. 
Fixes: da0f0b2c3ad2 ("clk: Correct lookup logic in clk_fetch_parent_index()") Reviewed-by: Bjorn Andersson Tested-by: Sai Prakash Ranjan Signed-off-by: Stephen Boyd Link: https://lkml.kernel.org/r/20190828181959.204401-1-sboyd@kernel.org Tested-by: Naresh Kamboju Signed-off-by: Greg Kroah-Hartman --- drivers/clk/clk.c | 52 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 11 deletions(-) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 8353ab9bd31b..c5cf9e77fe86 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -40,6 +40,17 @@ static HLIST_HEAD(clk_root_list); static HLIST_HEAD(clk_orphan_list); static LIST_HEAD(clk_notifier_list); +static struct hlist_head *all_lists[] = { + &clk_root_list, + &clk_orphan_list, + NULL, +}; + +static struct hlist_head *orphan_list[] = { + &clk_orphan_list, + NULL, +}; + /*** private data structures ***/ struct clk_core { @@ -2618,17 +2629,6 @@ static int inited = 0; static DEFINE_MUTEX(clk_debug_lock); static HLIST_HEAD(clk_debug_list); -static struct hlist_head *all_lists[] = { - &clk_root_list, - &clk_orphan_list, - NULL, -}; - -static struct hlist_head *orphan_list[] = { - &clk_orphan_list, - NULL, -}; - static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, int level) { @@ -3328,6 +3328,34 @@ static const struct clk_ops clk_nodrv_ops = { .set_parent = clk_nodrv_set_parent, }; +static void clk_core_evict_parent_cache_subtree(struct clk_core *root, + struct clk_core *target) +{ + int i; + struct clk_core *child; + + for (i = 0; i < root->num_parents; i++) + if (root->parents[i] == target) + root->parents[i] = NULL; + + hlist_for_each_entry(child, &root->children, child_node) + clk_core_evict_parent_cache_subtree(child, target); +} + +/* Remove this clk from all parent caches */ +static void clk_core_evict_parent_cache(struct clk_core *core) +{ + struct hlist_head **lists; + struct clk_core *root; + + lockdep_assert_held(&prepare_lock); + + for (lists = all_lists; 
*lists; lists++) + hlist_for_each_entry(root, *lists, child_node) + clk_core_evict_parent_cache_subtree(root, core); + +} + /** * clk_unregister - unregister a currently registered clock * @clk: clock to unregister @@ -3366,6 +3394,8 @@ void clk_unregister(struct clk *clk) clk_core_set_parent_nolock(child, NULL); } + clk_core_evict_parent_cache(clk->core); + hlist_del_init(&clk->core->child_node); if (clk->core->prepare_count) -- GitLab From a53dc16499fc9efd8db0b40e45f3344a0fb9c0a2 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 11 Aug 2020 11:27:24 +0100 Subject: [PATCH 0466/1304] KVM: Pass MMU notifier range flags to kvm_unmap_hva_range() commit fdfe7cbd58806522e799e2a50a15aee7f2cbb7b6 upstream. The 'flags' field of 'struct mmu_notifier_range' is used to indicate whether invalidate_range_{start,end}() are permitted to block. In the case of kvm_mmu_notifier_invalidate_range_start(), this field is not forwarded on to the architecture-specific implementation of kvm_unmap_hva_range() and therefore the backend cannot sensibly decide whether or not to block. Add an extra 'flags' parameter to kvm_unmap_hva_range() so that architectures are aware as to whether or not they are permitted to block. 
Cc: Cc: Marc Zyngier Cc: Suzuki K Poulose Cc: James Morse Signed-off-by: Will Deacon Message-Id: <20200811102725.7121-2-will@kernel.org> Signed-off-by: Paolo Bonzini [will: Backport to 4.19; use 'blockable' instead of non-existent range flags] Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/kvm_host.h | 2 +- arch/arm64/include/asm/kvm_host.h | 2 +- arch/mips/include/asm/kvm_host.h | 2 +- arch/mips/kvm/mmu.c | 3 ++- arch/powerpc/include/asm/kvm_host.h | 3 ++- arch/powerpc/kvm/book3s.c | 3 ++- arch/powerpc/kvm/e500_mmu_host.c | 3 ++- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/mmu.c | 3 ++- virt/kvm/arm/mmu.c | 2 +- virt/kvm/kvm_main.c | 2 +- 11 files changed, 17 insertions(+), 11 deletions(-) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index c9128bb187f9..471859cbfe0b 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -234,7 +234,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, bool blockable); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index e9afdfcb8403..5e720742d647 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -370,7 +370,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, bool blockable); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git 
a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 2b3fdfc9e0e7..c254761cb8ad 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -936,7 +936,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, bool blockable); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index d8dcdb350405..098a7afd4d38 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -512,7 +512,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, return 1; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + bool blockable) { handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 2f95e38f0549..7b54d8412367 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -68,7 +68,8 @@ #define KVM_ARCH_WANT_MMU_NOTIFIER extern int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); + unsigned long start, unsigned long end, + bool blockable); extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index cc05f346e042..bc9d1321dc73 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -812,7 +812,8 @@ 
void kvmppc_core_commit_memory_region(struct kvm *kvm, kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + bool blockable) { return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); } diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 8f2985e46f6f..bbb02195dc53 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -737,7 +737,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) return 0; } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + bool blockable) { /* kvm_unmap_hva flushes everything anyways */ kvm_unmap_hva(kvm, start); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ce7b3b22ae86..4876411a072a 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1465,7 +1465,8 @@ asmlinkage void __noreturn kvm_spurious_fault(void); ____kvm_handle_fault_on_reboot(insn, "") #define KVM_ARCH_WANT_MMU_NOTIFIER -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + bool blockable); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 92ff656e1810..a2ff5c214738 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -1956,7 +1956,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler); } -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long 
start, unsigned long end) +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, + bool blockable) { return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp); } diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index a5bc10d30618..3957ff0ecda5 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1825,7 +1825,7 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat } int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, bool blockable) { if (!kvm->arch.pgd) return 0; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1218ea663c6d..2155b52b17ec 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -410,7 +410,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, * count is also read inside the mmu_lock critical section. */ kvm->mmu_notifier_count++; - need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); + need_tlb_flush = kvm_unmap_hva_range(kvm, start, end, blockable); need_tlb_flush |= kvm->tlbs_dirty; /* we've to flush the tlb before the pages can be freed */ if (need_tlb_flush) -- GitLab From 0f09071279b2e5c2ebffa9b60375c52aee576c61 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 11 Aug 2020 11:27:25 +0100 Subject: [PATCH 0467/1304] KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not set commit b5331379bc62611d1026173a09c73573384201d9 upstream. When an MMU notifier call results in unmapping a range that spans multiple PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary, since this avoids running into RCU stalls during VM teardown. 
Unfortunately, if the VM is destroyed as a result of OOM, then blocking is not permitted and the call to the scheduler triggers the following BUG(): | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394 | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper | INFO: lockdep is turned off. | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1 | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015 | Call trace: | dump_backtrace+0x0/0x284 | show_stack+0x1c/0x28 | dump_stack+0xf0/0x1a4 | ___might_sleep+0x2bc/0x2cc | unmap_stage2_range+0x160/0x1ac | kvm_unmap_hva_range+0x1a0/0x1c8 | kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8 | __mmu_notifier_invalidate_range_start+0x218/0x31c | mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0 | __oom_reap_task_mm+0x128/0x268 | oom_reap_task+0xac/0x298 | oom_reaper+0x178/0x17c | kthread+0x1e4/0x1fc | ret_from_fork+0x10/0x30 Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier flags. Cc: Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd") Cc: Marc Zyngier Cc: Suzuki K Poulose Cc: James Morse Signed-off-by: Will Deacon Message-Id: <20200811102725.7121-3-will@kernel.org> Signed-off-by: Paolo Bonzini [will: Backport to 4.19; use 'blockable' instead of non-existent MMU_NOTIFIER_RANGE_BLOCKABLE flag] Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- virt/kvm/arm/mmu.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 3957ff0ecda5..41d6285c3da9 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -323,7 +323,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd, * destroying the VM), otherwise another faulting VCPU may come in and mess * with things behind our backs. 
*/ -static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) +static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size, + bool may_block) { pgd_t *pgd; phys_addr_t addr = start, end = start + size; @@ -348,11 +349,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) * If the range is too large, release the kvm->mmu_lock * to prevent starvation and lockup detector warnings. */ - if (next != end) + if (may_block && next != end) cond_resched_lock(&kvm->mmu_lock); } while (pgd++, addr = next, addr != end); } +static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) +{ + __unmap_stage2_range(kvm, start, size, true); +} + static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr, phys_addr_t end) { @@ -1820,7 +1826,9 @@ static int handle_hva_to_gpa(struct kvm *kvm, static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) { - unmap_stage2_range(kvm, gpa, size); + bool may_block = *(bool *)data; + + __unmap_stage2_range(kvm, gpa, size, may_block); return 0; } @@ -1831,7 +1839,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, return 0; trace_kvm_unmap_hva_range(start, end); - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &blockable); return 0; } -- GitLab From f6d5cb9e2c06f7d583dd9f4f7cca21d13d78c32a Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 26 Aug 2020 10:31:07 +0200 Subject: [PATCH 0468/1304] Linux 4.19.142 Tested-by: Jon Hunter Tested-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5b64e1141984..e5e46aecf357 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 141 +SUBLEVEL = 142 EXTRAVERSION = NAME = "People's Front" -- GitLab From 
0e00bb1cf774025be4f8a0ee27569d3a49b4c8c8 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Fri, 21 Sep 2018 17:03:27 -0700 Subject: [PATCH 0469/1304] UPSTREAM: cgroup: Simplify cgroup_ancestor Simplify cgroup_ancestor function. This is follow-up for commit 7723628101aa ("bpf: Introduce bpf_skb_ancestor_cgroup_id helper") Suggested-by: Tejun Heo Signed-off-by: Andrey Ignatov Signed-off-by: Tejun Heo Bug: 154548692 Signed-off-by: Marco Ballesio (cherry picked from commit 808c43b7c7f70360ed7b9e43e2cf980f388e71fa) Change-Id: I246d9ab057805abd0fa54ed1c23a15c0c2caf552 --- include/linux/cgroup.h | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0e1f062f31fa..0c57a284ac21 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -581,20 +581,11 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp, static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp, int ancestor_level) { - struct cgroup *ptr; - if (cgrp->level < ancestor_level) return NULL; - - for (ptr = cgrp; - ptr && ptr->level > ancestor_level; - ptr = cgroup_parent(ptr)) - ; - - if (ptr && ptr->level == ancestor_level) - return ptr; - - return NULL; + while (cgrp && cgrp->level > ancestor_level) + cgrp = cgroup_parent(cgrp); + return cgrp; } /** -- GitLab From 0d53c300641ed4d7bf0020643c619a7e5131552d Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Sat, 3 Nov 2018 22:27:41 -0400 Subject: [PATCH 0470/1304] UPSTREAM: cgroup: remove unnecessary unlikely() WARN_ON() already contains an unlikely(), so it's not necessary to use unlikely. 
Signed-off-by: Yangtao Li Signed-off-by: Tejun Heo Change-Id: I092c0aae2a06b13d3fc9ecfbb24ab3e8d10235f6 (cherry picked from commit 4d9ebbe2b061a9c25e12ba8539ba172533132eb6) Bug: 154548692 Signed-off-by: Marco Ballesio --- kernel/cgroup/cgroup.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 2dbd5e1b02d8..d35f944867fd 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -6147,10 +6147,8 @@ static ssize_t show_delegatable_files(struct cftype *files, char *buf, ret += snprintf(buf + ret, size - ret, "%s\n", cft->name); - if (unlikely(ret >= size)) { - WARN_ON(1); + if (WARN_ON(ret >= size)) break; - } } return ret; -- GitLab From ce5f91bdfa3b47356b35661f84173eeef05d1299 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 28 Dec 2018 10:31:07 -0800 Subject: [PATCH 0471/1304] UPSTREAM: cgroup: Add named hierarchy disabling to cgroup_no_v1 boot param It can be useful to inhibit all cgroup1 hierarchies especially during transition and for debugging. cgroup_no_v1 can block hierarchies with controllers which leaves out the named hierarchies. Expand it to cover the named hierarchies so that "cgroup_no_v1=all,named" disables all cgroup1 hierarchies. Signed-off-by: Tejun Heo Suggested-by: Marcin Pawlowski Signed-off-by: Tejun Heo Change-Id: Ibd093dd9b70d15402a21db3c1ef56005ebc7f99e (cherry picked from commit 3fc9c12d27b4ded4f1f761a800558dab2e6bbac5) Bug: 154548692 Signed-off-by: Marco Ballesio --- Documentation/admin-guide/kernel-parameters.txt | 8 ++++++-- kernel/cgroup/cgroup-v1.c | 14 +++++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b2678cf459e4..af3044330cce 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -490,10 +490,14 @@ cut the overhead, others just disable the usage. 
So only cgroup_disable=memory is actually worthy} - cgroup_no_v1= [KNL] Disable one, multiple, all cgroup controllers in v1 - Format: { controller[,controller...] | "all" } + cgroup_no_v1= [KNL] Disable cgroup controllers and named hierarchies in v1 + Format: { { controller | "all" | "named" } + [,{ controller | "all" | "named" }...] } Like cgroup_disable, but only applies to cgroup v1; the blacklisted controllers remain available in cgroup2. + "all" blacklists all controllers and "named" disables + named mounts. Specifying both "all" and "named" disables + all v1 hierarchies. cgroup.memory= [KNL] Pass options to the cgroup memory controller. Format: diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 405167715d52..7b47cb0a82e9 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -27,6 +27,9 @@ /* Controllers blocked by the commandline in v1 */ static u16 cgroup_no_v1_mask; +/* disable named v1 mounts */ +static bool cgroup_no_v1_named; + /* * pidlist destructions need to be flushed on cgroup destruction. Use a * separate workqueue as flush domain. @@ -965,6 +968,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) } if (!strncmp(token, "name=", 5)) { const char *name = token + 5; + + /* blocked by boot param? 
*/ + if (cgroup_no_v1_named) + return -ENOENT; /* Can't specify an empty name */ if (!strlen(name)) return -EINVAL; @@ -1294,7 +1301,12 @@ static int __init cgroup_no_v1(char *str) if (!strcmp(token, "all")) { cgroup_no_v1_mask = U16_MAX; - break; + continue; + } + + if (!strcmp(token, "named")) { + cgroup_no_v1_named = true; + continue; } for_each_subsys(ss, i) { -- GitLab From 8e0f48a73d274a449aa2ae148ef0d5f2499e9b93 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 12 Jan 2019 00:20:54 -0500 Subject: [PATCH 0472/1304] UPSTREAM: cgroup: saner refcounting for cgroup_root * make the reference from superblock to cgroup_root counting - do cgroup_put() in cgroup_kill_sb() whether we'd done percpu_ref_kill() or not; matching grab is done when we allocate a new root. That gives the same refcounting rules for all callers of cgroup_do_mount() - a reference to cgroup_root has been grabbed by caller and it either is transferred to new superblock or dropped. * have cgroup_kill_sb() treat an already killed refcount as "just don't bother killing it, then". * after successful cgroup_do_mount() have cgroup1_mount() recheck if we'd raced with mount/umount from somebody else and cgroup_root got killed. In that case we drop the superblock and bugger off with -ERESTARTSYS, same as if we'd found it in the list already dying. * don't bother with delayed initialization of refcount - it's unreliable and not needed. No need to prevent attempts to bump the refcount if we find cgroup_root of another mount in progress - sget will reuse an existing superblock just fine and if the other sb manages to die before we get there, we'll catch that immediately after cgroup_do_mount(). * don't bother with kernfs_pin_sb() - no need for doing that either. 
Signed-off-by: Al Viro Change-Id: I8e088dfc516b76c42d9d4b34db7f49f0cebc5414 (cherry picked from commit 35ac1184244f1329783e1d897f74926d8bb1103a) Bug: 154548692 Signed-off-by: Marco Ballesio --- kernel/cgroup/cgroup-internal.h | 2 +- kernel/cgroup/cgroup-v1.c | 58 ++++++++------------------------- kernel/cgroup/cgroup.c | 16 ++++----- 3 files changed, 21 insertions(+), 55 deletions(-) diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index 75568fcf2180..6f02be18e1d5 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -196,7 +196,7 @@ int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen, void cgroup_free_root(struct cgroup_root *root); void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts); -int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags); +int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask); int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask); struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags, struct cgroup_root *root, unsigned long magic, diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 7b47cb0a82e9..c8c8abf2c563 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -1118,13 +1118,11 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, void *data, unsigned long magic, struct cgroup_namespace *ns) { - struct super_block *pinned_sb = NULL; struct cgroup_sb_opts opts; struct cgroup_root *root; struct cgroup_subsys *ss; struct dentry *dentry; int i, ret; - bool new_root = false; cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp); @@ -1186,29 +1184,6 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, if (root->flags ^ opts.flags) pr_warn("new mount options do not match the existing superblock, will be ignored\n"); - /* - * We want to reuse @root whose lifetime is governed by its - * ->cgrp. 
Let's check whether @root is alive and keep it - * that way. As cgroup_kill_sb() can happen anytime, we - * want to block it by pinning the sb so that @root doesn't - * get killed before mount is complete. - * - * With the sb pinned, tryget_live can reliably indicate - * whether @root can be reused. If it's being killed, - * drain it. We can use wait_queue for the wait but this - * path is super cold. Let's just sleep a bit and retry. - */ - pinned_sb = kernfs_pin_sb(root->kf_root, NULL); - if (IS_ERR(pinned_sb) || - !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { - mutex_unlock(&cgroup_mutex); - if (!IS_ERR_OR_NULL(pinned_sb)) - deactivate_super(pinned_sb); - msleep(10); - ret = restart_syscall(); - goto out_free; - } - ret = 0; goto out_unlock; } @@ -1234,15 +1209,20 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, ret = -ENOMEM; goto out_unlock; } - new_root = true; init_cgroup_root(root, &opts); - ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD); + ret = cgroup_setup_root(root, opts.subsys_mask); if (ret) cgroup_free_root(root); out_unlock: + if (!ret && !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { + mutex_unlock(&cgroup_mutex); + msleep(10); + ret = restart_syscall(); + goto out_free; + } mutex_unlock(&cgroup_mutex); out_free: kfree(opts.release_agent); @@ -1254,25 +1234,13 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, dentry = cgroup_do_mount(&cgroup_fs_type, flags, root, CGROUP_SUPER_MAGIC, ns); - /* - * There's a race window after we release cgroup_mutex and before - * allocating a superblock. Make sure a concurrent process won't - * be able to re-use the root during this window by delaying the - * initialization of root refcnt. 
- */ - if (new_root) { - mutex_lock(&cgroup_mutex); - percpu_ref_reinit(&root->cgrp.self.refcnt); - mutex_unlock(&cgroup_mutex); + if (!IS_ERR(dentry) && percpu_ref_is_dying(&root->cgrp.self.refcnt)) { + struct super_block *sb = dentry->d_sb; + dput(dentry); + deactivate_locked_super(sb); + msleep(10); + dentry = ERR_PTR(restart_syscall()); } - - /* - * If @pinned_sb, we're reusing an existing root and holding an - * extra ref on its sb. Mount is complete. Put the extra ref. - */ - if (pinned_sb) - deactivate_super(pinned_sb); - return dentry; } diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index d35f944867fd..4275faf9be35 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1898,7 +1898,7 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts) set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); } -int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags) +int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) { LIST_HEAD(tmp_links); struct cgroup *root_cgrp = &root->cgrp; @@ -1915,7 +1915,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags) root_cgrp->ancestor_ids[0] = ret; ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, - ref_flags, GFP_KERNEL); + 0, GFP_KERNEL); if (ret) goto out; @@ -2092,18 +2092,16 @@ static void cgroup_kill_sb(struct super_block *sb) struct cgroup_root *root = cgroup_root_from_kf(kf_root); /* - * If @root doesn't have any mounts or children, start killing it. + * If @root doesn't have any children, start killing it. * This prevents new mounts by disabling percpu_ref_tryget_live(). * cgroup_mount() may wait for @root's release. * * And don't kill the default root. 
*/ - if (!list_empty(&root->cgrp.self.children) || - root == &cgrp_dfl_root) - cgroup_put(&root->cgrp); - else + if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root && + !percpu_ref_is_dying(&root->cgrp.self.refcnt)) percpu_ref_kill(&root->cgrp.self.refcnt); - + cgroup_put(&root->cgrp); kernfs_kill_sb(sb); } @@ -5504,7 +5502,7 @@ int __init cgroup_init(void) hash_add(css_set_table, &init_css_set.hlist, css_set_hash(init_css_set.subsys)); - BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0, 0)); + BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0)); mutex_unlock(&cgroup_mutex); -- GitLab From 1dea25023f8025ada65a044984250f96302e10ae Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Wed, 3 Apr 2019 16:03:54 -0700 Subject: [PATCH 0473/1304] UPSTREAM: cgroup: remove extra cgroup_migrate_finish() call The callers of cgroup_migrate_prepare_dst() correctly call cgroup_migrate_finish() for success and failure cases both. No need to call it in cgroup_migrate_prepare_dst() in failure case. Signed-off-by: Shakeel Butt Reviewed-by: Daniel Jordan Signed-off-by: Tejun Heo Change-Id: I785d7ab70a42b1b79aea9852bb14ba5abefcaa9b (cherry picked from commit d6e486ee0ef2f99a4069d9186e53dac61b28cb3c) Bug: 154548692 Signed-off-by: Marco Ballesio --- kernel/cgroup/cgroup.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 4275faf9be35..ee45147cf1c4 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -2530,7 +2530,7 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp); if (!dst_cset) - goto err; + return -ENOMEM; WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); @@ -2562,9 +2562,6 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) } return 0; -err: - cgroup_migrate_finish(mgctx); - return -ENOMEM; } /** -- GitLab From 279c82022c2e6e2b2472118385c6dcb11903504b Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: 
Fri, 19 Apr 2019 10:03:01 -0700 Subject: [PATCH 0474/1304] UPSTREAM: cgroup: rename freezer.c into legacy_freezer.c Freezer.c will contain an implementation of cgroup v2 freezer, so let's rename the v1 freezer to avoid naming conflicts. Signed-off-by: Roman Gushchin Signed-off-by: Tejun Heo Cc: kernel-team@fb.com (cherry picked from commit 50943f3e136adfc421f9768d6ae09ba7b83aaefd) Bug: 154548692 Signed-off-by: Marco Ballesio Change-Id: I7fa1223c841ee70a0c93fa9cc1ca51a63f53d457 --- kernel/cgroup/Makefile | 2 +- kernel/cgroup/{freezer.c => legacy_freezer.c} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename kernel/cgroup/{freezer.c => legacy_freezer.c} (100%) diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile index bfcdae896122..8d5689ca94b9 100644 --- a/kernel/cgroup/Makefile +++ b/kernel/cgroup/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-y := cgroup.o rstat.o namespace.o cgroup-v1.o -obj-$(CONFIG_CGROUP_FREEZER) += freezer.o +obj-$(CONFIG_CGROUP_FREEZER) += legacy_freezer.o obj-$(CONFIG_CGROUP_PIDS) += pids.o obj-$(CONFIG_CGROUP_RDMA) += rdma.o obj-$(CONFIG_CPUSETS) += cpuset.o diff --git a/kernel/cgroup/freezer.c b/kernel/cgroup/legacy_freezer.c similarity index 100% rename from kernel/cgroup/freezer.c rename to kernel/cgroup/legacy_freezer.c -- GitLab From 2d4721389fdeefcde0b03f736062f58b56631479 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 19 Apr 2019 10:03:02 -0700 Subject: [PATCH 0475/1304] UPSTREAM: cgroup: implement __cgroup_task_count() helper The helper is identical to the existing cgroup_task_count() except it doesn't take the css_set_lock by itself, assuming that the caller does. Also, move cgroup_task_count() implementation into kernel/cgroup/cgroup.c, as there is nothing specific to cgroup v1. 
Signed-off-by: Roman Gushchin Signed-off-by: Tejun Heo Cc: kernel-team@fb.com Change-Id: Iaa9085d2375d395a051543d2555389213c2892d6 (cherry picked from commit aade7f9efba098859681f8e88d81a5b44ad09b12) Bug: 154548692 Signed-off-by: Marco Ballesio --- kernel/cgroup/cgroup-internal.h | 1 + kernel/cgroup/cgroup-v1.c | 16 ---------------- kernel/cgroup/cgroup.c | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 34 insertions(+), 16 deletions(-) diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index 6f02be18e1d5..92717bb90059 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -224,6 +224,7 @@ int cgroup_rmdir(struct kernfs_node *kn); int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, struct kernfs_root *kf_root); +int __cgroup_task_count(const struct cgroup *cgrp); int cgroup_task_count(const struct cgroup *cgrp); /* diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index c8c8abf2c563..262c96134c5c 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -339,22 +339,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp, return l; } -/** - * cgroup_task_count - count the number of tasks in a cgroup. - * @cgrp: the cgroup in question - */ -int cgroup_task_count(const struct cgroup *cgrp) -{ - int count = 0; - struct cgrp_cset_link *link; - - spin_lock_irq(&css_set_lock); - list_for_each_entry(link, &cgrp->cset_links, cset_link) - count += link->cset->nr_tasks; - spin_unlock_irq(&css_set_lock); - return count; -} - /* * Load a cgroup's pidarray with either procs' tgids or tasks' pids */ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index ee45147cf1c4..6df63eea071c 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -563,6 +563,39 @@ static void cgroup_get_live(struct cgroup *cgrp) css_get(&cgrp->self); } +/** + * __cgroup_task_count - count the number of tasks in a cgroup. 
The caller + * is responsible for taking the css_set_lock. + * @cgrp: the cgroup in question + */ +int __cgroup_task_count(const struct cgroup *cgrp) +{ + int count = 0; + struct cgrp_cset_link *link; + + lockdep_assert_held(&css_set_lock); + + list_for_each_entry(link, &cgrp->cset_links, cset_link) + count += link->cset->nr_tasks; + + return count; +} + +/** + * cgroup_task_count - count the number of tasks in a cgroup. + * @cgrp: the cgroup in question + */ +int cgroup_task_count(const struct cgroup *cgrp) +{ + int count; + + spin_lock_irq(&css_set_lock); + count = __cgroup_task_count(cgrp); + spin_unlock_irq(&css_set_lock); + + return count; +} + struct cgroup_subsys_state *of_css(struct kernfs_open_file *of) { struct cgroup *cgrp = of->kn->parent->priv; -- GitLab From 5318f3163c860066b1084087d0d8895ad5d80289 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 19 Apr 2019 10:03:04 -0700 Subject: [PATCH 0476/1304] BACKPORT: cgroup: cgroup v2 freezer Cgroup v1 implements the freezer controller, which provides an ability to stop the workload in a cgroup and temporarily free up some resources (cpu, io, network bandwidth and, potentially, memory) for some other tasks. Cgroup v2 lacks this functionality. This patch implements freezer for cgroup v2. Cgroup v2 freezer tries to put tasks into a state similar to jobctl stop. This means that tasks can be killed, ptraced (using PTRACE_SEIZE*), and interrupted. It is possible to attach to a frozen task, get some information (e.g. read registers) and detach. It's also possible to migrate a frozen tasks to another cgroup. This differs cgroup v2 freezer from cgroup v1 freezer, which mostly tried to imitate the system-wide freezer. However uninterruptible sleep is fine when all tasks are going to be frozen (hibernation case), it's not the acceptable state for some subset of the system. Cgroup v2 freezer is not supporting freezing kthreads. 
If a non-root cgroup contains kthread, the cgroup still can be frozen, but the kthread will remain running, the cgroup will be shown as non-frozen, and the notification will not be delivered. * PTRACE_ATTACH is not working because non-fatal signal delivery is blocked in frozen state. There are some interface differences between cgroup v1 and cgroup v2 freezer too, which are required to conform the cgroup v2 interface design principles: 1) There is no separate controller, which has to be turned on: the functionality is always available and is represented by cgroup.freeze and cgroup.events cgroup control files. 2) The desired state is defined by the cgroup.freeze control file. Any hierarchical configuration is allowed. 3) The interface is asynchronous. The actual state is available using cgroup.events control file ("frozen" field). There are no dedicated transitional states. 4) It's allowed to make any changes with the cgroup hierarchy (create new cgroups, remove old cgroups, move tasks between cgroups) no matter if some cgroups are frozen. 
Signed-off-by: Roman Gushchin Signed-off-by: Tejun Heo No-objection-from-me-by: Oleg Nesterov Cc: kernel-team@fb.com Change-Id: I3404119678cbcd7410aa56e9334055cee79d02fa (cherry picked from commit 76f969e8948d82e78e1bc4beb6b9465908e74873) cgroup-defs.h: use the struct cgroup_freezer_state and the freezer field from definitions in I6221a975c04f06249a4f8d693852776ae08a8d8e sched.h: use the frozen field defined in I6221a975c04f06249a4f8d693852776ae08a8d8e Bug: 154548692 Signed-off-by: Marco Ballesio --- include/linux/cgroup-defs.h | 6 + include/linux/cgroup.h | 43 +++++ include/linux/sched/jobctl.h | 2 + kernel/cgroup/Makefile | 2 +- kernel/cgroup/cgroup.c | 110 +++++++++++- kernel/cgroup/freezer.c | 317 +++++++++++++++++++++++++++++++++++ kernel/fork.c | 2 + kernel/signal.c | 70 +++++++- 8 files changed, 542 insertions(+), 10 deletions(-) create mode 100644 kernel/cgroup/freezer.c diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index fae076e4887e..9f511b3e769b 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -65,6 +65,12 @@ enum { * specified at mount time and thus is implemented here. */ CGRP_CPUSET_CLONE_CHILDREN, + + /* Control group has to be frozen. */ + CGRP_FREEZE, + + /* Cgroup is frozen. 
*/ + CGRP_FROZEN, }; /* cgroup_root->flags */ diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0c57a284ac21..11003a115508 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -894,4 +894,47 @@ static inline void put_cgroup_ns(struct cgroup_namespace *ns) free_cgroup_ns(ns); } +#ifdef CONFIG_CGROUPS + +void cgroup_enter_frozen(void); +void cgroup_leave_frozen(bool always_leave); +void cgroup_update_frozen(struct cgroup *cgrp); +void cgroup_freeze(struct cgroup *cgrp, bool freeze); +void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src, + struct cgroup *dst); +void cgroup_freezer_frozen_exit(struct task_struct *task); +static inline bool cgroup_task_freeze(struct task_struct *task) +{ + bool ret; + + if (task->flags & PF_KTHREAD) + return false; + + rcu_read_lock(); + ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags); + rcu_read_unlock(); + + return ret; +} + +static inline bool cgroup_task_frozen(struct task_struct *task) +{ + return task->frozen; +} + +#else /* !CONFIG_CGROUPS */ + +static inline void cgroup_enter_frozen(void) { } +static inline void cgroup_leave_frozen(bool always_leave) { } +static inline bool cgroup_task_freeze(struct task_struct *task) +{ + return false; +} +static inline bool cgroup_task_frozen(struct task_struct *task) +{ + return false; +} + +#endif /* !CONFIG_CGROUPS */ + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h index 98228bd48aee..fa067de9f1a9 100644 --- a/include/linux/sched/jobctl.h +++ b/include/linux/sched/jobctl.h @@ -18,6 +18,7 @@ struct task_struct; #define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */ #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ +#define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) #define JOBCTL_STOP_PENDING (1UL << 
JOBCTL_STOP_PENDING_BIT) @@ -26,6 +27,7 @@ struct task_struct; #define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT) #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) +#define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile index 8d5689ca94b9..5d7a76bfbbb7 100644 --- a/kernel/cgroup/Makefile +++ b/kernel/cgroup/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y := cgroup.o rstat.o namespace.o cgroup-v1.o +obj-y := cgroup.o rstat.o namespace.o cgroup-v1.o freezer.o obj-$(CONFIG_CGROUP_FREEZER) += legacy_freezer.o obj-$(CONFIG_CGROUP_PIDS) += pids.o diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 6df63eea071c..5898456e8a53 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -2363,8 +2363,15 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) get_css_set(to_cset); to_cset->nr_tasks++; css_set_move_task(task, from_cset, to_cset, true); - put_css_set_locked(from_cset); from_cset->nr_tasks--; + /* + * If the source or destination cgroup is frozen, + * the task might require to change its state. 
+ */ + cgroup_freezer_migrate_task(task, from_cset->dfl_cgrp, + to_cset->dfl_cgrp); + put_css_set_locked(from_cset); + } } spin_unlock_irq(&css_set_lock); @@ -3406,8 +3413,11 @@ static ssize_t cgroup_max_depth_write(struct kernfs_open_file *of, static int cgroup_events_show(struct seq_file *seq, void *v) { - seq_printf(seq, "populated %d\n", - cgroup_is_populated(seq_css(seq)->cgroup)); + struct cgroup *cgrp = seq_css(seq)->cgroup; + + seq_printf(seq, "populated %d\n", cgroup_is_populated(cgrp)); + seq_printf(seq, "frozen %d\n", test_bit(CGRP_FROZEN, &cgrp->flags)); + return 0; } @@ -3527,6 +3537,40 @@ static void cgroup_pressure_release(struct kernfs_open_file *of) } #endif /* CONFIG_PSI */ +static int cgroup_freeze_show(struct seq_file *seq, void *v) +{ + struct cgroup *cgrp = seq_css(seq)->cgroup; + + seq_printf(seq, "%d\n", cgrp->freezer.freeze); + + return 0; +} + +static ssize_t cgroup_freeze_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct cgroup *cgrp; + ssize_t ret; + int freeze; + + ret = kstrtoint(strstrip(buf), 0, &freeze); + if (ret) + return ret; + + if (freeze < 0 || freeze > 1) + return -ERANGE; + + cgrp = cgroup_kn_lock_live(of->kn, false); + if (!cgrp) + return -ENOENT; + + cgroup_freeze(cgrp, freeze); + + cgroup_kn_unlock(of->kn); + + return nbytes; +} + static int cgroup_file_open(struct kernfs_open_file *of) { struct cftype *cft = of->kn->priv; @@ -4715,6 +4759,12 @@ static struct cftype cgroup_base_files[] = { .name = "cgroup.stat", .seq_show = cgroup_stat_show, }, + { + .name = "cgroup.freeze", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = cgroup_freeze_show, + .write = cgroup_freeze_write, + }, { .name = "cpu.stat", .flags = CFTYPE_NOT_ON_ROOT, @@ -5074,12 +5124,29 @@ static struct cgroup *cgroup_create(struct cgroup *parent) if (ret) goto out_psi_free; + /* + * New cgroup inherits effective freeze counter, and + * if the parent has to be frozen, the child has too. 
+ */ + cgrp->freezer.e_freeze = parent->freezer.e_freeze; + if (cgrp->freezer.e_freeze) + set_bit(CGRP_FROZEN, &cgrp->flags); + spin_lock_irq(&css_set_lock); for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) { cgrp->ancestor_ids[tcgrp->level] = tcgrp->id; - if (tcgrp != cgrp) + if (tcgrp != cgrp) { tcgrp->nr_descendants++; + + /* + * If the new cgroup is frozen, all ancestor cgroups + * get a new frozen descendant, but their state can't + * change because of this. + */ + if (cgrp->freezer.e_freeze) + tcgrp->freezer.nr_frozen_descendants++; + } } spin_unlock_irq(&css_set_lock); @@ -5370,6 +5437,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) { tcgrp->nr_descendants--; tcgrp->nr_dying_descendants++; + /* + * If the dying cgroup is frozen, decrease frozen descendants + * counters of ancestor cgroups. + */ + if (test_bit(CGRP_FROZEN, &cgrp->flags)) + tcgrp->freezer.nr_frozen_descendants--; } spin_unlock_irq(&css_set_lock); @@ -5823,6 +5896,29 @@ void cgroup_post_fork(struct task_struct *child) cset->nr_tasks++; css_set_move_task(child, NULL, cset, false); } + + /* + * If the cgroup has to be frozen, the new task has too. + * Let's set the JOBCTL_TRAP_FREEZE jobctl bit to get + * the task into the frozen state. + */ + if (unlikely(cgroup_task_freeze(child))) { + struct cgroup *cgrp; + + spin_lock(&child->sighand->siglock); + WARN_ON_ONCE(child->frozen); + cgrp = cset->dfl_cgrp; + child->jobctl |= JOBCTL_TRAP_FREEZE; + spin_unlock(&child->sighand->siglock); + + /* + * Calling cgroup_update_frozen() isn't required here, + * because it will be called anyway a bit later + * from do_freezer_trap(). So we avoid cgroup's + * transient switch from the frozen state and back. 
+ */ + } + spin_unlock_irq(&css_set_lock); } @@ -5872,6 +5968,12 @@ void cgroup_exit(struct task_struct *tsk) css_set_move_task(tsk, cset, NULL, false); list_add_tail(&tsk->cg_list, &cset->dying_tasks); cset->nr_tasks--; + + if (unlikely(cgroup_task_frozen(tsk))) + cgroup_freezer_frozen_exit(tsk); + else if (unlikely(cgroup_task_freeze(tsk))) + cgroup_update_frozen(task_dfl_cgroup(tsk)); + spin_unlock_irq(&css_set_lock); } else { get_css_set(cset); diff --git a/kernel/cgroup/freezer.c b/kernel/cgroup/freezer.c new file mode 100644 index 000000000000..9d8cda478fc9 --- /dev/null +++ b/kernel/cgroup/freezer.c @@ -0,0 +1,317 @@ +//SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +#include "cgroup-internal.h" + +/* + * Propagate the cgroup frozen state upwards by the cgroup tree. + */ +static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen) +{ + int desc = 1; + + /* + * If the new state is frozen, some freezing ancestor cgroups may change + * their state too, depending on if all their descendants are frozen. + * + * Otherwise, all ancestor cgroups are forced into the non-frozen state. + */ + while ((cgrp = cgroup_parent(cgrp))) { + if (frozen) { + cgrp->freezer.nr_frozen_descendants += desc; + if (!test_bit(CGRP_FROZEN, &cgrp->flags) && + test_bit(CGRP_FREEZE, &cgrp->flags) && + cgrp->freezer.nr_frozen_descendants == + cgrp->nr_descendants) { + set_bit(CGRP_FROZEN, &cgrp->flags); + cgroup_file_notify(&cgrp->events_file); + desc++; + } + } else { + cgrp->freezer.nr_frozen_descendants -= desc; + if (test_bit(CGRP_FROZEN, &cgrp->flags)) { + clear_bit(CGRP_FROZEN, &cgrp->flags); + cgroup_file_notify(&cgrp->events_file); + desc++; + } + } + } +} + +/* + * Revisit the cgroup frozen state. + * Checks if the cgroup is really frozen and perform all state transitions. 
+ */ +void cgroup_update_frozen(struct cgroup *cgrp) +{ + bool frozen; + + lockdep_assert_held(&css_set_lock); + + /* + * If the cgroup has to be frozen (CGRP_FREEZE bit set), + * and all tasks are frozen and/or stopped, let's consider + * the cgroup frozen. Otherwise it's not frozen. + */ + frozen = test_bit(CGRP_FREEZE, &cgrp->flags) && + cgrp->freezer.nr_frozen_tasks == __cgroup_task_count(cgrp); + + if (frozen) { + /* Already there? */ + if (test_bit(CGRP_FROZEN, &cgrp->flags)) + return; + + set_bit(CGRP_FROZEN, &cgrp->flags); + } else { + /* Already there? */ + if (!test_bit(CGRP_FROZEN, &cgrp->flags)) + return; + + clear_bit(CGRP_FROZEN, &cgrp->flags); + } + cgroup_file_notify(&cgrp->events_file); + + /* Update the state of ancestor cgroups. */ + cgroup_propagate_frozen(cgrp, frozen); +} + +/* + * Increment cgroup's nr_frozen_tasks. + */ +static void cgroup_inc_frozen_cnt(struct cgroup *cgrp) +{ + cgrp->freezer.nr_frozen_tasks++; +} + +/* + * Decrement cgroup's nr_frozen_tasks. + */ +static void cgroup_dec_frozen_cnt(struct cgroup *cgrp) +{ + cgrp->freezer.nr_frozen_tasks--; + WARN_ON_ONCE(cgrp->freezer.nr_frozen_tasks < 0); +} + +/* + * Enter frozen/stopped state, if not yet there. Update cgroup's counters, + * and revisit the state of the cgroup, if necessary. + */ +void cgroup_enter_frozen(void) +{ + struct cgroup *cgrp; + + if (current->frozen) + return; + + spin_lock_irq(&css_set_lock); + current->frozen = true; + cgrp = task_dfl_cgroup(current); + cgroup_inc_frozen_cnt(cgrp); + cgroup_update_frozen(cgrp); + spin_unlock_irq(&css_set_lock); +} + +/* + * Conditionally leave frozen/stopped state. Update cgroup's counters, + * and revisit the state of the cgroup, if necessary. + * + * If always_leave is not set, and the cgroup is freezing, + * we're racing with the cgroup freezing. In this case, we don't + * drop the frozen counter to avoid a transient switch to + * the unfrozen state. 
+ */ +void cgroup_leave_frozen(bool always_leave) +{ + struct cgroup *cgrp; + + spin_lock_irq(&css_set_lock); + cgrp = task_dfl_cgroup(current); + if (always_leave || !test_bit(CGRP_FREEZE, &cgrp->flags)) { + cgroup_dec_frozen_cnt(cgrp); + cgroup_update_frozen(cgrp); + WARN_ON_ONCE(!current->frozen); + current->frozen = false; + } + spin_unlock_irq(&css_set_lock); + + if (unlikely(current->frozen)) { + /* + * If the task remained in the frozen state, + * make sure it won't reach userspace without + * entering the signal handling loop. + */ + spin_lock_irq(¤t->sighand->siglock); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + } +} + +/* + * Freeze or unfreeze the task by setting or clearing the JOBCTL_TRAP_FREEZE + * jobctl bit. + */ +static void cgroup_freeze_task(struct task_struct *task, bool freeze) +{ + unsigned long flags; + + /* If the task is about to die, don't bother with freezing it. */ + if (!lock_task_sighand(task, &flags)) + return; + + if (freeze) { + task->jobctl |= JOBCTL_TRAP_FREEZE; + signal_wake_up(task, false); + } else { + task->jobctl &= ~JOBCTL_TRAP_FREEZE; + wake_up_process(task); + } + + unlock_task_sighand(task, &flags); +} + +/* + * Freeze or unfreeze all tasks in the given cgroup. + */ +static void cgroup_do_freeze(struct cgroup *cgrp, bool freeze) +{ + struct css_task_iter it; + struct task_struct *task; + + lockdep_assert_held(&cgroup_mutex); + + spin_lock_irq(&css_set_lock); + if (freeze) + set_bit(CGRP_FREEZE, &cgrp->flags); + else + clear_bit(CGRP_FREEZE, &cgrp->flags); + spin_unlock_irq(&css_set_lock); + + css_task_iter_start(&cgrp->self, 0, &it); + while ((task = css_task_iter_next(&it))) { + /* + * Ignore kernel threads here. Freezing cgroups containing + * kthreads isn't supported. 
+ */ + if (task->flags & PF_KTHREAD) + continue; + cgroup_freeze_task(task, freeze); + } + css_task_iter_end(&it); + + /* + * Cgroup state should be revisited here to cover empty leaf cgroups + * and cgroups which descendants are already in the desired state. + */ + spin_lock_irq(&css_set_lock); + if (cgrp->nr_descendants == cgrp->freezer.nr_frozen_descendants) + cgroup_update_frozen(cgrp); + spin_unlock_irq(&css_set_lock); +} + +/* + * Adjust the task state (freeze or unfreeze) and revisit the state of + * source and destination cgroups. + */ +void cgroup_freezer_migrate_task(struct task_struct *task, + struct cgroup *src, struct cgroup *dst) +{ + lockdep_assert_held(&css_set_lock); + + /* + * Kernel threads are not supposed to be frozen at all. + */ + if (task->flags & PF_KTHREAD) + return; + + /* + * Adjust counters of freezing and frozen tasks. + * Note, that if the task is frozen, but the destination cgroup is not + * frozen, we bump both counters to keep them balanced. + */ + if (task->frozen) { + cgroup_inc_frozen_cnt(dst); + cgroup_dec_frozen_cnt(src); + } + cgroup_update_frozen(dst); + cgroup_update_frozen(src); + + /* + * Force the task to the desired state. + */ + cgroup_freeze_task(task, test_bit(CGRP_FREEZE, &dst->flags)); +} + +void cgroup_freezer_frozen_exit(struct task_struct *task) +{ + struct cgroup *cgrp = task_dfl_cgroup(task); + + lockdep_assert_held(&css_set_lock); + + cgroup_dec_frozen_cnt(cgrp); + cgroup_update_frozen(cgrp); +} + +void cgroup_freeze(struct cgroup *cgrp, bool freeze) +{ + struct cgroup_subsys_state *css; + struct cgroup *dsct; + bool applied = false; + + lockdep_assert_held(&cgroup_mutex); + + /* + * Nothing changed? Just exit. + */ + if (cgrp->freezer.freeze == freeze) + return; + + cgrp->freezer.freeze = freeze; + + /* + * Propagate changes downwards the cgroup tree. 
+ */ + css_for_each_descendant_pre(css, &cgrp->self) { + dsct = css->cgroup; + + if (cgroup_is_dead(dsct)) + continue; + + if (freeze) { + dsct->freezer.e_freeze++; + /* + * Already frozen because of ancestor's settings? + */ + if (dsct->freezer.e_freeze > 1) + continue; + } else { + dsct->freezer.e_freeze--; + /* + * Still frozen because of ancestor's settings? + */ + if (dsct->freezer.e_freeze > 0) + continue; + + WARN_ON_ONCE(dsct->freezer.e_freeze < 0); + } + + /* + * Do change actual state: freeze or unfreeze. + */ + cgroup_do_freeze(dsct, freeze); + applied = true; + } + + /* + * Even if the actual state hasn't changed, let's notify a user. + * The state can be enforced by an ancestor cgroup: the cgroup + * can already be in the desired state or it can be locked in the + * opposite state, so that the transition will never happen. + * In both cases it's better to notify a user, that there is + * nothing to wait for. + */ + if (!applied) + cgroup_file_notify(&cgrp->events_file); +} diff --git a/kernel/fork.c b/kernel/fork.c index 830c2da04ba2..3c634a20253d 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1204,7 +1204,9 @@ static int wait_for_vfork_done(struct task_struct *child, int killed; freezer_do_not_count(); + cgroup_enter_frozen(); killed = wait_for_completion_killable(vfork); + cgroup_leave_frozen(false); freezer_count(); if (killed) { diff --git a/kernel/signal.c b/kernel/signal.c index fbcf1f3780da..65fca9750dca 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -43,6 +43,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -151,9 +152,10 @@ static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked) static bool recalc_sigpending_tsk(struct task_struct *t) { - if ((t->jobctl & JOBCTL_PENDING_MASK) || + if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) || PENDING(&t->pending, &t->blocked) || - PENDING(&t->signal->shared_pending, &t->blocked)) { + PENDING(&t->signal->shared_pending, 
&t->blocked) || + cgroup_task_frozen(t)) { set_tsk_thread_flag(t, TIF_SIGPENDING); return true; } @@ -2130,6 +2132,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) preempt_disable(); read_unlock(&tasklist_lock); preempt_enable_no_resched(); + cgroup_enter_frozen(); freezable_schedule(); } else { /* @@ -2308,6 +2311,7 @@ static bool do_signal_stop(int signr) } /* Now we don't run again until woken by SIGCONT or SIGKILL */ + cgroup_enter_frozen(); freezable_schedule(); return true; } else { @@ -2354,6 +2358,43 @@ static void do_jobctl_trap(void) } } +/** + * do_freezer_trap - handle the freezer jobctl trap + * + * Puts the task into frozen state, if only the task is not about to quit. + * In this case it drops JOBCTL_TRAP_FREEZE. + * + * CONTEXT: + * Must be called with @current->sighand->siglock held, + * which is always released before returning. + */ +static void do_freezer_trap(void) + __releases(¤t->sighand->siglock) +{ + /* + * If there are other trap bits pending except JOBCTL_TRAP_FREEZE, + * let's make another loop to give it a chance to be handled. + * In any case, we'll return back. + */ + if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) != + JOBCTL_TRAP_FREEZE) { + spin_unlock_irq(¤t->sighand->siglock); + return; + } + + /* + * Now we're sure that there is no pending fatal signal and no + * pending traps. Clear TIF_SIGPENDING to not get out of schedule() + * immediately (if there is a non-fatal signal pending), and + * put the task into sleep. 
+ */ + __set_current_state(TASK_INTERRUPTIBLE); + clear_thread_flag(TIF_SIGPENDING); + spin_unlock_irq(¤t->sighand->siglock); + cgroup_enter_frozen(); + freezable_schedule(); +} + static int ptrace_signal(int signr, siginfo_t *info) { /* @@ -2466,6 +2507,10 @@ bool get_signal(struct ksignal *ksig) trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, &sighand->action[SIGKILL - 1]); recalc_sigpending(); + current->jobctl &= ~JOBCTL_TRAP_FREEZE; + spin_unlock_irq(&sighand->siglock); + if (unlikely(cgroup_task_frozen(current))) + cgroup_leave_frozen(true); goto fatal; } @@ -2476,9 +2521,24 @@ bool get_signal(struct ksignal *ksig) do_signal_stop(0)) goto relock; - if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { - do_jobctl_trap(); + if (unlikely(current->jobctl & + (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) { + if (current->jobctl & JOBCTL_TRAP_MASK) { + do_jobctl_trap(); + spin_unlock_irq(&sighand->siglock); + } else if (current->jobctl & JOBCTL_TRAP_FREEZE) + do_freezer_trap(); + + goto relock; + } + + /* + * If the task is leaving the frozen state, let's update + * cgroup counters and reset the frozen bit. + */ + if (unlikely(cgroup_task_frozen(current))) { spin_unlock_irq(&sighand->siglock); + cgroup_leave_frozen(true); goto relock; } @@ -2572,8 +2632,8 @@ bool get_signal(struct ksignal *ksig) continue; } - fatal: spin_unlock_irq(&sighand->siglock); + fatal: /* * Anything else is fatal, maybe with a core dump. -- GitLab From f7840a0a07ddeb68c53dde663f9e3b944f8fcaf1 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 16 May 2019 10:38:21 -0700 Subject: [PATCH 0477/1304] UPSTREAM: signal: unconditionally leave the frozen state in ptrace_stop() Alex Xu reported a regression in strace, caused by the introduction of the cgroup v2 freezer. The regression can be reproduced by stracing the following simple program: #include int main() { write(1, "a", 1); return 0; } An attempt to run strace ./a.out leads to the infinite loop: [ pre-main omitted ] write(1, "a", 1) = ? 
ERESTARTSYS (To be restarted if SA_RESTART is set) write(1, "a", 1) = ? ERESTARTSYS (To be restarted if SA_RESTART is set) write(1, "a", 1) = ? ERESTARTSYS (To be restarted if SA_RESTART is set) write(1, "a", 1) = ? ERESTARTSYS (To be restarted if SA_RESTART is set) write(1, "a", 1) = ? ERESTARTSYS (To be restarted if SA_RESTART is set) write(1, "a", 1) = ? ERESTARTSYS (To be restarted if SA_RESTART is set) [ repeats forever ] The problem occurs because the traced task leaves ptrace_stop() (and the signal handling loop) with the frozen bit set. So let's call cgroup_leave_frozen(true) unconditionally after sleeping in ptrace_stop(). With this patch applied, strace works as expected: [ pre-main omitted ] write(1, "a", 1) = 1 exit_group(0) = ? +++ exited with 0 +++ Reported-by: Alex Xu Fixes: 76f969e8948d ("cgroup: cgroup v2 freezer") Signed-off-by: Roman Gushchin Acked-by: Oleg Nesterov Cc: Tejun Heo Signed-off-by: Tejun Heo Change-Id: If644b15ead36ce13f0c2c3dd57eebe3658e3edf7 (cherry picked from commit 05b289263772b0698589abc47771264a685cd365) Bug: 154548692 Signed-off-by: Marco Ballesio --- kernel/signal.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/signal.c b/kernel/signal.c index 65fca9750dca..bb2bf761f422 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2134,6 +2134,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) preempt_enable_no_resched(); cgroup_enter_frozen(); freezable_schedule(); + cgroup_leave_frozen(true); } else { /* * By the time we got the lock, our tracer went away. -- GitLab From fe5770aedaf86054a73e8c023f8c54400492d376 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Thu, 12 Sep 2019 10:56:45 -0700 Subject: [PATCH 0478/1304] UPSTREAM: cgroup: freezer: fix frozen state inheritance If a new child cgroup is created in the frozen cgroup hierarchy (one or more of ancestor cgroups is frozen), the CGRP_FREEZE cgroup flag should be set. 
Otherwise if a process will be attached to the child cgroup, it won't become frozen. The problem can be reproduced with the test_cgfreezer_mkdir test. This is the output before this patch: ~/test_freezer ok 1 test_cgfreezer_simple ok 2 test_cgfreezer_tree ok 3 test_cgfreezer_forkbomb Cgroup /sys/fs/cgroup/cg_test_mkdir_A/cg_test_mkdir_B isn't frozen not ok 4 test_cgfreezer_mkdir ok 5 test_cgfreezer_rmdir ok 6 test_cgfreezer_migrate ok 7 test_cgfreezer_ptrace ok 8 test_cgfreezer_stopped ok 9 test_cgfreezer_ptraced ok 10 test_cgfreezer_vfork And with this patch: ~/test_freezer ok 1 test_cgfreezer_simple ok 2 test_cgfreezer_tree ok 3 test_cgfreezer_forkbomb ok 4 test_cgfreezer_mkdir ok 5 test_cgfreezer_rmdir ok 6 test_cgfreezer_migrate ok 7 test_cgfreezer_ptrace ok 8 test_cgfreezer_stopped ok 9 test_cgfreezer_ptraced ok 10 test_cgfreezer_vfork Reported-by: Mark Crossen Signed-off-by: Roman Gushchin Fixes: 76f969e8948d ("cgroup: cgroup v2 freezer") Cc: Tejun Heo Cc: stable@vger.kernel.org # v5.2+ Signed-off-by: Tejun Heo Change-Id: I6ba7b8dec5600e78bb7448f03fd97a9b43838fa0 (cherry picked from commit 97a61369830ab085df5aed0ff9256f35b07d425a) Bug: 154548692 Signed-off-by: Marco Ballesio --- kernel/cgroup/cgroup.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 5898456e8a53..b5b6c9103501 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -5129,8 +5129,16 @@ static struct cgroup *cgroup_create(struct cgroup *parent) * if the parent has to be frozen, the child has too. */ cgrp->freezer.e_freeze = parent->freezer.e_freeze; - if (cgrp->freezer.e_freeze) + if (cgrp->freezer.e_freeze) { + /* + * Set the CGRP_FREEZE flag, so when a process will be + * attached to the child cgroup, it will become frozen. + * At this point the new cgroup is unpopulated, so we can + * consider it frozen immediately. 
+ */ + set_bit(CGRP_FREEZE, &cgrp->flags); set_bit(CGRP_FROZEN, &cgrp->flags); + } spin_lock_irq(&css_set_lock); for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) { -- GitLab From caf7caca0b7883e3f4df0488e3f23ced5ade9ade Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 9 Oct 2019 17:02:30 +0200 Subject: [PATCH 0479/1304] UPSTREAM: cgroup: freezer: call cgroup_enter_frozen() with preemption disabled in ptrace_stop() ptrace_stop() does preempt_enable_no_resched() to avoid the preemption, but after that cgroup_enter_frozen() does spin_lock/unlock and this adds another preemption point. Reported-and-tested-by: Bruce Ashfield Fixes: 76f969e8948d ("cgroup: cgroup v2 freezer") Cc: stable@vger.kernel.org # v5.2+ Signed-off-by: Oleg Nesterov Acked-by: Roman Gushchin Signed-off-by: Tejun Heo Change-Id: Ic53e0f2d6624b0bb90817b0c57060fb7db971348 (cherry picked from commit 937c6b27c73e02cd4114f95f5c37ba2c29fadba1) Bug: 154548692 Signed-off-by: Marco Ballesio --- kernel/signal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/signal.c b/kernel/signal.c index bb2bf761f422..1c5140a75cee 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2131,8 +2131,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) */ preempt_disable(); read_unlock(&tasklist_lock); - preempt_enable_no_resched(); cgroup_enter_frozen(); + preempt_enable_no_resched(); freezable_schedule(); cgroup_leave_frozen(true); } else { -- GitLab From 5ab3ecd497440eb70b966032a11c0ea01804d0ec Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Tue, 30 Apr 2019 17:57:29 +0800 Subject: [PATCH 0480/1304] UPSTREAM: cgroup: Remove unused cgrp variable The 'cgrp' is set but not used in commit <76f969e8948d8> ("cgroup: cgroup v2 freezer"). Remove it to avoid [-Wunused-but-set-variable] warning. 
Cc: Tejun Heo Signed-off-by: Shaokun Zhang Acked-by: Roman Gushchin Signed-off-by: Tejun Heo (cherry picked from commit 533307dc20a9e84a0687d4ca24aeb669516c0243) Bug: 154548692 Signed-off-by: Marco Ballesio Change-Id: I6221a975c04f06249a4f8d693852776ae08a8d8e --- kernel/cgroup/cgroup.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index b5b6c9103501..e6bad6b3f604 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -5911,11 +5911,8 @@ void cgroup_post_fork(struct task_struct *child) * the task into the frozen state. */ if (unlikely(cgroup_task_freeze(child))) { - struct cgroup *cgrp; - spin_lock(&child->sighand->siglock); WARN_ON_ONCE(child->frozen); - cgrp = cset->dfl_cgrp; child->jobctl |= JOBCTL_TRAP_FREEZE; spin_unlock(&child->sighand->siglock); -- GitLab From 4aa9e2613188b9d3ed3df34663d0758be40281ec Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Fri, 21 Jun 2019 10:52:32 +0100 Subject: [PATCH 0481/1304] UPSTREAM: arm64: vdso: Build vDSO with -ffixed-x18 The vDSO needs to be built with x18 reserved in order to accommodate userspace platform ABIs built on top of Linux that use the register to carry inter-procedural state, as provided for by the AAPCS. An example of such a platform ABI is the one that will be used by an upcoming version of Android. Although this change is currently a no-op due to the fact that the vDSO is currently implemented in pure assembly on arm64, it is necessary in order to prepare for using the generic C implementation of the vDSO. 
[ tglx: Massaged changelog ] Signed-off-by: Peter Collingbourne Signed-off-by: Vincenzo Frascino Signed-off-by: Thomas Gleixner Tested-by: Shijith Thotton Tested-by: Andre Przywara Cc: linux-arch@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-mips@vger.kernel.org Cc: linux-kselftest@vger.kernel.org Cc: Catalin Marinas Cc: Will Deacon Cc: Arnd Bergmann Cc: Russell King Cc: Ralf Baechle Cc: Paul Burton Cc: Daniel Lezcano Cc: Mark Salyzyn Cc: Shuah Khan Cc: Dmitry Safonov <0x7f454c46@gmail.com> Cc: Rasmus Villemoes Cc: Huw Davies Cc: Mark Salyzyn Link: https://lkml.kernel.org/r/20190621095252.32307-6-vincenzo.frascino@arm.com (cherry picked from commit 98cd3c3f83fbba27a6bacd75ad12e8388a61a32a) Signed-off-by: Mark Salyzyn Bug: 154668398 Change-Id: Idfb841361c2942ee12bfe963642c8d9ebb18d2ad --- arch/arm64/kernel/vdso/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index f2eb6ad713fb..7155a6ff320d 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile @@ -21,7 +21,7 @@ ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \ --build-id -n -T ccflags-y += $(DISABLE_LTO) -ccflags-y := -fno-common -fno-builtin -fno-stack-protector +ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18 ccflags-y += -DDISABLE_BRANCH_PROFILING VDSO_LDFLAGS := -Bsymbolic -- GitLab From b588efc6c04ca94821200231fc24a12b397e3dd5 Mon Sep 17 00:00:00 2001 From: Sami Tolvanen Date: Fri, 24 Apr 2020 12:30:46 -0700 Subject: [PATCH 0482/1304] BACKPORT: recordmcount: support >64k sections When compiling a kernel with Clang and LTO, we need to run recordmcount on vmlinux.o with a large number of sections, which currently fails as the program doesn't understand extended section indexes. This change adds support for processing binaries with >64k sections. 
Link: https://lkml.kernel.org/r/20200424193046.160744-1-samitolvanen@google.com Link: https://lore.kernel.org/lkml/CAK7LNARbZhoaA=Nnuw0=gBrkuKbr_4Ng_Ei57uafujZf7Xazgw@mail.gmail.com/ Bug: 166522612 Cc: Kees Cook Reviewed-by: Matt Helsley Signed-off-by: Sami Tolvanen Signed-off-by: Steven Rostedt (VMware) (cherry picked from commit 4ef57b21d6fb49d2b25c47e4cff467a0c2c8b6b7) Signed-off-by: Sami Tolvanen Change-Id: I39acdf51ca882020bbacd5c54299d2e2e6f02ed3 --- scripts/recordmcount.h | 98 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 92 insertions(+), 6 deletions(-) diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index ccfbfde61556..9f8339c7ce54 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -30,6 +30,11 @@ #undef has_rel_mcount #undef tot_relsize #undef get_mcountsym +#undef find_symtab +#undef get_shnum +#undef set_shnum +#undef get_shstrndx +#undef get_symindex #undef get_sym_str_and_relp #undef do_func #undef Elf_Addr @@ -59,6 +64,11 @@ # define __has_rel_mcount __has64_rel_mcount # define has_rel_mcount has64_rel_mcount # define tot_relsize tot64_relsize +# define find_symtab find_symtab64 +# define get_shnum get_shnum64 +# define set_shnum set_shnum64 +# define get_shstrndx get_shstrndx64 +# define get_symindex get_symindex64 # define get_sym_str_and_relp get_sym_str_and_relp_64 # define do_func do64 # define get_mcountsym get_mcountsym_64 @@ -92,6 +102,11 @@ # define __has_rel_mcount __has32_rel_mcount # define has_rel_mcount has32_rel_mcount # define tot_relsize tot32_relsize +# define find_symtab find_symtab32 +# define get_shnum get_shnum32 +# define set_shnum set_shnum32 +# define get_shstrndx get_shstrndx32 +# define get_symindex get_symindex32 # define get_sym_str_and_relp get_sym_str_and_relp_32 # define do_func do32 # define get_mcountsym get_mcountsym_32 @@ -174,6 +189,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp) return is_fake; } +static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word 
const *symtab, + Elf32_Word const *symtab_shndx) +{ + unsigned long offset; + int index; + + if (sym->st_shndx != SHN_XINDEX) + return w2(sym->st_shndx); + + offset = (unsigned long)sym - (unsigned long)symtab; + index = offset / sizeof(*sym); + + return w(symtab_shndx[index]); +} + +static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (shdr0 && !ehdr->e_shnum) + return w(shdr0->sh_size); + + return w2(ehdr->e_shnum); +} + +static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum) +{ + if (new_shnum >= SHN_LORESERVE) { + ehdr->e_shnum = 0; + shdr0->sh_size = w(new_shnum); + } else + ehdr->e_shnum = w2(new_shnum); +} + +static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0) +{ + if (ehdr->e_shstrndx != SHN_XINDEX) + return w2(ehdr->e_shstrndx); + + return w(shdr0->sh_link); +} + +static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0, + unsigned const nhdr, Elf32_Word **symtab, + Elf32_Word **symtab_shndx) +{ + Elf_Shdr const *relhdr; + unsigned k; + + *symtab = NULL; + *symtab_shndx = NULL; + + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { + if (relhdr->sh_type == SHT_SYMTAB) + *symtab = (void *)ehdr + relhdr->sh_offset; + else if (relhdr->sh_type == SHT_SYMTAB_SHNDX) + *symtab_shndx = (void *)ehdr + relhdr->sh_offset; + + if (*symtab && *symtab_shndx) + break; + } +} + /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */ static void append_func(Elf_Ehdr *const ehdr, Elf_Shdr *const shstr, @@ -189,10 +265,12 @@ static void append_func(Elf_Ehdr *const ehdr, char const *mc_name = (sizeof(Elf_Rela) == rel_entsize) ? 
".rela__mcount_loc" : ".rel__mcount_loc"; - unsigned const old_shnum = w2(ehdr->e_shnum); uint_t const old_shoff = _w(ehdr->e_shoff); uint_t const old_shstr_sh_size = _w(shstr->sh_size); uint_t const old_shstr_sh_offset = _w(shstr->sh_offset); + Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr); + unsigned int const old_shnum = get_shnum(ehdr, shdr0); + unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */ uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size); uint_t new_e_shoff; @@ -202,6 +280,8 @@ static void append_func(Elf_Ehdr *const ehdr, t += (_align & -t); /* word-byte align */ new_e_shoff = t; + set_shnum(ehdr, shdr0, new_shnum); + /* body for new shstrtab */ ulseek(fd_map, sb.st_size, SEEK_SET); uwrite(fd_map, old_shstr_sh_offset + (void *)ehdr, old_shstr_sh_size); @@ -246,7 +326,6 @@ static void append_func(Elf_Ehdr *const ehdr, uwrite(fd_map, mrel0, (void *)mrelp - (void *)mrel0); ehdr->e_shoff = _w(new_e_shoff); - ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum)); /* {.rel,}__mcount_loc */ ulseek(fd_map, 0, SEEK_SET); uwrite(fd_map, ehdr, sizeof(*ehdr)); } @@ -419,6 +498,8 @@ static unsigned find_secsym_ndx(unsigned const txtndx, char const *const txtname, uint_t *const recvalp, Elf_Shdr const *const symhdr, + Elf32_Word const *symtab, + Elf32_Word const *symtab_shndx, Elf_Ehdr const *const ehdr) { Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset) @@ -430,7 +511,7 @@ static unsigned find_secsym_ndx(unsigned const txtndx, for (symp = sym0, t = nsym; t; --t, ++symp) { unsigned int const st_bind = ELF_ST_BIND(symp->st_info); - if (txtndx == w2(symp->st_shndx) + if (txtndx == get_symindex(symp, symtab, symtab_shndx) /* avoid STB_WEAK */ && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) { /* function symbols on ARM have quirks, avoid them */ @@ -498,21 +579,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0, return totrelsz; } - /* Overall supervision for Elf32 ET_REL file. 
*/ static void do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) { Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff) + (void *)ehdr); - unsigned const nhdr = w2(ehdr->e_shnum); - Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)]; + unsigned const nhdr = get_shnum(ehdr, shdr0); + Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)]; char const *const shstrtab = (char const *)(_w(shstr->sh_offset) + (void *)ehdr); Elf_Shdr const *relhdr; unsigned k; + Elf32_Word *symtab; + Elf32_Word *symtab_shndx; + /* Upper bound on space: assume all relevant relocs are for mcount. */ unsigned const totrelsz = tot_relsize(shdr0, nhdr, shstrtab, fname); Elf_Rel *const mrel0 = umalloc(totrelsz); @@ -525,6 +608,8 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) unsigned rel_entsize = 0; unsigned symsec_sh_link = 0; + find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx); + for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) { char const *const txtname = has_rel_mcount(relhdr, shdr0, shstrtab, fname); @@ -533,6 +618,7 @@ do_func(Elf_Ehdr *const ehdr, char const *const fname, unsigned const reltype) unsigned const recsym = find_secsym_ndx( w(relhdr->sh_info), txtname, &recval, &shdr0[symsec_sh_link = w(relhdr->sh_link)], + symtab, symtab_shndx, ehdr); rel_entsize = _w(relhdr->sh_entsize); -- GitLab From 6410fbaaaf169cbfa45c44b260ab6386c240f505 Mon Sep 17 00:00:00 2001 From: Will McVicker Date: Fri, 28 Aug 2020 10:51:09 -0700 Subject: [PATCH 0483/1304] ANDROID: GKI: update the ABI xml Leaf changes summary: 1 artifact changed Changed leaf types summary: 0 leaf type changed Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 1 Added function Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 0 Added variable 1 Added function: [A] 'function void usb_hcd_platform_shutdown(platform_device*)' Bug: 162298027 Signed-off-by: lucaswei Change-Id: I7302460d3558eef262e9c77cc2766c6aeee896e9 Signed-off-by: 
Will McVicker --- android/abi_gki_aarch64.xml | 1867 +++++++++++++++++----------------- android/abi_gki_aarch64_qcom | 2 + 2 files changed, 937 insertions(+), 932 deletions(-) diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml index 3d6847f32e87..59e3d91556bc 100644 --- a/android/abi_gki_aarch64.xml +++ b/android/abi_gki_aarch64.xml @@ -2420,6 +2420,7 @@ + @@ -7144,6 +7145,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -7239,7 +7268,7 @@ - + @@ -8047,7 +8076,7 @@ - + @@ -8100,13 +8129,13 @@ - + - + @@ -8647,7 +8676,7 @@ - + @@ -8671,7 +8700,7 @@ - + @@ -9671,7 +9700,7 @@ - + @@ -10036,7 +10065,7 @@ - + @@ -10343,7 +10372,7 @@ - + @@ -13999,21 +14028,21 @@ - + - + - + - + - + - + @@ -15463,7 +15492,7 @@ - + @@ -15595,7 +15624,7 @@ - + @@ -16480,7 +16509,7 @@ - + @@ -16535,15 +16564,15 @@ - + - + - + @@ -16812,12 +16841,12 @@ - + - + @@ -17465,7 +17494,7 @@ - + @@ -17648,7 +17677,7 @@ - + @@ -18241,12 +18270,12 @@ - + - + @@ -19139,69 +19168,69 @@ - - + + - - + + - - + + - - + + - - + + - - - + + + - - + + - - + + - - + + - - + + - - + + - - - + + + - - - + + + - - - + + + - - - + + + @@ -19210,61 +19239,61 @@ - - - + + + - - - + + + - - - + + + - - + + - - - + + + - - - + + + - - + + - - - + + + - - - + + + - - - + + + - - + + - - - + + + @@ -21823,13 +21852,13 @@ - + - + @@ -21911,6 +21940,7 @@ + @@ -21920,6 +21950,7 @@ + @@ -28188,7 +28219,7 @@ - + @@ -29565,10 +29596,10 @@ - + - + @@ -29687,13 +29718,13 @@ - + - + @@ -31568,7 +31599,7 @@ - + @@ -31659,7 +31690,7 @@ - + @@ -32397,7 +32428,7 @@ - + @@ -32416,7 +32447,7 @@ - + @@ -33041,7 +33072,7 @@ - + @@ -33250,7 +33281,7 @@ - + @@ -33270,9 +33301,9 @@ - + - + @@ -33284,7 +33315,7 @@ - + @@ -33601,9 +33632,9 @@ - + - + @@ -33612,7 +33643,7 @@ - + @@ -34268,11 +34299,11 @@ - + - + @@ -34587,108 +34618,108 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -34698,27 +34729,27 @@ - + - 
+ - + - + - + - + - + - + @@ -34726,109 +34757,109 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -35136,7 +35167,7 @@ - + @@ -35332,7 +35363,7 @@ - + @@ -35362,7 +35393,7 @@ - + @@ -35536,7 +35567,7 @@ - + @@ -35653,7 +35684,7 @@ - + @@ -35790,7 +35821,7 @@ - + @@ -35898,7 +35929,7 @@ - + @@ -35913,7 +35944,7 @@ - + @@ -37502,13 +37533,13 @@ - + - + @@ -39899,7 +39930,7 @@ - + @@ -40219,7 +40250,7 @@ - + @@ -40308,7 +40339,7 @@ - + @@ -40922,7 +40953,7 @@ - + @@ -41723,6 +41754,23 @@ + + + + + + + + + + + + + + + + + @@ -41806,7 +41854,7 @@ - + @@ -41818,12 +41866,12 @@ - + - + @@ -41831,12 +41879,12 @@ - + - + @@ -41850,7 +41898,7 @@ - + @@ -41866,11 +41914,11 @@ - + - + @@ -41886,12 +41934,12 @@ - + - + @@ -41922,25 +41970,6 @@ - - - - - - - - - - - - - - - - - - - @@ -41973,7 +42002,7 @@ - + @@ -42392,7 +42421,7 @@ - + @@ -42634,7 +42663,7 @@ - + @@ -42727,165 +42756,165 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -45075,37 +45104,37 @@ - + - + - + - + - + - + - + - + - + - + - + - + @@ -47653,7 +47682,7 @@ - + @@ -47693,18 +47722,18 @@ - + - + - + - + - + @@ -47742,7 +47771,7 @@ - + @@ -47962,7 +47991,7 @@ - + @@ -48061,7 +48090,7 @@ - + @@ -48285,7 +48314,7 @@ - + @@ -49358,21 +49387,21 @@ - + - + - + - + @@ -49389,7 +49418,7 @@ - + @@ -49404,7 +49433,7 @@ - + @@ -49470,7 +49499,7 @@ - + @@ -49834,7 +49863,7 @@ - + @@ -50057,7 +50086,7 @@ - + @@ -50077,12 +50106,12 @@ - + - + @@ -50642,7 +50671,6 @@ - @@ -51229,7 +51257,6 @@ - @@ -52009,7 +52036,7 @@ - + @@ -52125,7 +52152,7 @@ - + @@ -53113,23 +53140,6 @@ - - - - - - - - - - - - - - - - - @@ -53500,7 +53510,7 @@ - + @@ -53512,7 +53522,7 @@ - + @@ -53550,7 +53560,7 @@ - + @@ -53558,7 +53568,7 @@ - + @@ -53602,8 +53612,8 @@ - - + + @@ 
-53611,31 +53621,31 @@ - - + + - - - - + + + + - - + + - - - + + + - - + + - - + + @@ -54476,9 +54486,9 @@ - + - + @@ -57068,7 +57078,7 @@ - + @@ -57595,6 +57605,10 @@ + + + + @@ -64527,62 +64541,62 @@ - - + + - - + + - - + + - - - + + + - - - - - - - - - - - + + + + + + + + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + @@ -64825,7 +64839,7 @@ - + @@ -64950,7 +64964,7 @@ - + @@ -65031,7 +65045,7 @@ - + @@ -65048,7 +65062,7 @@ - + @@ -65165,10 +65179,10 @@ - + - + @@ -65677,18 +65691,18 @@ - + - + - + - + @@ -65750,7 +65764,7 @@ - + @@ -65921,7 +65935,7 @@ - + @@ -65935,7 +65949,7 @@ - + @@ -65963,7 +65977,7 @@ - + @@ -65971,7 +65985,7 @@ - + @@ -66175,7 +66189,7 @@ - + @@ -66187,7 +66201,7 @@ - + @@ -66952,6 +66966,8 @@ + + @@ -67063,13 +67079,13 @@ - + - + @@ -67087,7 +67103,7 @@ - + @@ -67117,7 +67133,7 @@ - + @@ -67408,7 +67424,7 @@ - + @@ -67421,7 +67437,7 @@ - + @@ -67461,7 +67477,7 @@ - + @@ -67824,7 +67840,7 @@ - + @@ -67872,7 +67888,7 @@ - + @@ -68333,58 +68349,58 @@ - - - - + + + + - - - + + + - - + + - - - + + + - - - + + + - - + + - - - + + + - - - - - - + + + + + + - - - - + + + + - - - - + + + + @@ -68430,54 +68446,54 @@ - + - - - - + + + + - - + + - - - + + + - - + + - - - - + + + + - - + + - - - - + + + + - - + + - - + + - - + + @@ -68507,18 +68523,18 @@ - + - + - + @@ -68653,12 +68669,12 @@ - - - - - - + + + + + + @@ -68776,7 +68792,7 @@ - + @@ -68864,7 +68880,7 @@ - + @@ -68901,13 +68917,13 @@ - + - + @@ -68940,7 +68956,7 @@ - + @@ -69547,10 +69563,10 @@ - + - + @@ -69973,7 +69989,7 @@ - + @@ -70037,7 +70053,7 @@ - + @@ -70668,7 +70684,7 @@ - + @@ -70813,13 +70829,13 @@ - + - + @@ -72309,6 +72325,14 @@ + + + + + + + + @@ -72480,7 +72504,7 @@ - + @@ -72613,14 +72637,6 @@ - - - - - - - - @@ -72702,10 +72718,10 @@ - + - + @@ -72761,7 +72777,7 @@ - + @@ -72785,7 +72801,7 @@ - + @@ -72867,7 +72883,7 @@ - + @@ -73545,242 +73561,242 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - 
+ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -73919,178 +73935,178 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -80068,7 +80084,7 @@ - + @@ -81815,7 +81831,7 @@ - + @@ -81856,7 +81872,7 @@ - + @@ -81962,10 +81978,10 @@ - - - - + + + + @@ -82258,7 +82274,7 @@ - + @@ -82272,7 +82288,7 @@ - + @@ -82366,17 +82382,6 @@ - - - - - - - - - - - @@ -82403,7 +82408,6 @@ - @@ -83851,7 +83855,7 @@ - + @@ -83916,8 +83920,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -84043,41 +84082,41 @@ - - - + + + - - - + + + - - - - - + + + + + - - - - - + + + + + - - - + + + - - - + + + - + @@ -84147,7 +84186,7 @@ - + @@ -84156,7 +84195,7 @@ - + @@ -84199,7 +84238,7 @@ - + @@ -84207,14 +84246,6 @@ - - - - - - - - @@ -84249,10 +84280,10 @@ - + - + @@ -84320,7 +84351,7 @@ - + @@ -84367,7 +84398,7 @@ - + @@ -84376,7 +84407,7 @@ - + @@ -84400,13 +84431,13 @@ - + - + @@ -84414,14 +84445,6 @@ - - - - - - - - @@ -84631,7 +84654,15 @@ - + + + + + + + + + @@ -85029,7 +85060,7 @@ - + @@ -85952,7 +85983,7 @@ - + @@ -86172,56 +86203,56 @@ - - - + + + - - - - - - + + + + + + - - - - - - + + + + + + - - - - - - + + + + + + - - - - - - + + + + + + - - + + - - - - + + + + - - - + + + @@ -86775,7 +86806,7 @@ - + @@ -86783,7 +86814,7 @@ - + @@ -87296,7 +87327,7 @@ - + @@ -87473,7 +87504,7 @@ - + @@ -87481,7 +87512,7 @@ - + @@ -87577,7 +87608,7 @@ - + @@ -87742,7 +87773,7 @@ - + @@ -88055,9 +88086,9 @@ - + - + @@ -88066,7 +88097,7 @@ - + @@ -88163,18 +88194,18 @@ - + - + - + - + - + @@ -88220,7 +88251,7 @@ - + @@ -88262,6 +88293,23 @@ + + + + + + + + + + + + + + + + + @@ -88331,7 +88379,7 @@ - + @@ -88444,7 
+88492,7 @@ - + @@ -88657,7 +88705,7 @@ - + @@ -88665,34 +88713,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -88829,7 +88849,7 @@ - + @@ -88855,7 +88875,7 @@ - + @@ -88908,7 +88928,7 @@ - + @@ -90871,7 +90891,7 @@ - + @@ -91747,7 +91767,7 @@ - + @@ -91759,7 +91779,7 @@ - + @@ -93064,7 +93084,7 @@ - + @@ -93072,7 +93092,7 @@ - + @@ -93080,7 +93100,7 @@ - + @@ -93104,7 +93124,7 @@ - + @@ -93112,7 +93132,7 @@ - + @@ -93708,7 +93728,7 @@ - + @@ -93716,7 +93736,7 @@ - + @@ -93724,23 +93744,6 @@ - - - - - - - - - - - - - - - - - @@ -93825,10 +93828,10 @@ - + - + @@ -93858,7 +93861,7 @@ - + @@ -93872,7 +93875,7 @@ - + @@ -93959,10 +93962,10 @@ - + - + @@ -94498,7 +94501,7 @@ - + @@ -94509,7 +94512,7 @@ - + @@ -95383,22 +95386,22 @@ - + - + - + - + - + @@ -95407,7 +95410,7 @@ - + @@ -95418,7 +95421,7 @@ - + @@ -95446,7 +95449,7 @@ - + @@ -95474,7 +95477,7 @@ - + @@ -95483,13 +95486,13 @@ - + - + @@ -95513,7 +95516,7 @@ - + @@ -95536,7 +95539,7 @@ - + @@ -96359,11 +96362,11 @@ - + - + @@ -96396,13 +96399,13 @@ - + - + @@ -97123,7 +97126,7 @@ - + @@ -97265,7 +97268,7 @@ - + @@ -97380,10 +97383,10 @@ - + - + @@ -97465,10 +97468,10 @@ - + - + @@ -98345,7 +98348,7 @@ - + @@ -99274,46 +99277,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -99602,7 +99565,7 @@ - + @@ -99720,6 +99683,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -99982,7 +99985,7 @@ - + diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index c3c44f9a1a83..8fde29744153 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -1911,6 +1911,7 @@ cpuidle_unregister_driver cpu_pm_enter cpu_pm_exit + get_next_event_cpu param_get_bool param_get_uint pending_ipi @@ -2692,6 +2693,7 @@ __usb_create_hcd usb_disabled usb_hcd_is_primary_hcd + usb_hcd_platform_shutdown usb_put_hcd usb_remove_hcd xhci_gen_setup -- GitLab From a0c68fd8e84e361d16c799e67dab0997bb148a12 Mon Sep 17 
00:00:00 2001 From: Todd Kjos Date: Fri, 28 Aug 2020 08:55:25 -0700 Subject: [PATCH 0484/1304] Revert "binder: Prevent context manager from incrementing ref 0" This reverts commit c5665cafbedd2e2a523fe933e452391a02d3adb3. This patch was causing display hangs for Qualcomm after the 5.4.58 merge. Bug: 166779391 Change-Id: Iaf22ede68247422709b00f059e5c4d517f219adf Signed-off-by: Todd Kjos --- drivers/android/binder.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index e84aec4a797d..90a13e370ac6 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3081,12 +3081,6 @@ static void binder_transaction(struct binder_proc *proc, goto err_dead_binder; } e->to_node = target_node->debug_id; - if (WARN_ON(proc == target_proc)) { - return_error = BR_FAILED_REPLY; - return_error_param = -EINVAL; - return_error_line = __LINE__; - goto err_invalid_target_handle; - } if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; @@ -3689,17 +3683,10 @@ static int binder_thread_write(struct binder_proc *proc, struct binder_node *ctx_mgr_node; mutex_lock(&context->context_mgr_node_lock); ctx_mgr_node = context->binder_context_mgr_node; - if (ctx_mgr_node) { - if (ctx_mgr_node->proc == proc) { - binder_user_error("%d:%d context manager tried to acquire desc 0\n", - proc->pid, thread->pid); - mutex_unlock(&context->context_mgr_node_lock); - return -EINVAL; - } + if (ctx_mgr_node) ret = binder_inc_ref_for_node( proc, ctx_mgr_node, strong, NULL, &rdata); - } mutex_unlock(&context->context_mgr_node_lock); } if (ret) -- GitLab From ef771d848744f71e6af82b75f3675dddac14bf73 Mon Sep 17 00:00:00 2001 From: "glider@google.com" Date: Tue, 16 Jun 2020 10:34:35 +0200 Subject: [PATCH 0485/1304] UPSTREAM: security: allow using Clang's zero initialization for stack variables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit Upstream commit f0fe00d4972a8cd4b98cc2c29758615e4d51cdfe. In addition to -ftrivial-auto-var-init=pattern (used by CONFIG_INIT_STACK_ALL now) Clang also supports zero initialization for locals enabled by -ftrivial-auto-var-init=zero. The future of this flag is still being debated (see https://bugs.llvm.org/show_bug.cgi?id=45497). Right now it is guarded by another flag, -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang, which means it may not be supported by future Clang releases. Another possible resolution is that -ftrivial-auto-var-init=zero will persist (as certain users have already started depending on it), but the name of the guard flag will change. In the meantime, zero initialization has proven itself as a good production mitigation measure against uninitialized locals. Unlike pattern initialization, which has a higher chance of triggering existing bugs, zero initialization provides safe defaults for strings, pointers, indexes, and sizes. On the other hand, pattern initialization remains safer for return values. Chrome OS and Android are moving to using zero initialization for production builds. Performance-wise, the difference between pattern and zero initialization is usually negligible, although the generated code for zero initialization is more compact. This patch renames CONFIG_INIT_STACK_ALL to CONFIG_INIT_STACK_ALL_PATTERN and introduces another config option, CONFIG_INIT_STACK_ALL_ZERO, that enables zero initialization for locals if the corresponding flags are supported by Clang. 
Cc: Kees Cook Cc: Nick Desaulniers Cc: Greg Kroah-Hartman Signed-off-by: Alexander Potapenko Link: https://lore.kernel.org/r/20200616083435.223038-1-glider@google.com Reviewed-by: Maciej Żenczykowski Signed-off-by: Kees Cook Change-Id: Ifa2711b14ada169fe7c22d07a41e26195ffd8ea2 --- Makefile | 15 ++++++++++++--- init/main.c | 12 +++++++----- security/Kconfig.hardening | 29 +++++++++++++++++++++++++---- 3 files changed, 44 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index 52f4e3d29936..c8f26a58228c 100644 --- a/Makefile +++ b/Makefile @@ -737,9 +737,18 @@ KBUILD_CFLAGS += -fomit-frame-pointer endif endif -# Initialize all stack variables with a pattern, if desired. -ifdef CONFIG_INIT_STACK_ALL -KBUILD_CFLAGS += -ftrivial-auto-var-init=pattern +# Initialize all stack variables with a 0xAA pattern. +ifdef CONFIG_INIT_STACK_ALL_PATTERN +KBUILD_CFLAGS += -ftrivial-auto-var-init=pattern +endif + +# Initialize all stack variables with a zero value. +ifdef CONFIG_INIT_STACK_ALL_ZERO +# Future support for zero initialization is still being debated, see +# https://bugs.llvm.org/show_bug.cgi?id=45497. These flags are subject to being +# renamed or dropped. 
+KBUILD_CFLAGS += -ftrivial-auto-var-init=zero +KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang endif KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments) diff --git a/init/main.c b/init/main.c index 2bb6cd487b71..4045566bb98a 100644 --- a/init/main.c +++ b/init/main.c @@ -512,14 +512,16 @@ static void __init report_meminit(void) { const char *stack; - if (IS_ENABLED(CONFIG_INIT_STACK_ALL)) - stack = "all"; + if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN)) + stack = "all(pattern)"; + else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO)) + stack = "all(zero)"; else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)) - stack = "byref_all"; + stack = "byref_all(zero)"; else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)) - stack = "byref"; + stack = "byref(zero)"; else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)) - stack = "__user"; + stack = "__user(zero)"; else stack = "off"; diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index b0e9cc084506..234250ce0b6c 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -19,13 +19,16 @@ config GCC_PLUGIN_STRUCTLEAK menu "Memory initialization" -config CC_HAS_AUTO_VAR_INIT +config CC_HAS_AUTO_VAR_INIT_PATTERN def_bool $(cc-option,-ftrivial-auto-var-init=pattern) +config CC_HAS_AUTO_VAR_INIT_ZERO + def_bool $(cc-option,-ftrivial-auto-var-init=zero -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang) + choice prompt "Initialize kernel stack variables at function entry" default GCC_PLUGIN_STRUCTLEAK_BYREF_ALL if COMPILE_TEST && GCC_PLUGINS - default INIT_STACK_ALL if COMPILE_TEST && CC_HAS_AUTO_VAR_INIT + default INIT_STACK_ALL_PATTERN if COMPILE_TEST && CC_HAS_AUTO_VAR_INIT_PATTERN default INIT_STACK_NONE help This option enables initialization of stack variables at @@ -58,9 +61,9 @@ choice of uninitialized stack variable exploits and information exposures. 
- config INIT_STACK_ALL + config INIT_STACK_ALL_PATTERN bool "0xAA-init everything on the stack (strongest)" - depends on CC_HAS_AUTO_VAR_INIT + depends on CC_HAS_AUTO_VAR_INIT_PATTERN help Initializes everything on the stack with a 0xAA pattern. This is intended to eliminate all classes @@ -68,6 +71,24 @@ choice exposures, even variables that were warned to have been left uninitialized. + Pattern initialization is known to provoke many existing bugs + related to uninitialized locals, e.g. pointers receive + non-NULL values, buffer sizes and indices are very big. + + config INIT_STACK_ALL_ZERO + bool "zero-init everything on the stack (strongest and safest)" + depends on CC_HAS_AUTO_VAR_INIT_ZERO + help + Initializes everything on the stack with a zero + value. This is intended to eliminate all classes + of uninitialized stack variable exploits and information + exposures, even variables that were warned to have been + left uninitialized. + + Zero initialization provides safe defaults for strings, + pointers, indices and sizes, and is therefore + more suitable as a security mitigation measure. + endchoice config GCC_PLUGIN_STRUCTLEAK_VERBOSE -- GitLab From 9de56a001b2e707a208ab1fd4aaa92dabce9870d Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Wed, 2 Sep 2020 18:47:50 +0200 Subject: [PATCH 0486/1304] ANDROID: gki_defconfig: initialize locals with zeroes This patch switches compiler-based stack initialization from 0xAA to zero pattern, resulting in much more efficient code and saner defaults for uninitialized local variables. Bug: 154198143 Test: run cuttlefish and observe the following lines in dmesg: test_stackinit: all tests passed! test_meminit: all 130 tests passed! 
Signed-off-by: Alexander Potapenko Change-Id: I49821914df887760e90295d91fa54a2ebda8240b --- arch/arm64/configs/gki_defconfig | 2 +- arch/x86/configs/gki_defconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index 885aa0c5ebed..a76a259bc2ea 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -491,7 +491,7 @@ CONFIG_FORTIFY_SOURCE=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y -CONFIG_INIT_STACK_ALL=y +CONFIG_INIT_STACK_ALL_ZERO=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y CONFIG_CRYPTO_ADIANTUM=y CONFIG_CRYPTO_LZ4=y diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index 9d12142293a9..68e9e518d6c7 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -423,7 +423,7 @@ CONFIG_FORTIFY_SOURCE=y CONFIG_STATIC_USERMODEHELPER=y CONFIG_STATIC_USERMODEHELPER_PATH="" CONFIG_SECURITY_SELINUX=y -CONFIG_INIT_STACK_ALL=y +CONFIG_INIT_STACK_ALL_ZERO=y CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y CONFIG_CRYPTO_ADIANTUM=y CONFIG_CRYPTO_SHA256_SSSE3=y -- GitLab From f81b1d34b57cab7e131087a708116a63f3325ba9 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 28 May 2020 00:58:40 +1000 Subject: [PATCH 0487/1304] powerpc/64s: Don't init FSCR_DSCR in __init_FSCR() commit 0828137e8f16721842468e33df0460044a0c588b upstream. __init_FSCR() was added originally in commit 2468dcf641e4 ("powerpc: Add support for context switching the TAR register") (Feb 2013), and only set FSCR_TAR. At that point FSCR (Facility Status and Control Register) was not context switched, so the setting was permanent after boot. Later we added initialisation of FSCR_DSCR to __init_FSCR(), in commit 54c9b2253d34 ("powerpc: Set DSCR bit in FSCR setup") (Mar 2013), again that was permanent after boot. 
Then commit 2517617e0de6 ("powerpc: Fix context switch DSCR on POWER8") (Aug 2013) added a limited context switch of FSCR, just the FSCR_DSCR bit was context switched based on thread.dscr_inherit. That commit said "This clears the H/FSCR DSCR bit initially", but it didn't, it left the initialisation of FSCR_DSCR in __init_FSCR(). However the initial context switch from init_task to pid 1 would clear FSCR_DSCR because thread.dscr_inherit was 0. That commit also introduced the requirement that FSCR_DSCR be clear for user processes, so that we can take the facility unavailable interrupt in order to manage dscr_inherit. Then in commit 152d523e6307 ("powerpc: Create context switch helpers save_sprs() and restore_sprs()") (Dec 2015) FSCR was added to thread_struct. However it still wasn't fully context switched, we just took the existing value and set FSCR_DSCR if the new thread had dscr_inherit set. FSCR was still initialised at boot to FSCR_DSCR | FSCR_TAR, but that value was not propagated into the thread_struct, so the initial context switch set FSCR_DSCR back to 0. Finally commit b57bd2de8c6c ("powerpc: Improve FSCR init and context switching") (Jun 2016) added a full context switch of the FSCR, and added an initialisation of init_task.thread.fscr to FSCR_TAR | FSCR_EBB, but omitted FSCR_DSCR. The end result is that swapper runs with FSCR_DSCR set because of the initialisation in __init_FSCR(), but no other processes do, they use the value from init_task.thread.fscr. Having FSCR_DSCR set for swapper allows it to access SPR 3 from userspace, but swapper never runs userspace, so it has no useful effect. It's also confusing to have the value initialised in two places to two different values. So remove FSCR_DSCR from __init_FSCR(), this at least gets us to the point where there's a single value of FSCR, even if it's still set in two places. 
Signed-off-by: Michael Ellerman Tested-by: Alistair Popple Link: https://lore.kernel.org/r/20200527145843.2761782-1-mpe@ellerman.id.au Cc: Thadeu Lima de Souza Cascardo Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/cpu_setup_power.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 458b928dbd84..5bef78e2b4c1 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -183,7 +183,7 @@ __init_LPCR_ISA300: __init_FSCR: mfspr r3,SPRN_FSCR - ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB + ori r3,r3,FSCR_TAR|FSCR_EBB mtspr SPRN_FSCR,r3 blr -- GitLab From c3f242a608040074ff54fc67d74b168408c92899 Mon Sep 17 00:00:00 2001 From: Mark Tomlinson Date: Wed, 19 Aug 2020 13:53:58 +1200 Subject: [PATCH 0488/1304] gre6: Fix reception with IP6_TNL_F_RCV_DSCP_COPY [ Upstream commit 272502fcb7cda01ab07fc2fcff82d1d2f73d43cc ] When receiving an IPv4 packet inside an IPv6 GRE packet, and the IP6_TNL_F_RCV_DSCP_COPY flag is set on the tunnel, the IPv4 header would get corrupted. This is due to the common ip6_tnl_rcv() function assuming that the inner header is always IPv6. This patch checks the tunnel protocol for IPv4 inner packets, but still defaults to IPv6. Fixes: 308edfdf1563 ("gre6: Cleanup GREv6 receive path, call common GRE functions") Signed-off-by: Mark Tomlinson Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_tunnel.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 8e70a015c792..b825ac025d5b 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -865,7 +865,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb, struct metadata_dst *tun_dst, bool log_ecn_err) { - return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate, + int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t, + const struct ipv6hdr *ipv6h, + struct sk_buff *skb); + + dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate; + if (tpi->proto == htons(ETH_P_IP)) + dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate; + + return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate, log_ecn_err); } EXPORT_SYMBOL(ip6_tnl_rcv); -- GitLab From ad270a5a9a04923da92238eaabfe04ad3cb06c16 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 15 Aug 2020 04:44:31 -0400 Subject: [PATCH 0489/1304] net: Fix potential wrong skb->protocol in skb_vlan_untag() [ Upstream commit 55eff0eb7460c3d50716ed9eccf22257b046ca92 ] We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). So we should pull VLAN_HLEN + sizeof(unsigned short) in skb_vlan_untag() or we may access the wrong data. Fixes: 0d5501c1c828 ("net: Always untag vlan-tagged traffic on input.") Signed-off-by: Miaohe Lin Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/skbuff.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0629ca89ab74..af6e9028716d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5128,8 +5128,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb) skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) goto err_free; - - if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) + /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). 
*/ + if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) goto err_free; vhdr = (struct vlan_hdr *)skb->data; -- GitLab From 8a794b35f625761a24bf7b0b32ce94a266ff2729 Mon Sep 17 00:00:00 2001 From: Necip Fazil Yildiran Date: Mon, 17 Aug 2020 15:54:48 +0000 Subject: [PATCH 0490/1304] net: qrtr: fix usage of idr in port assignment to socket [ Upstream commit 8dfddfb79653df7c38a9c8c4c034f242a36acee9 ] Passing large uint32 sockaddr_qrtr.port numbers for port allocation triggers a warning within idr_alloc() since the port number is cast to int, and thus interpreted as a negative number. This leads to the rejection of such valid port numbers in qrtr_port_assign() as idr_alloc() fails. To avoid the problem, switch to idr_alloc_u32() instead. Fixes: bdabad3e363d ("net: Add Qualcomm IPC router") Reported-by: syzbot+f31428628ef672716ea8@syzkaller.appspotmail.com Signed-off-by: Necip Fazil Yildiran Reviewed-by: Dmitry Vyukov Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/qrtr/qrtr.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index b5671966fa03..42bd1e74f78c 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -554,23 +554,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc) */ static int qrtr_port_assign(struct qrtr_sock *ipc, int *port) { + u32 min_port; int rc; mutex_lock(&qrtr_port_lock); if (!*port) { - rc = idr_alloc(&qrtr_ports, ipc, - QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1, - GFP_ATOMIC); - if (rc >= 0) - *port = rc; + min_port = QRTR_MIN_EPH_SOCKET; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC); + if (!rc) + *port = min_port; } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) { rc = -EACCES; } else if (*port == QRTR_PORT_CTRL) { - rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC); + min_port = 0; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC); } else { - rc = 
idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC); - if (rc >= 0) - *port = rc; + min_port = *port; + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC); + if (!rc) + *port = min_port; } mutex_unlock(&qrtr_port_lock); -- GitLab From 7c8c02c99b25e491a8ac7ed21cbedb327a2d2728 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Thu, 20 Aug 2020 16:30:52 +0200 Subject: [PATCH 0491/1304] net/smc: Prevent kernel-infoleak in __smc_diag_dump() [ Upstream commit ce51f63e63c52a4e1eee4dd040fb0ba0af3b43ab ] __smc_diag_dump() is potentially copying uninitialized kernel stack memory into socket buffers, since the compiler may leave a 4-byte hole near the beginning of `struct smcd_diag_dmbinfo`. Fix it by initializing `dinfo` with memset(). Fixes: 4b1b7d3b30a6 ("net/smc: add SMC-D diag support") Suggested-by: Dan Carpenter Signed-off-by: Peilin Ye Signed-off-by: Ursula Braun Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/smc/smc_diag.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index 2379a02c319d..6c4a7a5938b7 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -169,13 +169,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb, (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && !list_empty(&smc->conn.lgr->list)) { struct smc_connection *conn = &smc->conn; - struct smcd_diag_dmbinfo dinfo = { - .linkid = *((u32 *)conn->lgr->id), - .peer_gid = conn->lgr->peer_gid, - .my_gid = conn->lgr->smcd->local_gid, - .token = conn->rmb_desc->token, - .peer_token = conn->peer_token - }; + struct smcd_diag_dmbinfo dinfo; + + memset(&dinfo, 0, sizeof(dinfo)); + + dinfo.linkid = *((u32 *)conn->lgr->id); + dinfo.peer_gid = conn->lgr->peer_gid; + dinfo.my_gid = conn->lgr->smcd->local_gid; + dinfo.token = conn->rmb_desc->token; + dinfo.peer_token = conn->peer_token; if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0) goto errout; -- GitLab From 
0d43753b0273731997a180f11d426d22f61bbb7d Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 15 Aug 2020 16:29:15 -0700 Subject: [PATCH 0492/1304] tipc: fix uninit skb->data in tipc_nl_compat_dumpit() [ Upstream commit 47733f9daf4fe4f7e0eb9e273f21ad3a19130487 ] __tipc_nl_compat_dumpit() has two callers, and it expects them to pass a valid nlmsghdr via arg->data. This header is artificial and crafted just for __tipc_nl_compat_dumpit(). tipc_nl_compat_publ_dump() does so by putting a genlmsghdr as well as some nested attribute, TIPC_NLA_SOCK. But the other caller tipc_nl_compat_dumpit() does not, this leaves arg->data uninitialized on this call path. Fix this by just adding a similar nlmsghdr without any payload in tipc_nl_compat_dumpit(). This bug exists since day 1, but the recent commit 6ea67769ff33 ("net: tipc: prepare attrs in __tipc_nl_compat_dumpit()") makes it easier to appear. Reported-and-tested-by: syzbot+0e7181deafa7e0b79923@syzkaller.appspotmail.com Fixes: d0796d1ef63d ("tipc: convert legacy nl bearer dump to nl compat") Cc: Jon Maloy Cc: Ying Xue Cc: Richard Alpe Signed-off-by: Cong Wang Acked-by: Ying Xue Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/netlink_compat.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 29e684054abe..f8e111218a0e 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -255,8 +255,9 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, struct tipc_nl_compat_msg *msg) { - int err; + struct nlmsghdr *nlh; struct sk_buff *arg; + int err; if (msg->req_type && (!msg->req_size || !TLV_CHECK_TYPE(msg->req, msg->req_type))) @@ -285,6 +286,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, return -ENOMEM; } + nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI); + if (!nlh) { + kfree_skb(arg); + kfree_skb(msg->rep); + msg->rep = NULL; + return -EMSGSIZE; + } + nlmsg_end(arg, nlh); + err = __tipc_nl_compat_dumpit(cmd, msg, arg); if (err) { kfree_skb(msg->rep); -- GitLab From bb14103b29bf40d691ca5214626aa472e1c940b5 Mon Sep 17 00:00:00 2001 From: Shay Agroskin Date: Wed, 19 Aug 2020 20:28:38 +0300 Subject: [PATCH 0493/1304] net: ena: Make missed_tx stat incremental [ Upstream commit ccd143e5150f24b9ba15145c7221b61dd9e41021 ] Most statistics in ena driver are incremented, meaning that a stat's value is a sum of all increases done to it since driver/queue initialization. This patch makes all statistics this way, effectively making missed_tx statistic incremental. Also added a comment regarding rx_drops and tx_drops to make it clearer how these counters are calculated. Fixes: 11095fdb712b ("net: ena: add statistics for missed tx packets") Signed-off-by: Shay Agroskin Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 55cc70ba5b09..3c3222e2dcfc 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2736,7 +2736,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, } u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.missed_tx = missed_tx; + tx_ring->tx_stats.missed_tx += missed_tx; u64_stats_update_end(&tx_ring->syncp); return rc; @@ -3544,6 +3544,9 @@ static void ena_keep_alive_wd(void *adapter_data, rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; u64_stats_update_begin(&adapter->syncp); + /* These stats are accumulated by the device, so the counters indicate + * all drops since last reset. + */ adapter->dev_stats.rx_drops = rx_drops; u64_stats_update_end(&adapter->syncp); } -- GitLab From 76791cccd967c382384c6164df4c0e9bf9b9c61f Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Fri, 14 Aug 2020 22:53:24 -0700 Subject: [PATCH 0494/1304] ipvlan: fix device features [ Upstream commit d0f5c7076e01fef6fcb86988d9508bf3ce258bd4 ] Processing NETDEV_FEAT_CHANGE causes IPvlan links to lose NETIF_F_LLTX feature because of the incorrect handling of features in ipvlan_fix_features(). 
--before-- lpaa10:~# ethtool -k ipvl0 | grep tx-lockless tx-lockless: on [fixed] lpaa10:~# ethtool -K ipvl0 tso off Cannot change tcp-segmentation-offload Actual changes: vlan-challenged: off [fixed] tx-lockless: off [fixed] lpaa10:~# ethtool -k ipvl0 | grep tx-lockless tx-lockless: off [fixed] lpaa10:~# --after-- lpaa10:~# ethtool -k ipvl0 | grep tx-lockless tx-lockless: on [fixed] lpaa10:~# ethtool -K ipvl0 tso off Cannot change tcp-segmentation-offload Could not change any device features lpaa10:~# ethtool -k ipvl0 | grep tx-lockless tx-lockless: on [fixed] lpaa10:~# Fixes: 2ad7bf363841 ("ipvlan: Initial check-in of the IPVLAN driver.") Signed-off-by: Mahesh Bandewar Cc: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ipvlan/ipvlan_main.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 87f605a33c37..9fa3c0bd6ec7 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -177,12 +177,21 @@ static void ipvlan_port_destroy(struct net_device *dev) kfree(port); } +#define IPVLAN_ALWAYS_ON_OFLOADS \ + (NETIF_F_SG | NETIF_F_HW_CSUM | \ + NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL) + +#define IPVLAN_ALWAYS_ON \ + (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED) + #define IPVLAN_FEATURES \ - (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + /* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */ + #define IPVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) @@ -196,7 +205,9 @@ static int ipvlan_init(struct net_device *dev) dev->state = 
(dev->state & ~IPVLAN_STATE_MASK) | (phy_dev->state & IPVLAN_STATE_MASK); dev->features = phy_dev->features & IPVLAN_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED; + dev->features |= IPVLAN_ALWAYS_ON; + dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES; + dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS; dev->gso_max_size = phy_dev->gso_max_size; dev->gso_max_segs = phy_dev->gso_max_segs; dev->hard_header_len = phy_dev->hard_header_len; @@ -297,7 +308,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev, { struct ipvl_dev *ipvlan = netdev_priv(dev); - return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features |= NETIF_F_ALL_FOR_ALL; + features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES); + features = netdev_increment_features(ipvlan->phy_dev->features, + features, features); + features |= IPVLAN_ALWAYS_ON; + features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON); + + return features; } static void ipvlan_change_rx_flags(struct net_device *dev, int change) @@ -802,10 +820,9 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: list_for_each_entry(ipvlan, &port->ipvlans, pnode) { - ipvlan->dev->features = dev->features & IPVLAN_FEATURES; ipvlan->dev->gso_max_size = dev->gso_max_size; ipvlan->dev->gso_max_segs = dev->gso_max_segs; - netdev_features_change(ipvlan->dev); + netdev_update_features(ipvlan->dev); } break; -- GitLab From 65b121aa00b699069e549a77b171e505bdcc1cab Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 5 Aug 2020 19:19:26 -0700 Subject: [PATCH 0495/1304] ALSA: pci: delete repeated words in comments [ Upstream commit c7fabbc51352f50cc58242a6dc3b9c1a3599849b ] Drop duplicated words in sound/pci/. 
{and, the, at} Signed-off-by: Randy Dunlap Link: https://lore.kernel.org/r/20200806021926.32418-1-rdunlap@infradead.org Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/pci/cs46xx/cs46xx_lib.c | 2 +- sound/pci/cs46xx/dsp_spos_scb_lib.c | 2 +- sound/pci/hda/hda_codec.c | 2 +- sound/pci/hda/hda_generic.c | 2 +- sound/pci/hda/patch_sigmatel.c | 2 +- sound/pci/ice1712/prodigy192.c | 2 +- sound/pci/oxygen/xonar_dg.c | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c index 146e1a3498c7..419da70cd942 100644 --- a/sound/pci/cs46xx/cs46xx_lib.c +++ b/sound/pci/cs46xx/cs46xx_lib.c @@ -780,7 +780,7 @@ static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned rate = 48000 / 9; /* - * We can not capture at at rate greater than the Input Rate (48000). + * We can not capture at a rate greater than the Input Rate (48000). * Return an error if an attempt is made to stray outside that limit. */ if (rate > 48000) diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c index 8d0a3d357345..8ef51a29380a 100644 --- a/sound/pci/cs46xx/dsp_spos_scb_lib.c +++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c @@ -1739,7 +1739,7 @@ int cs46xx_iec958_pre_open (struct snd_cs46xx *chip) struct dsp_spos_instance * ins = chip->dsp_spos_instance; if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) { - /* remove AsynchFGTxSCB and and PCMSerialInput_II */ + /* remove AsynchFGTxSCB and PCMSerialInput_II */ cs46xx_dsp_disable_spdif_out (chip); /* save state */ diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index f3a6b1d869d8..dbeb62362f1c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -3410,7 +3410,7 @@ EXPORT_SYMBOL_GPL(snd_hda_set_power_save); * @nid: NID to check / update * * Check whether the given NID is in the amp list. 
If it's in the list, - * check the current AMP status, and update the the power-status according + * check the current AMP status, and update the power-status according * to the mute status. * * This function is supposed to be set or called from the check_power_status diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 2609161707a4..97adb7e340f9 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -825,7 +825,7 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path, } } -/* sync power of each widget in the the given path */ +/* sync power of each widget in the given path */ static hda_nid_t path_power_update(struct hda_codec *codec, struct nid_path *path, bool allow_powerdown) diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index d8168aa2cef3..85c33f528d7b 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -845,7 +845,7 @@ static int stac_auto_create_beep_ctls(struct hda_codec *codec, static struct snd_kcontrol_new beep_vol_ctl = HDA_CODEC_VOLUME(NULL, 0, 0, 0); - /* check for mute support for the the amp */ + /* check for mute support for the amp */ if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) { const struct snd_kcontrol_new *temp; if (spec->anabeep_nid == nid) diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c index 3919aed39ca0..5e52086d7b98 100644 --- a/sound/pci/ice1712/prodigy192.c +++ b/sound/pci/ice1712/prodigy192.c @@ -31,7 +31,7 @@ * Experimentally I found out that only a combination of * OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 - * VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct - * sampling rate. That means the the FPGA doubles the + * sampling rate. That means that the FPGA doubles the * MCK01 rate. 
* * Copyright (c) 2003 Takashi Iwai diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c index 4cf3200e988b..df44135e1b0c 100644 --- a/sound/pci/oxygen/xonar_dg.c +++ b/sound/pci/oxygen/xonar_dg.c @@ -39,7 +39,7 @@ * GPIO 4 <- headphone detect * GPIO 5 -> enable ADC analog circuit for the left channel * GPIO 6 -> enable ADC analog circuit for the right channel - * GPIO 7 -> switch green rear output jack between CS4245 and and the first + * GPIO 7 -> switch green rear output jack between CS4245 and the first * channel of CS4361 (mechanical relay) * GPIO 8 -> enable output to speakers * -- GitLab From 6978222ea037ca8dcefaa0b37fea2cc41320a7f4 Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Sat, 13 Jun 2020 22:37:48 -0500 Subject: [PATCH 0496/1304] ASoC: img: Fix a reference count leak in img_i2s_in_set_fmt [ Upstream commit c4c59b95b7f7d4cef5071b151be2dadb33f3287b ] pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code, causing incorrect ref count if pm_runtime_put_noidle() is not called in error handling paths. Thus call pm_runtime_put_noidle() if pm_runtime_get_sync() fails. 
Signed-off-by: Qiushi Wu Link: https://lore.kernel.org/r/20200614033749.2975-1-wu000273@umn.edu Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/img/img-i2s-in.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c index c22880aea82a..7e48c740bf55 100644 --- a/sound/soc/img/img-i2s-in.c +++ b/sound/soc/img/img-i2s-in.c @@ -346,8 +346,10 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) chan_control_mask = IMG_I2S_IN_CH_CTL_CLK_TRANS_MASK; ret = pm_runtime_get_sync(i2s->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(i2s->dev); return ret; + } for (i = 0; i < i2s->active_channels; i++) img_i2s_in_ch_disable(i2s, i); -- GitLab From 951fba03cf3ed5981d53c91e93ab3dd5f3d0ebbd Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Sat, 13 Jun 2020 22:33:43 -0500 Subject: [PATCH 0497/1304] ASoC: img-parallel-out: Fix a reference count leak [ Upstream commit 6b9fbb073636906eee9fe4d4c05a4f445b9e2a23 ] pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code, causing incorrect ref count if pm_runtime_put_noidle() is not called in error handling paths. Thus call pm_runtime_put_noidle() if pm_runtime_get_sync() fails. 
Signed-off-by: Qiushi Wu Link: https://lore.kernel.org/r/20200614033344.1814-1-wu000273@umn.edu Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/img/img-parallel-out.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c index acc005217be0..f56752662b19 100644 --- a/sound/soc/img/img-parallel-out.c +++ b/sound/soc/img/img-parallel-out.c @@ -166,8 +166,10 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) } ret = pm_runtime_get_sync(prl->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(prl->dev); return ret; + } reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL); reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set; -- GitLab From 3ff0d9154ef86b2f90ecf75149a30cd3d9253241 Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Sat, 13 Jun 2020 15:44:19 -0500 Subject: [PATCH 0498/1304] ASoC: tegra: Fix reference count leaks. [ Upstream commit deca195383a6085be62cb453079e03e04d618d6e ] Calling pm_runtime_get_sync increments the counter even in case of failure, causing incorrect ref count if pm_runtime_put is not called in error handling paths. Call pm_runtime_put if pm_runtime_get_sync fails. 
Signed-off-by: Qiushi Wu Reviewed-by: Jon Hunter Link: https://lore.kernel.org/r/20200613204422.24484-1-wu000273@umn.edu Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/tegra/tegra30_ahub.c | 4 +++- sound/soc/tegra/tegra30_i2s.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c index 43679aeeb12b..88e838ac937d 100644 --- a/sound/soc/tegra/tegra30_ahub.c +++ b/sound/soc/tegra/tegra30_ahub.c @@ -655,8 +655,10 @@ static int tegra30_ahub_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(ahub->regmap_ahub); ret |= regcache_sync(ahub->regmap_apbif); pm_runtime_put(dev); diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index 0b176ea24914..bf155c5092f0 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c @@ -551,8 +551,10 @@ static int tegra30_i2s_resume(struct device *dev) int ret; ret = pm_runtime_get_sync(dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(dev); return ret; + } ret = regcache_sync(i2s->regmap); pm_runtime_put(dev); -- GitLab From 8524be5aafb78d1c8715910345fcc963e62fac86 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 15 Jun 2020 19:10:32 +0300 Subject: [PATCH 0499/1304] mfd: intel-lpss: Add Intel Emmitsburg PCH PCI IDs [ Upstream commit 3ea2e4eab64cefa06055bb0541fcdedad4b48565 ] Intel Emmitsburg PCH has the same LPSS than Intel Ice Lake. Add the new IDs to the list of supported devices. 
Signed-off-by: Andy Shevchenko Signed-off-by: Lee Jones Signed-off-by: Sasha Levin --- drivers/mfd/intel-lpss-pci.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 742d6c1973f4..adea7ff63132 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -176,6 +176,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, + /* EBG */ + { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info }, /* GLK */ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info }, { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info }, -- GitLab From 49c469ac2b637fc85e44b3cb85683b21ceeffd73 Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Fri, 5 Jun 2020 20:59:15 +0200 Subject: [PATCH 0500/1304] arm64: dts: qcom: msm8916: Pull down PDM GPIOs during sleep [ Upstream commit e2ee9edc282961783d519c760bbaa20fed4dec38 ] The original qcom kernel changed the PDM GPIOs to be pull-down during sleep at some point. Reportedly this was done because there was some "leakage at PDM outputs during sleep": https://source.codeaurora.org/quic/la/kernel/msm-3.10/commit/?id=0f87e08c1cd3e6484a6f7fb3e74e37340bdcdee0 I cannot say how effective this is, but everything seems to work fine with this change so let's apply the same to mainline just to be sure. 
Cc: Srinivas Kandagatla Signed-off-by: Stephan Gerhold Link: https://lore.kernel.org/r/20200605185916.318494-3-stephan@gerhold.net Signed-off-by: Bjorn Andersson Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/qcom/msm8916-pins.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi index 60d218c5275c..6754817658fa 100644 --- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi @@ -529,7 +529,7 @@ pins = "gpio63", "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"; drive-strength = <2>; - bias-disable; + bias-pull-down; }; }; }; -- GitLab From 1838bdf6cabc2b6b0c12363b82f19d388cbd20e9 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Fri, 12 Jun 2020 14:33:03 +1000 Subject: [PATCH 0501/1304] powerpc/xive: Ignore kmemleak false positives [ Upstream commit f0993c839e95dd6c7f054a1015e693c87e33e4fb ] xive_native_provision_pages() allocates memory and passes the pointer to OPAL so kmemleak cannot find the pointer usage in the kernel memory and produces a false positive report (below) (even if the kernel did scan OPAL memory, it is unable to deal with __pa() addresses anyway). This silences the warning. unreferenced object 0xc000200350c40000 (size 65536): comm "qemu-system-ppc", pid 2725, jiffies 4294946414 (age 70776.530s) hex dump (first 32 bytes): 02 00 00 00 50 00 00 00 00 00 00 00 00 00 00 00 ....P........... 01 00 08 07 00 00 00 00 00 00 00 00 00 00 00 00 ................ 
backtrace: [<0000000081ff046c>] xive_native_alloc_vp_block+0x120/0x250 [<00000000d555d524>] kvmppc_xive_compute_vp_id+0x248/0x350 [kvm] [<00000000d69b9c9f>] kvmppc_xive_connect_vcpu+0xc0/0x520 [kvm] [<000000006acbc81c>] kvm_arch_vcpu_ioctl+0x308/0x580 [kvm] [<0000000089c69580>] kvm_vcpu_ioctl+0x19c/0xae0 [kvm] [<00000000902ae91e>] ksys_ioctl+0x184/0x1b0 [<00000000f3e68bd7>] sys_ioctl+0x48/0xb0 [<0000000001b2c127>] system_call_exception+0x124/0x1f0 [<00000000d2b2ee40>] system_call_common+0xe8/0x214 Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200612043303.84894-1-aik@ozlabs.ru Signed-off-by: Sasha Levin --- arch/powerpc/sysdev/xive/native.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index cb1f51ad48e4..411f785cdfb5 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -627,6 +628,7 @@ static bool xive_native_provision_pages(void) pr_err("Failed to allocate provisioning page\n"); return false; } + kmemleak_ignore(p); opal_xive_donate_page(chip, __pa(p)); } return true; -- GitLab From 882e00b601c27999e4d12bac9a3233737bcbf9d9 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 30 May 2020 16:42:08 +0200 Subject: [PATCH 0502/1304] media: pci: ttpci: av7110: fix possible buffer overflow caused by bad DMA value in debiirq() [ Upstream commit 6499a0db9b0f1e903d52f8244eacc1d4be00eea2 ] The value av7110->debi_virt is stored in DMA memory, and it is assigned to data, and thus data[0] can be modified at any time by malicious hardware. In this case, "if (data[0] < 2)" can be passed, but then data[0] can be changed into a large number, which may cause buffer overflow when the code "av7110->ci_slot[data[0]]" is used. To fix this possible bug, data[0] is assigned to a local variable, which replaces the use of data[0]. 
Signed-off-by: Jia-Ju Bai Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/pci/ttpci/av7110.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index d6816effb878..d02b5fd940c1 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -424,14 +424,15 @@ static void debiirq(unsigned long cookie) case DATA_CI_GET: { u8 *data = av7110->debi_virt; + u8 data_0 = data[0]; - if ((data[0] < 2) && data[2] == 0xff) { + if (data_0 < 2 && data[2] == 0xff) { int flags = 0; if (data[5] > 0) flags |= CA_CI_MODULE_PRESENT; if (data[5] > 5) flags |= CA_CI_MODULE_READY; - av7110->ci_slot[data[0]].flags = flags; + av7110->ci_slot[data_0].flags = flags; } else ci_get_data(&av7110->ci_rbuffer, av7110->debi_virt, -- GitLab From f4e860765108133941c9c964af336769b487c388 Mon Sep 17 00:00:00 2001 From: Luis Chamberlain Date: Fri, 19 Jun 2020 20:47:29 +0000 Subject: [PATCH 0503/1304] blktrace: ensure our debugfs dir exists [ Upstream commit b431ef837e3374da0db8ff6683170359aaa0859c ] We make an assumption that a debugfs directory exists, but since this can fail ensure it exists before allowing blktrace setup to complete. Otherwise we end up stuffing blktrace files on the debugfs root directory. In the worst case scenario this *in theory* can create an eventual panic *iff* in the future a similarly named file is created prior on the debugfs root directory. This theoretical crash can happen due to a recursive removal followed by a specific dentry removal. This doesn't fix any known crash, however I have seen the files go into the main debugfs root directory in cases where the debugfs directory was not created due to other internal bugs with blktrace now fixed. 
blktrace is also completely useless without this directory, so this ensures to userspace we only setup blktrace if the kernel can stuff files where they are supposed to go into. debugfs directory creations typically aren't checked for, and we have maintainers doing sweep removals of these checks, but since we need this check to ensure proper userspace blktrace functionality we make sure to annotate the justification for the check. Signed-off-by: Luis Chamberlain Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- kernel/trace/blktrace.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7a4ca2deb39b..1442f6152abc 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -529,6 +529,18 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, if (!dir) goto err; + /* + * As blktrace relies on debugfs for its interface the debugfs directory + * is required, contrary to the usual mantra of not checking for debugfs + * files or directories. + */ + if (IS_ERR_OR_NULL(dir)) { + pr_warn("debugfs_dir not present for %s so skipping\n", + buts->name); + ret = -ENOENT; + goto err; + } + bt->dev = dev; atomic_set(&bt->dropped, 0); INIT_LIST_HEAD(&bt->running_list); -- GitLab From b5cd5c1e708721c421e6a101a949d462a8d840f4 Mon Sep 17 00:00:00 2001 From: Bodo Stroesser Date: Mon, 29 Jun 2020 11:37:56 +0200 Subject: [PATCH 0504/1304] scsi: target: tcmu: Fix crash on ARM during cmd completion [ Upstream commit 5a0c256d96f020e4771f6fd5524b80f89a2d3132 ] If tcmu_handle_completions() has to process a padding shorter than sizeof(struct tcmu_cmd_entry), the current call to tcmu_flush_dcache_range() with sizeof(struct tcmu_cmd_entry) as length param is wrong and causes crashes on e.g. 
ARM, because tcmu_flush_dcache_range() in this case calls flush_dcache_page(vmalloc_to_page(start)); with start being an invalid address above the end of the vmalloc'ed area. The fix is to use the minimum of remaining ring space and sizeof(struct tcmu_cmd_entry) as the length param. The patch was tested on kernel 4.19.118. See https://bugzilla.kernel.org/show_bug.cgi?id=208045#c10 Link: https://lore.kernel.org/r/20200629093756.8947-1-bstroesser@ts.fujitsu.com Tested-by: JiangYu Acked-by: Mike Christie Signed-off-by: Bodo Stroesser Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/target/target_core_user.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9c05e820857a..91dbac7446a4 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1231,7 +1231,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + /* + * Flush max. up to end of cmd ring since current entry might + * be a padding that is shorter than sizeof(*entry) + */ + size_t ring_left = head_to_end(udev->cmdr_last_cleaned, + udev->cmdr_size); + tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? 
+ ring_left : sizeof(*entry)); if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { UPDATE_HEAD(udev->cmdr_last_cleaned, -- GitLab From 9c9723816024195e6d68255e29db0dd4658f76ff Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Tue, 2 Jun 2020 14:08:18 +0100 Subject: [PATCH 0505/1304] iommu/iova: Don't BUG on invalid PFNs [ Upstream commit d3e3d2be688b4b5864538de61e750721a311e4fc ] Unlike the other instances which represent a complete loss of consistency within the rcache mechanism itself, or a fundamental and obvious misconfiguration by an IOMMU driver, the BUG_ON() in iova_magazine_free_pfns() can be provoked at more or less any time in a "spooky action-at-a-distance" manner by any old device driver passing nonsense to dma_unmap_*() which then propagates through to queue_iova(). Not only is this well outside the IOVA layer's control, it's also nowhere near fatal enough to justify panicking anyway - all that really achieves is to make debugging the offending driver more difficult. Let's simply WARN and otherwise ignore bogus PFNs. 
Reported-by: Prakash Gupta Signed-off-by: Robin Murphy Reviewed-by: Prakash Gupta Link: https://lore.kernel.org/r/acbd2d092b42738a03a21b417ce64e27f8c91c86.1591103298.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel Signed-off-by: Sasha Levin --- drivers/iommu/iova.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 34c058c24b9d..ce5cd05253db 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -814,7 +814,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) for (i = 0 ; i < mag->size; ++i) { struct iova *iova = private_find_iova(iovad, mag->pfns[i]); - BUG_ON(!iova); + if (WARN_ON(!iova)) + continue; + private_free_iova(iovad, iova); } -- GitLab From 74d20579fcf02320fb51efd12bad1ff320e0d4f0 Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Sat, 13 Jun 2020 14:32:26 -0500 Subject: [PATCH 0506/1304] drm/amdkfd: Fix reference count leaks. [ Upstream commit 20eca0123a35305e38b344d571cf32768854168c ] kobject_init_and_add() takes reference even when it fails. If this function returns an error, kobject_put() must be called to properly clean up the memory associated with the object. 
Signed-off-by: Qiushi Wu Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 0805c423a5ce..5cf499a07806 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -592,8 +592,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, ret = kobject_init_and_add(dev->kobj_node, &node_type, sys_props.kobj_nodes, "%d", id); - if (ret < 0) + if (ret < 0) { + kobject_put(dev->kobj_node); return ret; + } dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node); if (!dev->kobj_mem) @@ -640,8 +642,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(mem->kobj, &mem_type, dev->kobj_mem, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(mem->kobj); return ret; + } mem->attr.name = "properties"; mem->attr.mode = KFD_SYSFS_FILE_MODE; @@ -659,8 +663,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(cache->kobj, &cache_type, dev->kobj_cache, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(cache->kobj); return ret; + } cache->attr.name = "properties"; cache->attr.mode = KFD_SYSFS_FILE_MODE; @@ -678,8 +684,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, return -ENOMEM; ret = kobject_init_and_add(iolink->kobj, &iolink_type, dev->kobj_iolink, "%d", i); - if (ret < 0) + if (ret < 0) { + kobject_put(iolink->kobj); return ret; + } iolink->attr.name = "properties"; iolink->attr.mode = KFD_SYSFS_FILE_MODE; @@ -759,8 +767,10 @@ static int kfd_topology_update_sysfs(void) ret = kobject_init_and_add(sys_props.kobj_topology, &sysprops_type, &kfd_device->kobj, 
"topology"); - if (ret < 0) + if (ret < 0) { + kobject_put(sys_props.kobj_topology); return ret; + } sys_props.kobj_nodes = kobject_create_and_add("nodes", sys_props.kobj_topology); -- GitLab From 93d3e58c97741f25c3e6c15e3dd61ab684f72cb2 Mon Sep 17 00:00:00 2001 From: Aditya Pakki Date: Sat, 13 Jun 2020 20:55:39 -0500 Subject: [PATCH 0507/1304] drm/radeon: fix multiple reference count leak [ Upstream commit 6f2e8acdb48ed166b65d47837c31b177460491ec ] On calling pm_runtime_get_sync() the reference count of the device is incremented. In case of failure, decrement the reference count before returning the error. Signed-off-by: Aditya Pakki Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/radeon/radeon_connectors.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index de656f555383..b9927101e845 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -882,8 +882,10 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (encoder) { @@ -1028,8 +1030,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = radeon_best_single_encoder(connector); @@ -1166,8 +1170,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; 
+ } } encoder = radeon_best_single_encoder(connector); @@ -1250,8 +1256,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (radeon_connector->detected_hpd_without_ddc) { @@ -1665,8 +1673,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && radeon_check_hpd_status_unchanged(connector)) { -- GitLab From 9006d622156a5c4a6699d303472fe24786cd10c9 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Sun, 14 Jun 2020 02:12:29 -0500 Subject: [PATCH 0508/1304] drm/amdgpu: fix ref count leak in amdgpu_driver_open_kms [ Upstream commit 9ba8923cbbe11564dd1bf9f3602add9a9cfbb5c6 ] in amdgpu_driver_open_kms the call to pm_runtime_get_sync increments the counter even in case of failure, leading to incorrect ref count. In case of failure, decrement the ref count before returning. 
Signed-off-by: Navid Emamdoost Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bb41936df0d9..2beaaf4bee68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -835,7 +835,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) r = pm_runtime_get_sync(dev->dev); if (r < 0) - return r; + goto pm_put; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { @@ -883,6 +883,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) out_suspend: pm_runtime_mark_last_busy(dev->dev); +pm_put: pm_runtime_put_autosuspend(dev->dev); return r; -- GitLab From 214b2803ba44538f929dad7d196224c2acd06740 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Sun, 14 Jun 2020 02:14:50 -0500 Subject: [PATCH 0509/1304] drm/amd/display: fix ref count leak in amdgpu_drm_ioctl [ Upstream commit 5509ac65f2fe5aa3c0003237ec629ca55024307c ] in amdgpu_drm_ioctl the call to pm_runtime_get_sync increments the counter even in case of failure, leading to incorrect ref count. In case of failure, decrement the ref count before returning. 
Signed-off-by: Navid Emamdoost Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 5e29f14f4b30..63b1e325b45c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1085,11 +1085,12 @@ long amdgpu_drm_ioctl(struct file *filp, dev = file_priv->minor->dev; ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_ioctl(filp, cmd, arg); pm_runtime_mark_last_busy(dev->dev); +out: pm_runtime_put_autosuspend(dev->dev); return ret; } -- GitLab From d94a5e441cf1a7bf9f88580248fb5f8205b26c53 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Sun, 14 Jun 2020 02:09:44 -0500 Subject: [PATCH 0510/1304] drm/amdgpu: fix ref count leak in amdgpu_display_crtc_set_config [ Upstream commit e008fa6fb41544b63973a529b704ef342f47cc65 ] in amdgpu_display_crtc_set_config, the call to pm_runtime_get_sync increments the counter even in case of failure, leading to incorrect ref count. In case of failure, decrement the ref count before returning. 
Signed-off-by: Navid Emamdoost Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 686a26de50f9..049a1961c3fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -275,7 +275,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, ret = pm_runtime_get_sync(dev->dev); if (ret < 0) - return ret; + goto out; ret = drm_crtc_helper_set_config(set, ctx); @@ -290,7 +290,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, take the current one */ if (active && !adev->have_disp_power_ref) { adev->have_disp_power_ref = true; - return ret; + goto out; } /* if we have no active crtcs, then drop the power ref we got before */ @@ -299,6 +299,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, adev->have_disp_power_ref = false; } +out: /* drop the power reference we got coming in here */ pm_runtime_put_autosuspend(dev->dev); return ret; -- GitLab From f93736e489642e7fdece41ed29aeb0fe1bef5fd3 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Sun, 14 Jun 2020 02:05:28 -0500 Subject: [PATCH 0511/1304] drm/amdgpu/display: fix ref count leak when pm_runtime_get_sync fails [ Upstream commit f79f94765f8c39db0b7dec1d335ab046aac03f20 ] The call to pm_runtime_get_sync increments the counter even in case of failure, leading to incorrect ref count. In case of failure, decrement the ref count before returning. 
Signed-off-by: Navid Emamdoost Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index c770d73352a7..c15286858f0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -718,8 +718,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (encoder) { @@ -856,8 +858,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } encoder = amdgpu_connector_best_single_encoder(connector); @@ -979,8 +983,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { @@ -1329,8 +1335,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) if (!drm_kms_helper_is_poll_worker()) { r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { -- GitLab From f28249fbb663f0bc09d13654bf2e303f5c24d86b Mon Sep 17 00:00:00 2001 From: Dick Kennedy 
Date: Tue, 30 Jun 2020 14:49:54 -0700 Subject: [PATCH 0512/1304] scsi: lpfc: Fix shost refcount mismatch when deleting vport [ Upstream commit 03dbfe0668e6692917ac278883e0586cd7f7d753 ] When vports are deleted, it is observed that there is memory/kthread leakage as the vport isn't fully being released. There is a shost reference taken in scsi_add_host_dma that is not released during scsi_remove_host. It was noticed that other drivers resolve this by doing a scsi_host_put after calling scsi_remove_host. The vport_delete routine is taking two references one that corresponds to an access to the scsi_host in the vport_delete routine and another that is released after the adapter mailbox command completes that destroys the VPI that corresponds to the vport. Remove one of the references taken such that the second reference that is put will complete the missing scsi_add_host_dma reference and the shost will be terminated. Link: https://lore.kernel.org/r/20200630215001.70793-8-jsmart2021@gmail.com Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/lpfc/lpfc_vport.c | 26 ++++++++------------------ 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 1ff0f7de9105..64545b300dfc 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -653,27 +653,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) vport->port_state < LPFC_VPORT_READY) return -EAGAIN; } + /* - * This is a bit of a mess. We want to ensure the shost doesn't get - * torn down until we're done with the embedded lpfc_vport structure. - * - * Beyond holding a reference for this function, we also need a - * reference for outstanding I/O requests we schedule during delete - * processing. But once we scsi_remove_host() we can no longer obtain - * a reference through scsi_host_get(). - * - * So we take two references here. 
We release one reference at the - * bottom of the function -- after delinking the vport. And we - * release the other at the completion of the unreg_vpi that get's - * initiated after we've disposed of all other resources associated - * with the port. + * Take early refcount for outstanding I/O requests we schedule during + * delete processing for unreg_vpi. Always keep this before + * scsi_remove_host() as we can no longer obtain a reference through + * scsi_host_get() after scsi_host_remove as shost is set to SHOST_DEL. */ if (!scsi_host_get(shost)) return VPORT_INVAL; - if (!scsi_host_get(shost)) { - scsi_host_put(shost); - return VPORT_INVAL; - } + lpfc_free_sysfs_attr(vport); lpfc_debugfs_terminate(vport); @@ -820,8 +809,9 @@ lpfc_vport_delete(struct fc_vport *fc_vport) if (!(vport->vpi_state & LPFC_VPI_REGISTERED) || lpfc_mbx_unreg_vpi(vport)) scsi_host_put(shost); - } else + } else { scsi_host_put(shost); + } lpfc_free_vpi(phba, vport->vpi); vport->work_port_events = 0; -- GitLab From 6af2bb145126e4907ff0a64bbee04fa0b499f09c Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 29 Jun 2020 14:48:45 -0700 Subject: [PATCH 0513/1304] xfs: Don't allow logging of XFS_ISTALE inodes [ Upstream commit 96355d5a1f0ee6dcc182c37db4894ec0c29f1692 ] In tracking down a problem in this patchset, I discovered we are reclaiming dirty stale inodes. This wasn't discovered until inodes were always attached to the cluster buffer and then the rcu callback that freed inodes was assert failing because the inode still had an active pointer to the cluster buffer after it had been reclaimed. Debugging the issue indicated that this was a pre-existing issue resulting from the way the inodes are handled in xfs_inactive_ifree. When we free a cluster buffer from xfs_ifree_cluster, all the inodes in cache are marked XFS_ISTALE. Those that are clean have nothing else done to them and so eventually get cleaned up by background reclaim. i.e. 
it is assumed we'll never dirty/relog an inode marked XFS_ISTALE. On journal commit dirty stale inodes as are handled by both buffer and inode log items to run though xfs_istale_done() and removed from the AIL (buffer log item commit) or the log item will simply unpin it because the buffer log item will clean it. What happens to any specific inode is entirely dependent on which log item wins the commit race, but the result is the same - stale inodes are clean, not attached to the cluster buffer, and not in the AIL. Hence inode reclaim can just free these inodes without further care. However, if the stale inode is relogged, it gets dirtied again and relogged into the CIL. Most of the time this isn't an issue, because relogging simply changes the inode's location in the current checkpoint. Problems arise, however, when the CIL checkpoints between two transactions in the xfs_inactive_ifree() deferops processing. This results in the XFS_ISTALE inode being redirtied and inserted into the CIL without any of the other stale cluster buffer infrastructure being in place. Hence on journal commit, it simply gets unpinned, so it remains dirty in memory. Everything in inode writeback avoids XFS_ISTALE inodes so it can't be written back, and it is not tracked in the AIL so there's not even a trigger to attempt to clean the inode. Hence the inode just sits dirty in memory until inode reclaim comes along, sees that it is XFS_ISTALE, and goes to reclaim it. This reclaiming of a dirty inode caused use after free, list corruptions and other nasty issues later in this patchset. Hence this patch addresses a violation of the "never log XFS_ISTALE inodes" caused by the deferops processing rolling a transaction and relogging a stale inode in xfs_inactive_free. It also adds a bunch of asserts to catch this problem in debug kernels so that we don't reintroduce this problem in future. Reproducer for this issue was generic/558 on a v4 filesystem. 
Signed-off-by: Dave Chinner Reviewed-by: Brian Foster Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong Signed-off-by: Sasha Levin --- fs/xfs/xfs_icache.c | 3 ++- fs/xfs/xfs_inode.c | 25 ++++++++++++++++++++++--- fs/xfs/xfs_trans_inode.c | 2 ++ 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 901f27ac94ab..56e9043bddc7 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -1127,7 +1127,7 @@ xfs_reclaim_inode( goto out_ifunlock; xfs_iunpin_wait(ip); } - if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { + if (xfs_inode_clean(ip)) { xfs_ifunlock(ip); goto reclaim; } @@ -1214,6 +1214,7 @@ xfs_reclaim_inode( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_qm_dqdetach(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL); + ASSERT(xfs_inode_clean(ip)); __xfs_inode_free(ip); return error; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index f2d06e1e4906..cd81d6d9848d 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1772,10 +1772,31 @@ xfs_inactive_ifree( return error; } + /* + * We do not hold the inode locked across the entire rolling transaction + * here. We only need to hold it for the first transaction that + * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the + * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode + * here breaks the relationship between cluster buffer invalidation and + * stale inode invalidation on cluster buffer item journal commit + * completion, and can result in leaving dirty stale inodes hanging + * around in memory. + * + * We have no need for serialising this inode operation against other + * operations - we freed the inode and hence reallocation is required + * and that will serialise on reallocating the space the deferops need + * to free. Hence we can unlock the inode on the first commit of + * the transaction rather than roll it right through the deferops. This + * avoids relogging the XFS_ISTALE inode. 
+ * + * We check that xfs_ifree() hasn't grown an internal transaction roll + * by asserting that the inode is still locked when it returns. + */ xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, 0); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); error = xfs_ifree(tp, ip); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (error) { /* * If we fail to free the inode, shut down. The cancel @@ -1788,7 +1809,6 @@ xfs_inactive_ifree( xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); } xfs_trans_cancel(tp); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } @@ -1806,7 +1826,6 @@ xfs_inactive_ifree( xfs_notice(mp, "%s: xfs_trans_commit returned error %d", __func__, error); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index 542927321a61..ae453dd236a6 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -39,6 +39,7 @@ xfs_trans_ijoin( ASSERT(iip->ili_lock_flags == 0); iip->ili_lock_flags = lock_flags; + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* * Get a log_item_desc to point at the new item. @@ -90,6 +91,7 @@ xfs_trans_log_inode( ASSERT(ip->i_itemp != NULL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* * Don't bother with i_lock for the I_DIRTY_TIME check here, as races -- GitLab From 8c74fe263acd8d7a5d16bfb3a282ef34abd36d4e Mon Sep 17 00:00:00 2001 From: "Desnes A. Nunes do Rosario" Date: Fri, 26 Jun 2020 13:47:37 -0300 Subject: [PATCH 0514/1304] selftests/powerpc: Purge extra count_pmc() calls of ebb selftests [ Upstream commit 3337bf41e0dd70b4064cdf60acdfcdc2d050066c ] An extra count on ebb_state.stats.pmc_count[PMC_INDEX(pmc)] is being per- formed when count_pmc() is used to reset PMCs on a few selftests. This extra pmc_count can occasionally invalidate results, such as the ones from cycles_test shown hereafter. The ebb_check_count() failed with an above the upper limit error due to the extra value on ebb_state.stats.pmc_count. 
Furthermore, this extra count is also indicated by extra PMC1 trace_log on the output of the cycle test (as well as on pmc56_overflow_test): ========== ... [21]: counter = 8 [22]: register SPRN_MMCR0 = 0x0000000080000080 [23]: register SPRN_PMC1 = 0x0000000080000004 [24]: counter = 9 [25]: register SPRN_MMCR0 = 0x0000000080000080 [26]: register SPRN_PMC1 = 0x0000000080000004 [27]: counter = 10 [28]: register SPRN_MMCR0 = 0x0000000080000080 [29]: register SPRN_PMC1 = 0x0000000080000004 >> [30]: register SPRN_PMC1 = 0x000000004000051e PMC1 count (0x280000546) above upper limit 0x2800003e8 (+0x15e) [FAIL] Test FAILED on line 52 failure: cycles ========== Signed-off-by: Desnes A. Nunes do Rosario Tested-by: Sachin Sant Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200626164737.21943-1-desnesn@linux.ibm.com Signed-off-by: Sasha Levin --- .../selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c | 2 -- tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c | 2 -- .../selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c | 2 -- .../selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c | 2 -- tools/testing/selftests/powerpc/pmu/ebb/ebb.c | 2 -- .../selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c | 2 -- .../selftests/powerpc/pmu/ebb/lost_exception_test.c | 1 - .../testing/selftests/powerpc/pmu/ebb/multi_counter_test.c | 7 ------- .../selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c | 2 -- .../testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c | 2 -- .../selftests/powerpc/pmu/ebb/pmc56_overflow_test.c | 2 -- 11 files changed, 26 deletions(-) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c index 94110b1dcd3d..031baa43646f 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c @@ -91,8 +91,6 @@ int back_to_back_ebbs(void) ebb_global_disable(); ebb_freeze_pmcs(); - 
count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c index 7c57a8d79535..361e0be9df9a 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c @@ -42,8 +42,6 @@ int cycles(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c index ecf5ee3283a3..fe7d0dc2a1a2 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c @@ -99,8 +99,6 @@ int cycles_with_freeze(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); printf("EBBs while frozen %d\n", ebbs_while_frozen); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c index c0faba520b35..b9b30f974b5e 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c @@ -71,8 +71,6 @@ int cycles_with_mmcr2(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c index 46681fec549b..2694ae161a84 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c @@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); event_close(&event); diff --git 
a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c index a991d2ea8d0a..174e4f4dae6c 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c @@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); FAIL_IF(ebb_state.stats.ebb_count == 0); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c index 2ed7ad33f7a3..dddb95938304 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c @@ -75,7 +75,6 @@ static int test_body(void) ebb_freeze_pmcs(); ebb_global_disable(); - count_pmc(4, sample_period); mtspr(SPRN_PMC4, 0xdead); dump_summary_ebb_state(); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c index 6ff8c8ff27d6..035c02273cd4 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c @@ -70,13 +70,6 @@ int multi_counter(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - count_pmc(2, sample_period); - count_pmc(3, sample_period); - count_pmc(4, sample_period); - count_pmc(5, sample_period); - count_pmc(6, sample_period); - dump_ebb_state(); for (i = 0; i < 6; i++) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c index 037cb6154f36..3e9d4ac965c8 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c @@ -61,8 +61,6 @@ 
static int cycles_child(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_summary_ebb_state(); event_close(&event); diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c index c5fa64790c22..d90891fe96a3 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c @@ -82,8 +82,6 @@ static int test_body(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(1, sample_period); - dump_ebb_state(); if (mmcr0_mismatch) diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c index 30e1ac62e8cb..8ca92b9ee5b0 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c @@ -76,8 +76,6 @@ int pmc56_overflow(void) ebb_global_disable(); ebb_freeze_pmcs(); - count_pmc(2, sample_period); - dump_ebb_state(); printf("PMC5/6 overflow %d\n", pmc56_overflowed); -- GitLab From 9e2c212d7c96d25f0dd53abfe5745ddb88266a33 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 6 Jul 2020 18:23:36 +0800 Subject: [PATCH 0515/1304] f2fs: fix error path in do_recover_data() [ Upstream commit 9627a7b31f3c4ff8bc8f3be3683983ffe6eaebe6 ] - don't panic kernel if f2fs_get_node_page() fails in f2fs_recover_inline_data() or f2fs_recover_inline_xattr(); - return error number of f2fs_truncate_blocks() to f2fs_recover_inline_data()'s caller; Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim Signed-off-by: Sasha Levin --- fs/f2fs/f2fs.h | 4 ++-- fs/f2fs/inline.c | 19 ++++++++++++------- fs/f2fs/node.c | 6 ++++-- fs/f2fs/recovery.c | 10 ++++++++-- 4 files changed, 26 insertions(+), 13 deletions(-) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 6b5b685af599..53ffa6fe207a 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -2921,7 +2921,7 @@ bool 
f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page); +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); int f2fs_recover_xattr_data(struct inode *inode, struct page *page); int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, @@ -3314,7 +3314,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page); int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); int f2fs_convert_inline_inode(struct inode *inode); int f2fs_write_inline_data(struct inode *inode, struct page *page); -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage); +int f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, struct fscrypt_name *fname, struct page **res_page); int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index c1ba29d10789..2fabeb0bb28f 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -256,7 +256,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page) return 0; } -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) +int f2fs_recover_inline_data(struct inode *inode, struct page *npage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode *ri = NULL; @@ -278,7 +278,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) ri && (ri->i_inline & F2FS_INLINE_DATA)) { process_inline: ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_wait_on_page_writeback(ipage, NODE, true); @@ 
-291,21 +292,25 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) set_page_dirty(ipage); f2fs_put_page(ipage, 1); - return true; + return 1; } if (f2fs_has_inline_data(inode)) { ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_truncate_inline_inode(inode, ipage, 0); clear_inode_flag(inode, FI_INLINE_DATA); f2fs_put_page(ipage, 1); } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { - if (f2fs_truncate_blocks(inode, 0, false)) - return false; + int ret; + + ret = f2fs_truncate_blocks(inode, 0, false); + if (ret) + return ret; goto process_inline; } - return false; + return 0; } struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index f0714c1258c7..2ff02541c53d 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2451,7 +2451,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) return nr - nr_shrink; } -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page) { void *src_addr, *dst_addr; size_t inline_size; @@ -2459,7 +2459,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) struct f2fs_inode *ri; ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino); - f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); ri = F2FS_INODE(page); if (ri->i_inline & F2FS_INLINE_XATTR) { @@ -2478,6 +2479,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) update_inode: f2fs_update_inode(inode, ipage); f2fs_put_page(ipage, 1); + return 0; } int f2fs_recover_xattr_data(struct inode *inode, struct page *page) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 733f005b85d6..ad0486beee2c 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -471,7 +471,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct 
inode *inode, /* step 1: recover xattr */ if (IS_INODE(page)) { - f2fs_recover_inline_xattr(inode, page); + err = f2fs_recover_inline_xattr(inode, page); + if (err) + goto out; } else if (f2fs_has_xattr_block(ofs_of_node(page))) { err = f2fs_recover_xattr_data(inode, page); if (!err) @@ -480,8 +482,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, } /* step 2: recover inline data */ - if (f2fs_recover_inline_data(inode, page)) + err = f2fs_recover_inline_data(inode, page); + if (err) { + if (err == 1) + err = 0; goto out; + } /* step 3: recover data indices */ start = f2fs_start_bidx_of_node(ofs_of_node(page), inode); -- GitLab From 1c33c23b931d0b0e38aa436a90c2c527414e2fc5 Mon Sep 17 00:00:00 2001 From: Aditya Pakki Date: Sat, 13 Jun 2020 22:05:18 -0500 Subject: [PATCH 0516/1304] omapfb: fix multiple reference count leaks due to pm_runtime_get_sync [ Upstream commit 78c2ce9bde70be5be7e3615a2ae7024ed8173087 ] On calling pm_runtime_get_sync() the reference count of the device is incremented. In case of failure, decrement the reference count before returning the error. Signed-off-by: Aditya Pakki Cc: kjlu@umn.edu Cc: wu000273@umn.edu Cc: Allison Randal Cc: Thomas Gleixner Cc: Enrico Weigelt cc: "Andrew F. 
Davis" Cc: Tomi Valkeinen Cc: Alexios Zavras Cc: Greg Kroah-Hartman Cc: YueHaibing Signed-off-by: Bartlomiej Zolnierkiewicz Link: https://patchwork.freedesktop.org/patch/msgid/20200614030528.128064-1-pakki001@umn.edu Signed-off-by: Sasha Levin --- drivers/video/fbdev/omap2/omapfb/dss/dispc.c | 7 +++++-- drivers/video/fbdev/omap2/omapfb/dss/dsi.c | 7 +++++-- drivers/video/fbdev/omap2/omapfb/dss/dss.c | 7 +++++-- drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c | 5 +++-- drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c | 5 +++-- drivers/video/fbdev/omap2/omapfb/dss/venc.c | 7 +++++-- 6 files changed, 26 insertions(+), 12 deletions(-) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index a06d9c25765c..0bd582e845f3 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -531,8 +531,11 @@ int dispc_runtime_get(void) DSSDBG("dispc_runtime_get\n"); r = pm_runtime_get_sync(&dispc.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dispc.pdev->dev); + return r; + } + return 0; } EXPORT_SYMBOL(dispc_runtime_get); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c index 8e1d60d48dbb..50792d31533b 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c @@ -1148,8 +1148,11 @@ static int dsi_runtime_get(struct platform_device *dsidev) DSSDBG("dsi_runtime_get\n"); r = pm_runtime_get_sync(&dsi->pdev->dev); - WARN_ON(r < 0); - return r < 0 ? 
r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dsi->pdev->dev); + return r; + } + return 0; } static void dsi_runtime_put(struct platform_device *dsidev) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c index b6c6c24979dd..faebf9a773ba 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c @@ -779,8 +779,11 @@ int dss_runtime_get(void) DSSDBG("dss_runtime_get\n"); r = pm_runtime_get_sync(&dss.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&dss.pdev->dev); + return r; + } + return 0; } void dss_runtime_put(void) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c index 28de56e21c74..9fd9a02bb871 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c @@ -50,9 +50,10 @@ static int hdmi_runtime_get(void) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi.pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&hdmi.pdev->dev); return r; + } return 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c index 2e2fcc3d6d4f..13f3a5ce5529 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c @@ -54,9 +54,10 @@ static int hdmi_runtime_get(void) DSSDBG("hdmi_runtime_get\n"); r = pm_runtime_get_sync(&hdmi.pdev->dev); - WARN_ON(r < 0); - if (r < 0) + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&hdmi.pdev->dev); return r; + } return 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c index 392464da12e4..96714b4596d2 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c @@ -402,8 +402,11 @@ static int venc_runtime_get(void) DSSDBG("venc_runtime_get\n"); r = 
pm_runtime_get_sync(&venc.pdev->dev); - WARN_ON(r < 0); - return r < 0 ? r : 0; + if (WARN_ON(r < 0)) { + pm_runtime_put_sync(&venc.pdev->dev); + return r; + } + return 0; } static void venc_runtime_put(void) -- GitLab From 03f4e517e6ac202d6a6ca50f02a1319a4a70cdd6 Mon Sep 17 00:00:00 2001 From: Qiushi Wu Date: Wed, 27 May 2020 21:13:22 -0500 Subject: [PATCH 0517/1304] PCI: Fix pci_create_slot() reference count leak [ Upstream commit 8a94644b440eef5a7b9c104ac8aa7a7f413e35e5 ] kobject_init_and_add() takes a reference even when it fails. If it returns an error, kobject_put() must be called to clean up the memory associated with the object. When kobject_init_and_add() fails, call kobject_put() instead of kfree(). b8eb718348b8 ("net-sysfs: Fix reference count leak in rx|netdev_queue_add_kobject") fixed a similar problem. Link: https://lore.kernel.org/r/20200528021322.1984-1-wu000273@umn.edu Signed-off-by: Qiushi Wu Signed-off-by: Bjorn Helgaas Signed-off-by: Sasha Levin --- drivers/pci/slot.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index a32897f83ee5..fb7478b6c4f9 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -303,13 +303,16 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + kfree(slot); goto err; } err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, "%s", slot_name); - if (err) + if (err) { + kobject_put(&slot->kobj); goto err; + } INIT_LIST_HEAD(&slot->list); list_add(&slot->list, &parent->slots); @@ -328,7 +331,6 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, mutex_unlock(&pci_slot_mutex); return slot; err: - kfree(slot); slot = ERR_PTR(err); goto out; } -- GitLab From 4841465f8901e6b84e24b08875f5d1bf5af59360 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Fri, 22 May 2020 09:30:52 +0800 Subject: [PATCH 0518/1304] ARM: dts: ls1021a: output PPS signal on 
FIPER2 [ Upstream commit 5656bb3857c4904d1dec6e1b8f876c1c0337274e ] The timer fixed interval period pulse generator register is used to generate periodic pulses. The down count register loads the value programmed in the fixed period interval (FIPER). At every tick of the timer accumulator overflow, the counter decrements by the value of TMR_CTRL[TCLK_PERIOD]. It generates a pulse when the down counter value reaches zero. It reloads the down counter in the cycle following a pulse. To use the TMR_FIPER register to generate desired periodic pulses. The value should programmed is, desired_period - tclk_period Current tmr-fiper2 value is to generate 100us periodic pulses. (But the value should have been 99995, not 99990. The tclk_period is 5.) This patch is to generate 1 second periodic pulses with value 999999995 programmed which is more desired by user. Signed-off-by: Yangbo Lu Acked-by: Richard Cochran Signed-off-by: Shawn Guo Signed-off-by: Sasha Levin --- arch/arm/boot/dts/ls1021a.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 074b4ec520c6..d18c04326444 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -609,7 +609,7 @@ fsl,tmr-prsc = <2>; fsl,tmr-add = <0xaaaaaaab>; fsl,tmr-fiper1 = <999999995>; - fsl,tmr-fiper2 = <99990>; + fsl,tmr-fiper2 = <999999995>; fsl,max-adj = <499999999>; }; -- GitLab From 644e4f2425a64b45ac8bcca6ae86e2d3cc50729c Mon Sep 17 00:00:00 2001 From: Reto Schneider Date: Mon, 22 Jun 2020 15:21:12 +0200 Subject: [PATCH 0519/1304] rtlwifi: rtl8192cu: Prevent leaking urb [ Upstream commit 03128643eb5453a798db5770952c73dc64fcaf00 ] If usb_submit_urb fails the allocated urb should be unanchored and released. 
Signed-off-by: Reto Schneider Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200622132113.14508-3-code@reto-schneider.ch Signed-off-by: Sasha Levin --- drivers/net/wireless/realtek/rtlwifi/usb.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 1893640555c1..3d6c0d8c71d7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -739,8 +739,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) usb_anchor_urb(urb, &rtlusb->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); - if (err) + if (err) { + usb_unanchor_urb(urb); + usb_free_urb(urb); goto err_out; + } usb_free_urb(urb); } return 0; -- GitLab From a48f3e865420f3dc48d45040b00d0f05e5f32b86 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Tue, 14 Jul 2020 20:30:18 +0800 Subject: [PATCH 0520/1304] mips/vdso: Fix resource leaks in genvdso.c [ Upstream commit a859647b4e6bfeb192284d27d24b6a0c914cae1d ] Close "fd" before the return of map_vdso() and close "out_file" in main(). 
Signed-off-by: Peng Fan Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/vdso/genvdso.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c index 530a36f465ce..afcc86726448 100644 --- a/arch/mips/vdso/genvdso.c +++ b/arch/mips/vdso/genvdso.c @@ -126,6 +126,7 @@ static void *map_vdso(const char *path, size_t *_size) if (fstat(fd, &stat) != 0) { fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name, path, strerror(errno)); + close(fd); return NULL; } @@ -134,6 +135,7 @@ static void *map_vdso(const char *path, size_t *_size) if (addr == MAP_FAILED) { fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name, path, strerror(errno)); + close(fd); return NULL; } @@ -143,6 +145,7 @@ static void *map_vdso(const char *path, size_t *_size) if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) { fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name, path); + close(fd); return NULL; } @@ -154,6 +157,7 @@ static void *map_vdso(const char *path, size_t *_size) default: fprintf(stderr, "%s: '%s' has invalid ELF class\n", program_name, path); + close(fd); return NULL; } @@ -165,6 +169,7 @@ static void *map_vdso(const char *path, size_t *_size) default: fprintf(stderr, "%s: '%s' has invalid ELF data order\n", program_name, path); + close(fd); return NULL; } @@ -172,15 +177,18 @@ static void *map_vdso(const char *path, size_t *_size) fprintf(stderr, "%s: '%s' has invalid ELF machine (expected EM_MIPS)\n", program_name, path); + close(fd); return NULL; } else if (swap_uint16(ehdr->e_type) != ET_DYN) { fprintf(stderr, "%s: '%s' has invalid ELF type (expected ET_DYN)\n", program_name, path); + close(fd); return NULL; } *_size = stat.st_size; + close(fd); return addr; } @@ -284,10 +292,12 @@ int main(int argc, char **argv) /* Calculate and write symbol offsets to */ if (!get_symbols(dbg_vdso_path, dbg_vdso)) { unlink(out_path); + fclose(out_file); return EXIT_FAILURE; } 
fprintf(out_file, "};\n"); + fclose(out_file); return EXIT_SUCCESS; } -- GitLab From da489549711e61bd43f3fd6fe19bb538eb575b39 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Fri, 26 Jun 2020 12:44:26 +0200 Subject: [PATCH 0521/1304] cec-api: prevent leaking memory through hole in structure [ Upstream commit 6c42227c3467549ddc65efe99c869021d2f4a570 ] Fix this smatch warning: drivers/media/cec/core/cec-api.c:156 cec_adap_g_log_addrs() warn: check that 'log_addrs' doesn't leak information (struct has a hole after 'features') Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/cec/cec-api.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index 4961573850d5..b2b3f779592f 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -147,7 +147,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap, struct cec_log_addrs log_addrs; mutex_lock(&adap->lock); - log_addrs = adap->log_addrs; + /* + * We use memcpy here instead of assignment since there is a + * hole at the end of struct cec_log_addrs that an assignment + * might ignore. So when we do copy_to_user() we could leak + * one byte of memory. + */ + memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs)); if (!adap->is_configured) memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID, sizeof(log_addrs.log_addr)); -- GitLab From 9094d83e14661b0fe8c1d7014e734443b8f4cbb0 Mon Sep 17 00:00:00 2001 From: Ikjoon Jang Date: Tue, 21 Jul 2020 14:54:09 +0800 Subject: [PATCH 0522/1304] HID: quirks: add NOGET quirk for Logitech GROUP [ Upstream commit 68f775ddd2a6f513e225f9a565b054ab48fef142 ] Add HID_QUIRK_NOGET for Logitech GROUP device. Logitech GROUP is a compound with camera and audio. When the HID interface in an audio device is requested to get specific report id, all following control transfers are stalled and never be restored back. 
BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=203419 Signed-off-by: Ikjoon Jang Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-ids.h | 1 + drivers/hid/hid-quirks.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 20530d8adfbb..2c100b73d3fc 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -756,6 +756,7 @@ #define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b #define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c #define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a +#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO 0x0882 #define USB_DEVICE_ID_S510_RECEIVER 0xc50c #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index bdde16395b2c..62f87f8bd972 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -179,6 +179,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET }, { 0 } }; -- GitLab From 8cfbac12a68bb660491759d56e37049a05289691 Mon Sep 17 00:00:00 2001 From: Li Guifu Date: Fri, 24 Jul 2020 09:38:11 +0800 Subject: [PATCH 0523/1304] f2fs: fix use-after-free issue [ Upstream commit 99c787cfd2bd04926f1f553b30bd7dcea2caaba1 ] During umount, f2fs_put_super() unregisters procfs entries after f2fs_destroy_segment_manager(), it may cause use-after-free issue when umount races with procfs accessing, fix it by relocating f2fs_unregister_sysfs(). 
[Chao Yu: change commit title/message a bit] Signed-off-by: Li Guifu Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim Signed-off-by: Sasha Levin --- fs/f2fs/super.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 9782250c9815..161ce0eb8891 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1004,6 +1004,9 @@ static void f2fs_put_super(struct super_block *sb) int i; bool dropped; + /* unregister procfs/sysfs entries in advance to avoid race case */ + f2fs_unregister_sysfs(sbi); + f2fs_quota_off_umount(sb); /* prevent remaining shrinker jobs */ @@ -1067,8 +1070,6 @@ static void f2fs_put_super(struct super_block *sb) kfree(sbi->ckpt); - f2fs_unregister_sysfs(sbi); - sb->s_fs_info = NULL; if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); -- GitLab From 83443512a9493281dd9481681194ea45dbdfd5ee Mon Sep 17 00:00:00 2001 From: Aditya Pakki Date: Sat, 13 Jun 2020 20:33:42 -0500 Subject: [PATCH 0524/1304] drm/nouveau/drm/noveau: fix reference count leak in nouveau_fbcon_open [ Upstream commit bfad51c7633325b5d4b32444efe04329d53297b2 ] nouveau_fbcon_open() calls calls pm_runtime_get_sync() that increments the reference count. In case of failure, decrement the ref count before returning the error. 
Signed-off-by: Aditya Pakki Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 406cb99af7f2..d4fe52ec4c96 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user) struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); int ret = pm_runtime_get_sync(drm->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put(drm->dev->dev); return ret; + } return 0; } -- GitLab From e15bc26ff99cdcb459a59ba5f35ebe2549ed9390 Mon Sep 17 00:00:00 2001 From: Aditya Pakki Date: Sat, 13 Jun 2020 20:29:18 -0500 Subject: [PATCH 0525/1304] drm/nouveau: fix reference count leak in nv50_disp_atomic_commit [ Upstream commit a2cdf39536b0d21fb06113f5e16692513d7bcb9c ] nv50_disp_atomic_commit() calls calls pm_runtime_get_sync and in turn increments the reference count. In case of failure, decrement the ref count before returning the error. 
Signed-off-by: Aditya Pakki Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 10107e551fac..e06ea8c8184c 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1920,8 +1920,10 @@ nv50_disp_atomic_commit(struct drm_device *dev, int ret, i; ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return ret; + } ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) -- GitLab From 66e1e18133c43e0cfff609054aa64ad0652371fa Mon Sep 17 00:00:00 2001 From: Aditya Pakki Date: Sat, 13 Jun 2020 20:22:23 -0500 Subject: [PATCH 0526/1304] drm/nouveau: Fix reference count leak in nouveau_connector_detect [ Upstream commit 990a1162986e8eff7ca18cc5a0e03b4304392ae2 ] nouveau_connector_detect() calls pm_runtime_get_sync and in turn increments the reference count. In case of failure, decrement the ref count before returning the error. 
Signed-off-by: Aditya Pakki Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/nouveau_connector.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index fb0094fc5583..b71afde8f115 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -551,8 +551,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) pm_runtime_get_noresume(dev->dev); } else { ret = pm_runtime_get_sync(dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev->dev); return conn_status; + } } nv_encoder = nouveau_connector_ddc_detect(connector); -- GitLab From db454f8ab4b694eaf6a23b97479aa4b42c38e6e3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 25 Jul 2020 19:51:10 +0100 Subject: [PATCH 0527/1304] locking/lockdep: Fix overflow in presentation of average lock-time [ Upstream commit a7ef9b28aa8d72a1656fa6f0a01bbd1493886317 ] Though the number of lock-acquisitions is tracked as unsigned long, this is passed as the divisor to div_s64() which interprets it as a s32, giving nonsense values with more than 2 billion acquisitons. E.g. 
acquisitions holdtime-min holdtime-max holdtime-total holdtime-avg ------------------------------------------------------------------------- 2350439395 0.07 353.38 649647067.36 0.-32 Signed-off-by: Chris Wilson Signed-off-by: Ingo Molnar Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20200725185110.11588-1-chris@chris-wilson.co.uk Signed-off-by: Sasha Levin --- kernel/locking/lockdep_proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 6fcc4650f0c4..53cc3bb7025a 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -394,7 +394,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt) seq_time(m, lt->min); seq_time(m, lt->max); seq_time(m, lt->total); - seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0); + seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0); } static void seq_stats(struct seq_file *m, struct lock_stat_data *data) -- GitLab From 5d20e391d9e821cbfe35c9bcf9e965690121386c Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Wed, 10 Jun 2020 09:04:42 +0800 Subject: [PATCH 0528/1304] btrfs: file: reserve qgroup space after the hole punch range is locked [ Upstream commit a7f8b1c2ac21bf081b41264c9cfd6260dffa6246 ] The incoming qgroup reserved space timing will move the data reservation to ordered extent completely. However in btrfs_punch_hole_lock_range() will call btrfs_invalidate_page(), which will clear QGROUP_RESERVED bit for the range. In current stage it's OK, but if we're making ordered extents handle the reserved space, then btrfs_punch_hole_lock_range() can clear the QGROUP_RESERVED bit before we submit ordered extent, leading to qgroup reserved space leakage. So here change the timing to make reserve data space after btrfs_punch_hole_lock_range(). The new timing is fine for either current code or the new code. 
Reviewed-by: Josef Bacik Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index dc1841855a69..646152f30584 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3010,14 +3010,14 @@ static int btrfs_zero_range(struct inode *inode, if (ret < 0) goto out; space_reserved = true; - ret = btrfs_qgroup_reserve_data(inode, &data_reserved, - alloc_start, bytes_to_reserve); - if (ret) - goto out; ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state); if (ret) goto out; + ret = btrfs_qgroup_reserve_data(inode, &data_reserved, + alloc_start, bytes_to_reserve); + if (ret) + goto out; ret = btrfs_prealloc_file_range(inode, mode, alloc_start, alloc_end - alloc_start, i_blocksize(inode), -- GitLab From d10ceeb835b5db394e4b8443498190c6827e0576 Mon Sep 17 00:00:00 2001 From: Jing Xiangfeng Date: Mon, 15 Jun 2020 16:12:26 +0800 Subject: [PATCH 0529/1304] scsi: iscsi: Do not put host in iscsi_set_flashnode_param() [ Upstream commit 68e12e5f61354eb42cfffbc20a693153fc39738e ] If scsi_host_lookup() fails we will jump to put_host which may cause a panic. Jump to exit_set_fnode instead. Link: https://lore.kernel.org/r/20200615081226.183068-1-jingxiangfeng@huawei.com Reviewed-by: Mike Christie Signed-off-by: Jing Xiangfeng Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/scsi_transport_iscsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 04d095488c76..698347301198 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3172,7 +3172,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, pr_err("%s could not find host no %u\n", __func__, ev->u.set_flashnode.host_no); err = -ENODEV; - goto put_host; + goto exit_set_fnode; } idx = ev->u.set_flashnode.flashnode_idx; -- GitLab From d0335766691f37dcedc4f39a94a181a294951776 Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Wed, 1 Jul 2020 01:52:48 -0400 Subject: [PATCH 0530/1304] ceph: fix potential mdsc use-after-free crash [ Upstream commit fa9967734227b44acb1b6918033f9122dc7825b9 ] Make sure the delayed work stopped before releasing the resources. cancel_delayed_work_sync() will only guarantee that the work finishes executing if the work is already in the ->worklist. That means after the cancel_delayed_work_sync() returns, it will leave the work requeued if it was rearmed at the end. That can lead to a use after free once the work struct is freed. Fix it by flushing the delayed work instead of trying to cancel it, and ensure that the work doesn't rearm if the mdsc is stopping. 
URL: https://tracker.ceph.com/issues/46293 Signed-off-by: Xiubo Li Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov Signed-off-by: Sasha Levin --- fs/ceph/mds_client.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 0fa14d8b9c64..5f3707a90e7f 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3615,6 +3615,9 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); ceph_check_delayed_caps(mdsc); + if (mdsc->stopping) + return; + mutex_lock(&mdsc->mutex); renew_interval = mdsc->mdsmap->m_session_timeout >> 2; renew_caps = time_after_eq(jiffies, HZ*renew_interval + @@ -3950,7 +3953,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) { dout("stop\n"); - cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ + /* + * Make sure the delayed work stopped before releasing + * the resources. + * + * Because the cancel_delayed_work_sync() will only + * guarantee that the work finishes executing. But the + * delayed work will re-arm itself again after that. + */ + flush_delayed_work(&mdsc->delayed_work); + if (mdsc->mdsmap) ceph_mdsmap_destroy(mdsc->mdsmap); kfree(mdsc->sessions); -- GitLab From 15f15650ee10ff24cd5736dca5f9d693fc9f56d4 Mon Sep 17 00:00:00 2001 From: Javed Hasan Date: Wed, 29 Jul 2020 01:18:24 -0700 Subject: [PATCH 0531/1304] scsi: fcoe: Memory leak fix in fcoe_sysfs_fcf_del() [ Upstream commit e95b4789ff4380733006836d28e554dc296b2298 ] In fcoe_sysfs_fcf_del(), we first deleted the fcf from the list and then freed it if ctlr_dev was not NULL. This was causing a memory leak. Free the fcf even if ctlr_dev is NULL. Link: https://lore.kernel.org/r/20200729081824.30996-3-jhasan@marvell.com Reviewed-by: Girish Basrur Reviewed-by: Santosh Vernekar Reviewed-by: Saurav Kashyap Reviewed-by: Shyam Sundar Signed-off-by: Javed Hasan Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/fcoe/fcoe_ctlr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 24cbd0a2cc69..658c0726581f 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -267,9 +267,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) WARN_ON(!fcf_dev); new->fcf_dev = NULL; fcoe_fcf_device_delete(fcf_dev); - kfree(new); mutex_unlock(&cdev->lock); } + kfree(new); } /** -- GitLab From c10826fbfb5c958701207031503a4432ac556656 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Thu, 16 Jul 2020 14:25:11 -0400 Subject: [PATCH 0532/1304] EDAC/ie31200: Fallback if host bridge device is already initialized [ Upstream commit 709ed1bcef12398ac1a35c149f3e582db04456c2 ] The Intel uncore driver may claim some of the pci ids from ie31200 which means that the ie31200 edac driver will not initialize them as part of pci_register_driver(). Let's add a fallback for this case to 'pci_get_device()' to get a reference on the device such that it can still be configured. This is similar in approach to other edac drivers. 
Signed-off-by: Jason Baron Cc: Borislav Petkov Cc: Mauro Carvalho Chehab Cc: linux-edac Signed-off-by: Tony Luck Link: https://lore.kernel.org/r/1594923911-10885-1-git-send-email-jbaron@akamai.com Signed-off-by: Sasha Levin --- drivers/edac/ie31200_edac.c | 50 ++++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index aac9b9b360b8..9e4781a807cf 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -147,6 +147,8 @@ (n << (28 + (2 * skl) - PAGE_SHIFT)) static int nr_channels; +static struct pci_dev *mci_pdev; +static int ie31200_registered = 1; struct ie31200_priv { void __iomem *window; @@ -518,12 +520,16 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) static int ie31200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - edac_dbg(0, "MC:\n"); + int rc; + edac_dbg(0, "MC:\n"); if (pci_enable_device(pdev) < 0) return -EIO; + rc = ie31200_probe1(pdev, ent->driver_data); + if (rc == 0 && !mci_pdev) + mci_pdev = pci_dev_get(pdev); - return ie31200_probe1(pdev, ent->driver_data); + return rc; } static void ie31200_remove_one(struct pci_dev *pdev) @@ -532,6 +538,8 @@ static void ie31200_remove_one(struct pci_dev *pdev) struct ie31200_priv *priv; edac_dbg(0, "\n"); + pci_dev_put(mci_pdev); + mci_pdev = NULL; mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; @@ -583,17 +591,53 @@ static struct pci_driver ie31200_driver = { static int __init ie31200_init(void) { + int pci_rc, i; + edac_dbg(3, "MC:\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); - return pci_register_driver(&ie31200_driver); + pci_rc = pci_register_driver(&ie31200_driver); + if (pci_rc < 0) + goto fail0; + + if (!mci_pdev) { + ie31200_registered = 0; + for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) { + mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor, + ie31200_pci_tbl[i].device, + NULL); + if (mci_pdev) + 
break; + } + if (!mci_pdev) { + edac_dbg(0, "ie31200 pci_get_device fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]); + if (pci_rc < 0) { + edac_dbg(0, "ie31200 init fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + } + return 0; + +fail1: + pci_unregister_driver(&ie31200_driver); +fail0: + pci_dev_put(mci_pdev); + + return pci_rc; } static void __exit ie31200_exit(void) { edac_dbg(3, "MC:\n"); pci_unregister_driver(&ie31200_driver); + if (!ie31200_registered) + ie31200_remove_one(mci_pdev); } module_init(ie31200_init); -- GitLab From acf3356196b882fe5d060bb87bcb3ce7b9b5f890 Mon Sep 17 00:00:00 2001 From: David Brazdil Date: Thu, 25 Jun 2020 14:14:06 +0100 Subject: [PATCH 0533/1304] KVM: arm64: Fix symbol dependency in __hyp_call_panic_nvhe [ Upstream commit b38b298aa4397e2dc74a89b4dd3eac9e59b64c96 ] __hyp_call_panic_nvhe contains inline assembly which did not declare its dependency on the __hyp_panic_string symbol. The static-declared string has previously been kept alive because of a use in __hyp_call_panic_vhe. Fix this in preparation for separating the source files between VHE and nVHE when the two users land in two different compilation units. The static variable otherwise gets dropped when compiling the nVHE source file, causing an undefined symbol linker error later. Signed-off-by: David Brazdil Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200625131420.71444-2-dbrazdil@google.com Signed-off-by: Sasha Levin --- arch/arm64/kvm/hyp/switch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index f3978931aaf4..3cdefd84af54 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -626,7 +626,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par, * making sure it is a kernel address and not a PC-relative * reference. 
*/ - asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va)); + asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string)); __hyp_do_panic(str_va, spsr, elr, -- GitLab From 6a76cddecff0c7420c1d6785b77dd1823f36219d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 6 Jul 2020 15:22:46 +0200 Subject: [PATCH 0534/1304] powerpc/spufs: add CONFIG_COREDUMP dependency [ Upstream commit b648a5132ca3237a0f1ce5d871fff342b0efcf8a ] The kernel test robot pointed out a slightly different error message after recent commit 5456ffdee666 ("powerpc/spufs: simplify spufs core dumping") to spufs for a configuration that never worked: powerpc64-linux-ld: arch/powerpc/platforms/cell/spufs/file.o: in function `.spufs_proxydma_info_dump': >> file.c:(.text+0x4c68): undefined reference to `.dump_emit' powerpc64-linux-ld: arch/powerpc/platforms/cell/spufs/file.o: in function `.spufs_dma_info_dump': file.c:(.text+0x4d70): undefined reference to `.dump_emit' powerpc64-linux-ld: arch/powerpc/platforms/cell/spufs/file.o: in function `.spufs_wbox_info_dump': file.c:(.text+0x4df4): undefined reference to `.dump_emit' Add a Kconfig dependency to prevent this from happening again. 
Reported-by: kernel test robot Signed-off-by: Arnd Bergmann Acked-by: Jeremy Kerr Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200706132302.3885935-1-arnd@arndb.de Signed-off-by: Sasha Levin --- arch/powerpc/platforms/cell/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 9f5958f16923..741a8fa8a3e6 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -46,6 +46,7 @@ config SPU_FS tristate "SPU file system" default m depends on PPC_CELL + depends on COREDUMP select SPU_BASE help The SPU file system is used to access Synergistic Processing -- GitLab From c5b61db89d201dee13cc400eab1f7e0e49f4f7b2 Mon Sep 17 00:00:00 2001 From: Changming Liu Date: Sat, 11 Jul 2020 00:30:18 -0400 Subject: [PATCH 0535/1304] USB: sisusbvga: Fix a potential UB caused by left shifting a negative value [ Upstream commit 2b53a19284f537168fb506f2f40d7fda40a01162 ] The char buffer buf, receives data directly from user space, so its content might be negative and its elements are left shifted to form an unsigned integer. Since left shifting a negative value is undefined behavior, thus change the char to u8 to eliminate this UB. Signed-off-by: Changming Liu Link: https://lore.kernel.org/r/20200711043018.928-1-charley.ashbringer@gmail.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/misc/sisusbvga/sisusb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 6376be1f5fd2..4877bf82ad39 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -761,7 +761,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr, u8 swap8, fromkern = kernbuffer ?
1 : 0; u16 swap16; u32 swap32, flag = (length >> 28) & 1; - char buf[4]; + u8 buf[4]; /* if neither kernbuffer not userbuffer are given, assume * data in obuf -- GitLab From 0e0e6185ea7762e224cecc27c0da5bf42d8b363b Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Thu, 6 Aug 2020 23:25:01 -0700 Subject: [PATCH 0536/1304] efi: provide empty efi_enter_virtual_mode implementation [ Upstream commit 2c547f9da0539ad1f7ef7f08c8c82036d61b011a ] When CONFIG_EFI is not enabled, we might get an undefined reference to efi_enter_virtual_mode() error, if this efi_enabled() call isn't inlined into start_kernel(). This happens in particular, if start_kernel() is annotated with __no_sanitize_address. Reported-by: kernel test robot Signed-off-by: Andrey Konovalov Signed-off-by: Andrew Morton Acked-by: Ard Biesheuvel Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Catalin Marinas Cc: Dmitry Vyukov Cc: Elena Petrova Cc: Marco Elver Cc: Vincenzo Frascino Cc: Walter Wu Link: http://lkml.kernel.org/r/6514652d3a32d3ed33d6eb5c91d0af63bf0d1a0c.1596544734.git.andreyknvl@google.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- include/linux/efi.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/efi.h b/include/linux/efi.h index 6797811bf1e6..9a5d4b499271 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -996,7 +996,11 @@ extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); +#ifdef CONFIG_EFI extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#else +static inline void efi_enter_virtual_mode (void) {} +#endif #ifdef CONFIG_X86 extern void efi_free_boot_services(void); extern efi_status_t efi_query_variable_store(u32 attributes, -- GitLab From 70323cca909d68ec88d44141a357b4c77e194492 Mon Sep 17 00:00:00 2001 From: Zhi Chen Date: Tue, 14 Jan 2020 12:35:21 +0800
Subject: [PATCH 0537/1304] Revert "ath10k: fix DMA related firmware crashes on multiple devices" [ Upstream commit a1769bb68a850508a492e3674ab1e5e479b11254 ] This reverts commit 76d164f582150fd0259ec0fcbc485470bcd8033e. PCIe hung issue was observed on multiple platforms. The issue was reproduced when DUT was configured as AP and associated with 50+ STAs. For QCA9984/QCA9888, the DMA_BURST_SIZE register controls the AXI burst size of the RD/WR access to the HOST MEM. 0 - No split , RAW read/write transfer size from MAC is put out on bus as burst length 1 - Split at 256 byte boundary 2,3 - Reserved With PCIe protocol analyzer, we can see DMA Read crossing 4KB boundary when issue happened. It broke PCIe spec and caused PCIe stuck. So revert the default value from 0 to 1. Tested: IPQ8064 + QCA9984 with firmware 10.4-3.10-00047 QCS404 + QCA9984 with firmware 10.4-3.9.0.2--00044 Synaptics AS370 + QCA9888 with firmware 10.4-3.9.0.2--00040 Signed-off-by: Zhi Chen Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ath10k/hw.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index fac58c3c576a..3ff65a0a834a 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -753,7 +753,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define TARGET_10_4_TX_DBG_LOG_SIZE 1024 #define TARGET_10_4_NUM_WDS_ENTRIES 32 -#define TARGET_10_4_DMA_BURST_SIZE 0 +#define TARGET_10_4_DMA_BURST_SIZE 1 #define TARGET_10_4_MAC_AGGR_DELIM 0 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 #define TARGET_10_4_VOW_CONFIG 0 -- GitLab From 0961d7fe12491b474546952e25b441b9e56915c6 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Sat, 2 May 2020 14:50:52 +0200 Subject: [PATCH 0538/1304] media: gpio-ir-tx: improve precision of transmitted signal due to scheduling [ Upstream commit 
ea8912b788f8144e7d32ee61e5ccba45424bef83 ] usleep_range() may take longer than the max argument due to scheduling, especially under load. This is causing random errors in the transmitted IR. Remove the usleep_range() in favour of busy-looping with udelay(). Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/rc/gpio-ir-tx.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c index cd476cab9782..4e70b67ccd18 100644 --- a/drivers/media/rc/gpio-ir-tx.c +++ b/drivers/media/rc/gpio-ir-tx.c @@ -87,13 +87,8 @@ static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf, // space edge = ktime_add_us(edge, txbuf[i]); delta = ktime_us_delta(edge, ktime_get()); - if (delta > 10) { - spin_unlock_irqrestore(&gpio_ir->lock, flags); - usleep_range(delta, delta + 10); - spin_lock_irqsave(&gpio_ir->lock, flags); - } else if (delta > 0) { + if (delta > 0) udelay(delta); - } } else { // pulse ktime_t last = ktime_add_us(edge, txbuf[i]); -- GitLab From 14cb42ed874b742fae0e3acf10b43eb2ca0217aa Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 12 Aug 2020 17:03:09 -0700 Subject: [PATCH 0539/1304] drm/msm/adreno: fix updating ring fence [ Upstream commit f228af11dfa1d1616bc67f3a4119ab77c36181f1 ] We need to set it to the most recent completed fence, not the most recent submitted. Otherwise we have races where we think we can retire submits that the GPU is not finished with, if the GPU doesn't manage to overwrite the seqno before we look at it. This can show up with hang recovery if one of the submits after the crashing submit also hangs after it is replayed. 
Fixes: f97decac5f4c ("drm/msm: Support multiple ringbuffers") Signed-off-by: Rob Clark Reviewed-by: Jordan Crouse Signed-off-by: Rob Clark Signed-off-by: Sasha Levin --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 93d70f4a2154..c9f831604558 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -221,7 +221,7 @@ int adreno_hw_init(struct msm_gpu *gpu) ring->next = ring->start; /* reset completed fence seqno: */ - ring->memptrs->fence = ring->seqno; + ring->memptrs->fence = ring->fctx->completed_fence; ring->memptrs->rptr = 0; } -- GitLab From 0ece300942e050e30300cf108d6c71f13c0e424e Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Sun, 2 Aug 2020 19:15:45 +0800 Subject: [PATCH 0540/1304] nvme-fc: Fix wrong return value in __nvme_fc_init_request() [ Upstream commit f34448cd0dc697723fb5f4118f8431d9233b370d ] On an error exit path, a negative error code should be returned instead of a positive return value. 
Fixes: e399441de9115 ("nvme-fabrics: Add host support for FC transport") Cc: James Smart Signed-off-by: Tianjia Zhang Reviewed-by: Chaitanya Kulkarni Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- drivers/nvme/host/fc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index ed43b06353a3..bb3b447c5646 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1716,7 +1716,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { dev_err(ctrl->dev, "FCP Op failed - cmdiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; goto out_on_error; } @@ -1726,7 +1726,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { dev_err(ctrl->dev, "FCP Op failed - rspiu dma mapping failed.\n"); - ret = EFAULT; + ret = -EFAULT; } atomic_set(&op->state, FCPOP_STATE_IDLE); -- GitLab From 515903c16bde320ba4c9fbce97944f1214d7a464 Mon Sep 17 00:00:00 2001 From: Hou Pu Date: Fri, 21 Aug 2020 04:34:42 -0400 Subject: [PATCH 0541/1304] null_blk: fix passing of REQ_FUA flag in null_handle_rq [ Upstream commit 2d62e6b038e729c3e4bfbfcfbd44800ef0883680 ] REQ_FUA should be checked using rq->cmd_flags instead of req_op(). 
Fixes: deb78b419dfda ("nullb: emulate cache") Signed-off-by: Hou Pu Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- drivers/block/null_blk_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index d2d7dc9cd58d..4fef1fb918ec 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1086,7 +1086,7 @@ static int null_handle_rq(struct nullb_cmd *cmd) len = bvec.bv_len; err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, op_is_write(req_op(rq)), sector, - req_op(rq) & REQ_FUA); + rq->cmd_flags & REQ_FUA); if (err) { spin_unlock_irq(&nullb->lock); return err; -- GitLab From 69a11b99ce82be228138aa565e7ba2b63b77bc6e Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 17 Aug 2020 14:19:30 +0200 Subject: [PATCH 0542/1304] i2c: rcar: in slave mode, clear NACK earlier [ Upstream commit 914a7b3563b8fb92f976619bbd0fa3a4a708baae ] Currently, a NACK in slave mode is set/cleared when SCL is held low by the IP core right before the bit is about to be pushed out. This is too late for clearing and then a NACK from the previous byte is still used for the current one. Now, let's clear the NACK right after we detected the STOP condition following the NACK. 
Fixes: de20d1857dd6 ("i2c: rcar: add slave support") Signed-off-by: Wolfram Sang Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-rcar.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index dcdce18fc706..f9029800d399 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -594,6 +594,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) /* master sent stop */ if (ssr_filtered & SSR) { i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); + rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); } -- GitLab From 4627ea08b85019f77a71ce1a7d11a2ff139d2280 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 14 Aug 2020 07:55:01 +0200 Subject: [PATCH 0543/1304] usb: gadget: f_tcm: Fix some resource leaks in some error paths [ Upstream commit 07c8434150f4eb0b65cae288721c8af1080fde17 ] If a memory allocation fails within a 'usb_ep_alloc_request()' call, the already allocated memory must be released. Fix a mix-up in the code and free the correct requests. 
Fixes: c52661d60f63 ("usb-gadget: Initial merge of target module for UASP + BOT") Signed-off-by: Christophe JAILLET Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin --- drivers/usb/gadget/function/f_tcm.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 106988a6661a..785826ab5348 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -751,12 +751,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream) goto err_sts; return 0; + err_sts: - usb_ep_free_request(fu->ep_status, stream->req_status); - stream->req_status = NULL; -err_out: usb_ep_free_request(fu->ep_out, stream->req_out); stream->req_out = NULL; +err_out: + usb_ep_free_request(fu->ep_in, stream->req_in); + stream->req_in = NULL; out: return -ENOMEM; } -- GitLab From 3b1a4ea0028afe0194971c2b6910474a3eb89012 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Wed, 17 Jun 2020 11:25:49 +0200 Subject: [PATCH 0544/1304] jbd2: make sure jh have b_transaction set in refile/unfile_buffer [ Upstream commit 24dc9864914eb5813173cfa53313fcd02e4aea7d ] Callers of __jbd2_journal_unfile_buffer() and __jbd2_journal_refile_buffer() assume that the b_transaction is set. In fact if it's not, we can end up with journal_head refcounting errors leading to crash much later that might be very hard to track down. Add asserts to make sure that is the case. We also make sure that b_next_transaction is NULL in __jbd2_journal_unfile_buffer() since the callers expect that as well and we should not get into that stage in this state anyway, leading to problems later on if we do. Tested with fstests. 
Signed-off-by: Lukas Czerner Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20200617092549.6712-1-lczerner@redhat.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/jbd2/transaction.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 43693b679710..5a0de78a5d71 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1915,6 +1915,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) */ static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { + J_ASSERT_JH(jh, jh->b_transaction != NULL); + J_ASSERT_JH(jh, jh->b_next_transaction == NULL); + __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; jbd2_journal_put_journal_head(jh); @@ -2462,6 +2465,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) was_dirty = test_clear_buffer_jbddirty(bh); __jbd2_journal_temp_unlink_buffer(jh); + + /* + * b_transaction must be set, otherwise the new b_transaction won't + * be holding jh reference + */ + J_ASSERT_JH(jh, jh->b_transaction != NULL); + /* * We set b_transaction here because b_next_transaction will inherit * our jh reference and thus __jbd2_journal_file_buffer() must not -- GitLab From a6d49257cbe53c7bca1a0353a6443f53cbed9cc7 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 10 Jul 2020 16:07:59 +0200 Subject: [PATCH 0545/1304] ext4: don't BUG on inconsistent journal feature [ Upstream commit 11215630aada28307ba555a43138db6ac54fa825 ] A customer has reported a BUG_ON in ext4_clear_journal_err() hitting during an LTP testing. Either this has been caused by a test setup issue where the filesystem was being overwritten while LTP was mounting it or the journal replay has overwritten the superblock with invalid data. In either case it is preferable we don't take the machine down with a BUG_ON. So handle the situation of unexpectedly missing has_journal feature more gracefully. 
We issue warning and fail the mount in the cases where the race window is narrow and the failed check is most likely a programming error. In cases where fs corruption is more likely, we do full ext4_error() handling before failing mount / remount. Reviewed-by: Lukas Czerner Signed-off-by: Jan Kara Link: https://lore.kernel.org/r/20200710140759.18031-1-jack@suse.cz Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/super.c | 68 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 21 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 1428dab2afff..47e406338a32 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -65,10 +65,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_show_options(struct seq_file *seq, struct dentry *root); static int ext4_commit_super(struct super_block *sb, int sync); -static void ext4_mark_recovery_complete(struct super_block *sb, +static int ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); -static void ext4_clear_journal_err(struct super_block *sb, - struct ext4_super_block *es); +static int ext4_clear_journal_err(struct super_block *sb, + struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); @@ -4545,7 +4545,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); - ext4_mark_recovery_complete(sb, es); + err = ext4_mark_recovery_complete(sb, es); + if (err) + goto failed_mount8; } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) @@ -4588,10 +4590,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int 
silent) ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; -#ifdef CONFIG_QUOTA failed_mount8: ext4_unregister_sysfs(sb); -#endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: @@ -4727,7 +4727,8 @@ static journal_t *ext4_get_journal(struct super_block *sb, struct inode *journal_inode; journal_t *journal; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; journal_inode = ext4_get_journal_inode(sb, journal_inum); if (!journal_inode) @@ -4757,7 +4758,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, struct ext4_super_block *es; struct block_device *bdev; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) @@ -4849,7 +4851,8 @@ static int ext4_load_journal(struct super_block *sb, int err = 0; int really_read_only; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return -EFSCORRUPTED; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -4919,7 +4922,12 @@ static int ext4_load_journal(struct super_block *sb, } EXT4_SB(sb)->s_journal = journal; - ext4_clear_journal_err(sb, es); + err = ext4_clear_journal_err(sb, es); + if (err) { + EXT4_SB(sb)->s_journal = NULL; + jbd2_journal_destroy(journal); + return err; + } if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -5015,26 +5023,32 @@ static int ext4_commit_super(struct super_block *sb, int sync) * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. 
*/ -static void ext4_mark_recovery_complete(struct super_block *sb, - struct ext4_super_block *es) +static int ext4_mark_recovery_complete(struct super_block *sb, + struct ext4_super_block *es) { + int err; journal_t *journal = EXT4_SB(sb)->s_journal; if (!ext4_has_feature_journal(sb)) { - BUG_ON(journal != NULL); - return; + if (journal != NULL) { + ext4_error(sb, "Journal got removed while the fs was " + "mounted!"); + return -EFSCORRUPTED; + } + return 0; } jbd2_journal_lock_updates(journal); - if (jbd2_journal_flush(journal) < 0) + err = jbd2_journal_flush(journal); + if (err < 0) goto out; if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) { ext4_clear_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); } - out: jbd2_journal_unlock_updates(journal); + return err; } /* @@ -5042,14 +5056,17 @@ static void ext4_mark_recovery_complete(struct super_block *sb, * has recorded an error from a previous lifetime, move that error to the * main filesystem now. */ -static void ext4_clear_journal_err(struct super_block *sb, +static int ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal; int j_errno; const char *errstr; - BUG_ON(!ext4_has_feature_journal(sb)); + if (!ext4_has_feature_journal(sb)) { + ext4_error(sb, "Journal got removed while the fs was mounted!"); + return -EFSCORRUPTED; + } journal = EXT4_SB(sb)->s_journal; @@ -5074,6 +5091,7 @@ static void ext4_clear_journal_err(struct super_block *sb, jbd2_journal_clear_err(journal); jbd2_journal_update_sb_errno(journal); } + return 0; } /* @@ -5344,8 +5362,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); - if (sbi->s_journal) + if (sbi->s_journal) { + /* + * We let remount-ro finish even if marking fs + * as clean failed... 
+ */ ext4_mark_recovery_complete(sb, es); + } if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); } else { @@ -5393,8 +5416,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) * been changed by e2fsck since we originally mounted * the partition.) */ - if (sbi->s_journal) - ext4_clear_journal_err(sb, es); + if (sbi->s_journal) { + err = ext4_clear_journal_err(sb, es); + if (err) + goto restore_opts; + } sbi->s_mount_state = le16_to_cpu(es->s_state); err = ext4_setup_super(sb, es, 0); -- GitLab From 47788043e5fa6415fa9ff0b4af217079b06815af Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Fri, 17 Jul 2020 11:06:05 +0200 Subject: [PATCH 0546/1304] ext4: handle read only external journal device [ Upstream commit 273108fa5015eeffc4bacfa5ce272af3434b96e4 ] Ext4 uses blkdev_get_by_dev() to get the block_device for journal device which does check to see if the read-only block device was opened read-only. As a result ext4 will happily proceed mounting the file system with external journal on read-only device. This is bad as we would not be able to use the journal leading to errors later on. Instead of simply failing to mount file system in this case, treat it in a similar way we treat internal journal on read-only device. Allow to mount with -o noload in read-only mode.
This can be reproduced easily like this: mke2fs -F -O journal_dev $JOURNAL_DEV 100M mkfs.$FSTYPE -F -J device=$JOURNAL_DEV $FS_DEV blockdev --setro $JOURNAL_DEV mount $FS_DEV $MNT touch $MNT/file umount $MNT leading to error like this [ 1307.318713] ------------[ cut here ]------------ [ 1307.323362] generic_make_request: Trying to write to read-only block-device dm-2 (partno 0) [ 1307.331741] WARNING: CPU: 36 PID: 3224 at block/blk-core.c:855 generic_make_request_checks+0x2c3/0x580 [ 1307.341041] Modules linked in: ext4 mbcache jbd2 rfkill intel_rapl_msr intel_rapl_common isst_if_commd [ 1307.419445] CPU: 36 PID: 3224 Comm: jbd2/dm-2 Tainted: G W I 5.8.0-rc5 #2 [ 1307.427359] Hardware name: Dell Inc. PowerEdge R740/01KPX8, BIOS 2.3.10 08/15/2019 [ 1307.434932] RIP: 0010:generic_make_request_checks+0x2c3/0x580 [ 1307.440676] Code: 94 03 00 00 48 89 df 48 8d 74 24 08 c6 05 cf 2b 18 01 01 e8 7f a4 ff ff 48 c7 c7 50e [ 1307.459420] RSP: 0018:ffffc0d70eb5fb48 EFLAGS: 00010286 [ 1307.464646] RAX: 0000000000000000 RBX: ffff9b33b2978300 RCX: 0000000000000000 [ 1307.471780] RDX: ffff9b33e12a81e0 RSI: ffff9b33e1298000 RDI: ffff9b33e1298000 [ 1307.478913] RBP: ffff9b7b9679e0c0 R08: 0000000000000837 R09: 0000000000000024 [ 1307.486044] R10: 0000000000000000 R11: ffffc0d70eb5f9f0 R12: 0000000000000400 [ 1307.493177] R13: 0000000000000000 R14: 0000000000000001 R15: 0000000000000000 [ 1307.500308] FS: 0000000000000000(0000) GS:ffff9b33e1280000(0000) knlGS:0000000000000000 [ 1307.508396] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1307.514142] CR2: 000055eaf4109000 CR3: 0000003dee40a006 CR4: 00000000007606e0 [ 1307.521273] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 1307.528407] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 1307.535538] PKRU: 55555554 [ 1307.538250] Call Trace: [ 1307.540708] generic_make_request+0x30/0x340 [ 1307.544985] submit_bio+0x43/0x190 [ 1307.548393] ? 
bio_add_page+0x62/0x90 [ 1307.552068] submit_bh_wbc+0x16a/0x190 [ 1307.555833] jbd2_write_superblock+0xec/0x200 [jbd2] [ 1307.560803] jbd2_journal_update_sb_log_tail+0x65/0xc0 [jbd2] [ 1307.566557] jbd2_journal_commit_transaction+0x2ae/0x1860 [jbd2] [ 1307.572566] ? check_preempt_curr+0x7a/0x90 [ 1307.576756] ? update_curr+0xe1/0x1d0 [ 1307.580421] ? account_entity_dequeue+0x7b/0xb0 [ 1307.584955] ? newidle_balance+0x231/0x3d0 [ 1307.589056] ? __switch_to_asm+0x42/0x70 [ 1307.592986] ? __switch_to_asm+0x36/0x70 [ 1307.596918] ? lock_timer_base+0x67/0x80 [ 1307.600851] kjournald2+0xbd/0x270 [jbd2] [ 1307.604873] ? finish_wait+0x80/0x80 [ 1307.608460] ? commit_timeout+0x10/0x10 [jbd2] [ 1307.612915] kthread+0x114/0x130 [ 1307.616152] ? kthread_park+0x80/0x80 [ 1307.619816] ret_from_fork+0x22/0x30 [ 1307.623400] ---[ end trace 27490236265b1630 ]--- Signed-off-by: Lukas Czerner Reviewed-by: Andreas Dilger Link: https://lore.kernel.org/r/20200717090605.2612-1-lczerner@redhat.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/super.c | 51 ++++++++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 47e406338a32..23ef8fbdb582 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4850,6 +4850,7 @@ static int ext4_load_journal(struct super_block *sb, dev_t journal_dev; int err = 0; int really_read_only; + int journal_dev_ro; if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) return -EFSCORRUPTED; @@ -4862,7 +4863,31 @@ static int ext4_load_journal(struct super_block *sb, } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); - really_read_only = bdev_read_only(sb->s_bdev); + if (journal_inum && journal_dev) { + ext4_msg(sb, KERN_ERR, + "filesystem has both journal inode and journal device!"); + return -EINVAL; + } + + if (journal_inum) { + journal = ext4_get_journal(sb, journal_inum); + if (!journal) + return -EINVAL; + } else { + journal = 
ext4_get_dev_journal(sb, journal_dev); + if (!journal) + return -EINVAL; + } + + journal_dev_ro = bdev_read_only(journal->j_dev); + really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro; + + if (journal_dev_ro && !sb_rdonly(sb)) { + ext4_msg(sb, KERN_ERR, + "journal device read-only, try mounting with '-o ro'"); + err = -EROFS; + goto err_out; + } /* * Are we loading a blank journal or performing recovery after a @@ -4877,27 +4902,14 @@ static int ext4_load_journal(struct super_block *sb, ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed " "(try mounting with noload)"); - return -EROFS; + err = -EROFS; + goto err_out; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } - if (journal_inum && journal_dev) { - ext4_msg(sb, KERN_ERR, "filesystem has both journal " - "and inode journals!"); - return -EINVAL; - } - - if (journal_inum) { - if (!(journal = ext4_get_journal(sb, journal_inum))) - return -EINVAL; - } else { - if (!(journal = ext4_get_dev_journal(sb, journal_dev))) - return -EINVAL; - } - if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); @@ -4917,8 +4929,7 @@ static int ext4_load_journal(struct super_block *sb, if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); - jbd2_journal_destroy(journal); - return err; + goto err_out; } EXT4_SB(sb)->s_journal = journal; @@ -4938,6 +4949,10 @@ static int ext4_load_journal(struct super_block *sb, } return 0; + +err_out: + jbd2_journal_destroy(journal); + return err; } static int ext4_commit_super(struct super_block *sb, int sync) -- GitLab From 8eed535dada298f74806d4d91948305a4cea1d5f Mon Sep 17 00:00:00 2001 From: "zhangyi (F)" Date: Sat, 20 Jun 2020 10:54:26 +0800 Subject: [PATCH 0547/1304] jbd2: abort journal if free a async write error metadata buffer [ Upstream commit c044f3d8360d2ecf831ba2cc9f08cf9fb2c699fb ] If we free a metadata buffer which has been failed to async write out in the background, the jbd2 checkpoint 
procedure will not detect this failure in jbd2_log_do_checkpoint(), so it may lead to filesystem inconsistency after cleanup journal tail. This patch abort the journal if free a buffer has write_io_error flag to prevent potential further inconsistency. Signed-off-by: zhangyi (F) Link: https://lore.kernel.org/r/20200620025427.1756360-5-yi.zhang@huawei.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/jbd2/transaction.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 5a0de78a5d71..8c305593fb51 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2009,6 +2009,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, { struct buffer_head *head; struct buffer_head *bh; + bool has_write_io_error = false; int ret = 0; J_ASSERT(PageLocked(page)); @@ -2033,11 +2034,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, jbd_unlock_bh_state(bh); if (buffer_jbd(bh)) goto busy; + + /* + * If we free a metadata buffer which has been failed to + * write out, the jbd2 checkpoint procedure will not detect + * this failure and may lead to filesystem inconsistency + * after cleanup journal tail. + */ + if (buffer_write_io_error(bh)) { + pr_err("JBD2: Error while async write back metadata bh %llu.", + (unsigned long long)bh->b_blocknr); + has_write_io_error = true; + } } while ((bh = bh->b_this_page) != head); ret = try_to_free_buffers(page); busy: + if (has_write_io_error) + jbd2_journal_abort(journal, -EIO); + return ret; } -- GitLab From bfb8d9b74750e7c9b12f9e18b4885617a6433f6d Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Thu, 23 Jul 2020 17:05:26 +0200 Subject: [PATCH 0548/1304] ext4: handle option set by mount flags correctly [ Upstream commit f25391ebb475d3ffb3aa61bb90e3594c841749ef ] Currently there is a problem with mount options that can be both set by vfs using mount flags or by a string parsing in ext4. 
i_version/iversion options gets lost after remount, for example $ mount -o i_version /dev/pmem0 /mnt $ grep pmem0 /proc/self/mountinfo | grep i_version 310 95 259:0 / /mnt rw,relatime shared:163 - ext4 /dev/pmem0 rw,seclabel,i_version $ mount -o remount,ro /mnt $ grep pmem0 /proc/self/mountinfo | grep i_version nolazytime gets ignored by ext4 on remount, for example $ mount -o lazytime /dev/pmem0 /mnt $ grep pmem0 /proc/self/mountinfo | grep lazytime 310 95 259:0 / /mnt rw,relatime shared:163 - ext4 /dev/pmem0 rw,lazytime,seclabel $ mount -o remount,nolazytime /mnt $ grep pmem0 /proc/self/mountinfo | grep lazytime 310 95 259:0 / /mnt rw,relatime shared:163 - ext4 /dev/pmem0 rw,lazytime,seclabel Fix it by applying the SB_LAZYTIME and SB_I_VERSION flags from *flags to s_flags before we parse the option and use the resulting state of the same flags in *flags at the end of successful remount. Signed-off-by: Lukas Czerner Reviewed-by: Ritesh Harjani Link: https://lore.kernel.org/r/20200723150526.19931-1-lczerner@redhat.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/super.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 23ef8fbdb582..03ebb0b38546 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5249,7 +5249,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); - unsigned long old_sb_flags; + unsigned long old_sb_flags, vfs_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; @@ -5292,6 +5292,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. 
Copy those selected flags from *flags to s_flags + */ + vfs_flags = SB_LAZYTIME | SB_I_VERSION; + sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags); + if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; @@ -5345,9 +5353,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } - if (*flags & SB_LAZYTIME) - sb->s_flags |= SB_LAZYTIME; - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; @@ -5487,7 +5492,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } #endif - *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from s_flags to *flags + */ + *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags); + ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data); kfree(orig_data); return 0; -- GitLab From c279f7a44fd3d1554561efb71340302a6b005ce9 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 28 Jul 2020 15:04:32 +0200 Subject: [PATCH 0549/1304] ext4: handle error of ext4_setup_system_zone() on remount [ Upstream commit d176b1f62f242ab259ff665a26fbac69db1aecba ] ext4_setup_system_zone() can fail. Handle the failure in ext4_remount(). 
Reviewed-by: Lukas Czerner Signed-off-by: Jan Kara Link: https://lore.kernel.org/r/20200728130437.7804-2-jack@suse.cz Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/super.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 03ebb0b38546..daabd7a2cee8 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5470,7 +5470,10 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_register_li_request(sb, first_not_zeroed); } - ext4_setup_system_zone(sb); + err = ext4_setup_system_zone(sb); + if (err) + goto restore_opts; + if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { err = ext4_commit_super(sb, 1); if (err) -- GitLab From 7f6858a3b9361ca9ff89cc2b9cdc3cf3b9bee356 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 28 Jul 2020 15:04:37 +0200 Subject: [PATCH 0550/1304] ext4: correctly restore system zone info when remount fails [ Upstream commit 0f5bde1db174f6c471f0bd27198575719dabe3e5 ] When remounting filesystem fails late during remount handling and block_validity mount option is also changed during the remount, we fail to restore system zone information to a state matching the mount option. This is mostly harmless, just the block validity checking will not match the situation described by the mount option. Make sure these two are always consistent. 
Reported-by: Lukas Czerner Reviewed-by: Lukas Czerner Signed-off-by: Jan Kara Link: https://lore.kernel.org/r/20200728130437.7804-7-jack@suse.cz Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/block_validity.c | 8 -------- fs/ext4/super.c | 29 +++++++++++++++++++++-------- 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 552164034d34..f22a89cdb407 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -250,14 +250,6 @@ int ext4_setup_system_zone(struct super_block *sb) int flex_size = ext4_flex_bg_size(sbi); int ret; - if (!test_opt(sb, BLOCK_VALIDITY)) { - if (sbi->system_blks) - ext4_release_system_zone(sb); - return 0; - } - if (sbi->system_blks) - return 0; - system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL); if (!system_blks) return -ENOMEM; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index daabd7a2cee8..9ac34b6ae073 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4473,11 +4473,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) ext4_set_resv_clusters(sb); - err = ext4_setup_system_zone(sb); - if (err) { - ext4_msg(sb, KERN_ERR, "failed to initialize system " - "zone (%d)", err); - goto failed_mount4a; + if (test_opt(sb, BLOCK_VALIDITY)) { + err = ext4_setup_system_zone(sb); + if (err) { + ext4_msg(sb, KERN_ERR, "failed to initialize system " + "zone (%d)", err); + goto failed_mount4a; + } } ext4_ext_init(sb); @@ -5470,9 +5472,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_register_li_request(sb, first_not_zeroed); } - err = ext4_setup_system_zone(sb); - if (err) - goto restore_opts; + /* + * Handle creation of system zone data early because it can fail. + * Releasing of existing data is done when we are sure remount will + * succeed. 
+ */ + if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) { + err = ext4_setup_system_zone(sb); + if (err) + goto restore_opts; + } if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { err = ext4_commit_super(sb, 1); @@ -5494,6 +5503,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } } #endif + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); /* * Some options can be enabled by ext4 and/or by VFS mount flag @@ -5515,6 +5526,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) { -- GitLab From 4aaac9c537b79ffd0602db06cd5127a455e49275 Mon Sep 17 00:00:00 2001 From: Xianting Tian Date: Fri, 31 Jul 2020 12:10:25 -0400 Subject: [PATCH 0551/1304] fs: prevent BUG_ON in submit_bh_wbc() [ Upstream commit 377254b2cd2252c7c3151b113cbdf93a7736c2e9 ] If a device is hot-removed --- for example, when a physical device is unplugged from pcie slot or a nbd device's network is shutdown --- this can result in a BUG_ON() crash in submit_bh_wbc(). This is because the when the block device dies, the buffer heads will have their Buffer_Mapped flag get cleared, leading to the crash in submit_bh_wbc. We had attempted to work around this problem in commit a17712c8 ("ext4: check superblock mapped prior to committing"). Unfortunately, it's still possible to hit the BUG_ON(!buffer_mapped(bh)) if the device dies between when the work-around check in ext4_commit_super() and when submit_bh_wbh() is finally called: Code path: ext4_commit_super judge if 'buffer_mapped(sbh)' is false, return <== commit a17712c8 lock_buffer(sbh) ... 
unlock_buffer(sbh) __sync_dirty_buffer(sbh,... lock_buffer(sbh) judge if 'buffer_mapped(sbh))' is false, return <== added by this patch submit_bh(...,sbh) submit_bh_wbc(...,sbh,...) [100722.966497] kernel BUG at fs/buffer.c:3095! <== BUG_ON(!buffer_mapped(bh))' in submit_bh_wbc() [100722.966503] invalid opcode: 0000 [#1] SMP [100722.966566] task: ffff8817e15a9e40 task.stack: ffffc90024744000 [100722.966574] RIP: 0010:submit_bh_wbc+0x180/0x190 [100722.966575] RSP: 0018:ffffc90024747a90 EFLAGS: 00010246 [100722.966576] RAX: 0000000000620005 RBX: ffff8818a80603a8 RCX: 0000000000000000 [100722.966576] RDX: ffff8818a80603a8 RSI: 0000000000020800 RDI: 0000000000000001 [100722.966577] RBP: ffffc90024747ac0 R08: 0000000000000000 R09: ffff88207f94170d [100722.966578] R10: 00000000000437c8 R11: 0000000000000001 R12: 0000000000020800 [100722.966578] R13: 0000000000000001 R14: 000000000bf9a438 R15: ffff88195f333000 [100722.966580] FS: 00007fa2eee27700(0000) GS:ffff88203d840000(0000) knlGS:0000000000000000 [100722.966580] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [100722.966581] CR2: 0000000000f0b008 CR3: 000000201a622003 CR4: 00000000007606e0 [100722.966582] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [100722.966583] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [100722.966583] PKRU: 55555554 [100722.966583] Call Trace: [100722.966588] __sync_dirty_buffer+0x6e/0xd0 [100722.966614] ext4_commit_super+0x1d8/0x290 [ext4] [100722.966626] __ext4_std_error+0x78/0x100 [ext4] [100722.966635] ? __ext4_journal_get_write_access+0xca/0x120 [ext4] [100722.966646] ext4_reserve_inode_write+0x58/0xb0 [ext4] [100722.966655] ? ext4_dirty_inode+0x48/0x70 [ext4] [100722.966663] ext4_mark_inode_dirty+0x53/0x1e0 [ext4] [100722.966671] ? 
__ext4_journal_start_sb+0x6d/0xf0 [ext4] [100722.966679] ext4_dirty_inode+0x48/0x70 [ext4] [100722.966682] __mark_inode_dirty+0x17f/0x350 [100722.966686] generic_update_time+0x87/0xd0 [100722.966687] touch_atime+0xa9/0xd0 [100722.966690] generic_file_read_iter+0xa09/0xcd0 [100722.966694] ? page_cache_tree_insert+0xb0/0xb0 [100722.966704] ext4_file_read_iter+0x4a/0x100 [ext4] [100722.966707] ? __inode_security_revalidate+0x4f/0x60 [100722.966709] __vfs_read+0xec/0x160 [100722.966711] vfs_read+0x8c/0x130 [100722.966712] SyS_pread64+0x87/0xb0 [100722.966716] do_syscall_64+0x67/0x1b0 [100722.966719] entry_SYSCALL64_slow_path+0x25/0x25 To address this, add the check of 'buffer_mapped(bh)' to __sync_dirty_buffer(). This also has the benefit of fixing this for other file systems. With this addition, we can drop the workaround in ext4_commit_supper(). [ Commit description rewritten by tytso. ] Signed-off-by: Xianting Tian Link: https://lore.kernel.org/r/1596211825-8750-1-git-send-email-xianting_tian@126.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/buffer.c | 9 +++++++++ fs/ext4/super.c | 7 ------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/fs/buffer.c b/fs/buffer.c index c49fdab5cb36..362a86876459 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -3193,6 +3193,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) WARN_ON(atomic_read(&bh->b_count) < 1); lock_buffer(bh); if (test_clear_buffer_dirty(bh)) { + /* + * The bh should be mapped, but it might not be if the + * device was hot-removed. Not much we can do but fail the I/O. 
+ */ + if (!buffer_mapped(bh)) { + unlock_buffer(bh); + return -EIO; + } + get_bh(bh); bh->b_end_io = end_buffer_write_sync; ret = submit_bh(REQ_OP_WRITE, op_flags, bh); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 9ac34b6ae073..0c15ff19acbd 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4966,13 +4966,6 @@ static int ext4_commit_super(struct super_block *sb, int sync) if (!sbh || block_device_ejected(sb)) return error; - /* - * The superblock bh should be mapped, but it might not be if the - * device was hot-removed. Not much we can do but fail the I/O. - */ - if (!buffer_mapped(sbh)) - return error; - /* * If the file system is mounted read-only, don't update the * superblock write time. This avoids updating the superblock -- GitLab From ca57f450507548d2eb14a15764e0f159c784c842 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Mon, 10 Aug 2020 09:12:36 +0200 Subject: [PATCH 0552/1304] spi: stm32: fix stm32_spi_prepare_mbr in case of odd clk_rate [ Upstream commit 9cc61973bf9385b19ff5dda4a2a7e265fcba85e4 ] Fix spi->clk_rate when it is odd to the nearest lowest even value because minimum SPI divider is 2. 
Signed-off-by: Amelie Delaunay Signed-off-by: Alain Volmat Link: https://lore.kernel.org/r/1597043558-29668-4-git-send-email-alain.volmat@st.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/spi/spi-stm32.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index ad1e55d3d5d5..391a20b3d2fd 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -254,7 +254,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) { u32 div, mbrdiv; - div = DIV_ROUND_UP(spi->clk_rate, speed_hz); + /* Ensure spi->clk_rate is even */ + div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz); /* * SPI framework set xfer->speed_hz to master->max_speed_hz if -- GitLab From 017d36c59ca80898314a18c23e28f4045415f3b8 Mon Sep 17 00:00:00 2001 From: Vineeth Vijayan Date: Thu, 18 Jun 2020 16:42:45 +0200 Subject: [PATCH 0553/1304] s390/cio: add cond_resched() in the slow_eval_known_fn() loop [ Upstream commit 0b8eb2ee9da1e8c9b8082f404f3948aa82a057b2 ] The scanning through subchannels during the time of an event could take significant amount of time in case of platforms with lots of known subchannels. This might result in higher scheduling latencies for other tasks especially on systems with a single CPU. Add cond_resched() call, as the loop in slow_eval_known_fn() can be executed for a longer duration. 
Reviewed-by: Peter Oberparleiter Signed-off-by: Vineeth Vijayan Signed-off-by: Heiko Carstens Signed-off-by: Sasha Levin --- drivers/s390/cio/css.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index df09ed53ab45..825a8f2703b4 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -615,6 +615,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data) rc = css_evaluate_known_subchannel(sch, 1); if (rc == -EAGAIN) css_schedule_eval(sch->schid); + /* + * The loop might take long time for platforms with lots of + * known devices. Allow scheduling here. + */ + cond_resched(); } return 0; } -- GitLab From 00963a85dbb6f01001e4adf0f8145232221e4874 Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: Fri, 31 Jul 2020 19:38:34 +0200 Subject: [PATCH 0554/1304] ASoC: wm8994: Avoid attempts to read unreadable registers [ Upstream commit f082bb59b72039a2326ec1a44496899fb8aa6d0e ] The driver supports WM1811, WM8994, WM8958 devices but according to documentation and the regmap definitions the WM8958_DSP2_* registers are only available on WM8958. In current code these registers are being accessed as if they were available on all the three chips. When starting playback on WM1811 CODEC multiple errors like: "wm8994-codec wm8994-codec: ASoC: error at soc_component_read_no_lock on wm8994-codec: -5" can be seen, which is caused by attempts to read an unavailable WM8958_DSP2_PROGRAM register. The issue has been uncovered by recent commit "e2329ee ASoC: soc-component: add soc_component_err()". This patch adds a check in wm8958_aif_ev() callback so the DSP2 handling is only done for WM8958. 
Signed-off-by: Sylwester Nawrocki Acked-by: Charles Keepax Link: https://lore.kernel.org/r/20200731173834.23832-1-s.nawrocki@samsung.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/codecs/wm8958-dsp2.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c index 108e8bf42a34..f0a409504a13 100644 --- a/sound/soc/codecs/wm8958-dsp2.c +++ b/sound/soc/codecs/wm8958-dsp2.c @@ -419,8 +419,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); + struct wm8994 *control = dev_get_drvdata(component->dev->parent); int i; + if (control->type != WM8958) + return 0; + switch (event) { case SND_SOC_DAPM_POST_PMU: case SND_SOC_DAPM_PRE_PMU: -- GitLab From 2229e50f25a5d9e6fa9aa263530978a8c83ff944 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Fri, 7 Aug 2020 15:23:33 -0500 Subject: [PATCH 0555/1304] scsi: fcoe: Fix I/O path allocation [ Upstream commit fa39ab5184d64563cd36f2fb5f0d3fbad83a432c ] ixgbe_fcoe_ddp_setup() can be called from the main I/O path and is called with a spin_lock held, so we have to use GFP_ATOMIC allocation instead of GFP_KERNEL. Link: https://lore.kernel.org/r/1596831813-9839-1-git-send-email-michael.christie@oracle.com cc: Hannes Reinecke Reviewed-by: Lee Duncan Signed-off-by: Mike Christie Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index ccd852ad62a4..d50c5b55da18 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } /* alloc the udl from per cpu ddp pool */ - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp); + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; -- GitLab From 8f76a208c2f439c02fc5e25fef0790938662e723 Mon Sep 17 00:00:00 2001 From: Stanley Chu Date: Sun, 9 Aug 2020 13:07:34 +0800 Subject: [PATCH 0556/1304] scsi: ufs: Fix possible infinite loop in ufshcd_hold [ Upstream commit 93b6c5db06028a3b55122bbb74d0715dd8ca4ae0 ] In ufshcd_suspend(), after clk-gating is suspended and link is set as Hibern8 state, ufshcd_hold() is still possibly invoked before ufshcd_suspend() returns. For example, MediaTek's suspend vops may issue UIC commands which would call ufshcd_hold() during the command issuing flow. Now if UFSHCD_CAP_HIBERN8_WITH_CLK_GATING capability is enabled, then ufshcd_hold() may enter infinite loops because there is no clk-ungating work scheduled or pending. In this case, ufshcd_hold() shall just bypass, and keep the link as Hibern8 state. Link: https://lore.kernel.org/r/20200809050734.18740-1-stanley.chu@mediatek.com Reviewed-by: Avri Altman Co-developed-by: Andy Teng Signed-off-by: Andy Teng Signed-off-by: Stanley Chu Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/ufs/ufshcd.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index ab628fd37e02..747a2321b5f7 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1540,6 +1540,7 @@ static void ufshcd_ungate_work(struct work_struct *work) int ufshcd_hold(struct ufs_hba *hba, bool async) { int rc = 0; + bool flush_result; unsigned long flags; if (!ufshcd_is_clkgating_allowed(hba)) @@ -1571,7 +1572,9 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) break; } spin_unlock_irqrestore(hba->host->host_lock, flags); - flush_work(&hba->clk_gating.ungate_work); + flush_result = flush_work(&hba->clk_gating.ungate_work); + if (hba->clk_gating.is_suspended && !flush_result) + goto out; spin_lock_irqsave(hba->host->host_lock, flags); goto start; } -- GitLab From 31871fb7adca15a0c60e723bad673802ce647932 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 11 Aug 2020 16:39:36 +0300 Subject: [PATCH 0557/1304] scsi: ufs: Improve interrupt handling for shared interrupts [ Upstream commit 127d5f7c4b653b8be5eb3b2c7bbe13728f9003ff ] For shared interrupts, the interrupt status might be zero, so check that first. Link: https://lore.kernel.org/r/20200811133936.19171-2-adrian.hunter@intel.com Reviewed-by: Avri Altman Signed-off-by: Adrian Hunter Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/ufs/ufshcd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 747a2321b5f7..d8c6fd201275 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5605,7 +5605,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) */ static irqreturn_t ufshcd_intr(int irq, void *__hba) { - u32 intr_status, enabled_intr_status; + u32 intr_status, enabled_intr_status = 0; irqreturn_t retval = IRQ_NONE; struct ufs_hba *hba = __hba; int retries = hba->nutrs; @@ -5619,7 +5619,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) * read, make sure we handle them by checking the interrupt status * again in a loop until we process all of the reqs before returning. */ - do { + while (intr_status && retries--) { enabled_intr_status = intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); if (intr_status) @@ -5630,7 +5630,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) } intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); - } while (intr_status && --retries); + } spin_unlock(hba->host->host_lock); return retval; -- GitLab From 023e0e6bd2d91ffff61cd040cece6f31bb826a38 Mon Sep 17 00:00:00 2001 From: Stanley Chu Date: Tue, 11 Aug 2020 16:18:58 +0200 Subject: [PATCH 0558/1304] scsi: ufs: Clean up completed request without interrupt notification [ Upstream commit b10178ee7fa88b68a9e8adc06534d2605cb0ec23 ] If somehow no interrupt notification is raised for a completed request and its doorbell bit is cleared by host, UFS driver needs to cleanup its outstanding bit in ufshcd_abort(). Otherwise, system may behave abnormally in the following scenario: After ufshcd_abort() returns, this request will be requeued by SCSI layer with its outstanding bit set. Any future completed request will trigger ufshcd_transfer_req_compl() to handle all "completed outstanding bits". 
At this time the "abnormal outstanding bit" will be detected and the "requeued request" will be chosen to execute request post-processing flow. This is wrong because this request is still "alive". Link: https://lore.kernel.org/r/20200811141859.27399-2-huobean@gmail.com Reviewed-by: Can Guo Acked-by: Avri Altman Signed-off-by: Stanley Chu Signed-off-by: Bean Huo Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/ufs/ufshcd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index d8c6fd201275..eb10a5cacd90 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5930,7 +5930,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) /* command completed already */ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n", __func__, tag); - goto out; + goto cleanup; } else { dev_err(hba->dev, "%s: no response from device. tag = %d, err %d\n", @@ -5964,6 +5964,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto out; } +cleanup: scsi_dma_unmap(cmd); spin_lock_irqsave(host->host_lock, flags); -- GitLab From aba997482bc5304d90b3adf42527089ac6d02c02 Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Thu, 6 Aug 2020 04:10:11 -0700 Subject: [PATCH 0559/1304] scsi: qla2xxx: Check if FW supports MQ before enabling [ Upstream commit dffa11453313a115157b19021cc2e27ea98e624c ] OS boot during Boot from SAN was stuck at dracut emergency shell after enabling NVMe driver parameter. For non-MQ support the driver was enabling MQ. Add a check to confirm if FW supports MQ. Link: https://lore.kernel.org/r/20200806111014.28434-9-njavali@marvell.com Reviewed-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Nilesh Javali Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/qla2xxx/qla_os.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index b56cf790587e..e17ca7df8d0e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1997,6 +1997,11 @@ qla2x00_iospace_config(struct qla_hw_data *ha) /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = QLA_BASE_VECTORS; + + /* Check if FW supports MQ or not */ + if (!(ha->fw_attributes & BIT_6)) + goto mqiobase_exit; + if (!ql2xmqsupport || !ql2xnvmeenable || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; -- GitLab From f92ff03ee69f3d7e937a4c7421c478232fc55a78 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 6 Aug 2020 04:10:12 -0700 Subject: [PATCH 0560/1304] scsi: qla2xxx: Fix null pointer access during disconnect from subsystem [ Upstream commit 83949613fac61e8e37eadf8275bf072342302f4e ] NVMEAsync command is being submitted to QLA while the same NVMe controller is in the middle of reset. The reset path has deleted the association and freed aen_op->fcp_req.private. Add a check for this private pointer before issuing the command. ... 
6 [ffffb656ca11fce0] page_fault at ffffffff8c00114e [exception RIP: qla_nvme_post_cmd+394] RIP: ffffffffc0d012ba RSP: ffffb656ca11fd98 RFLAGS: 00010206 RAX: ffff8fb039eda228 RBX: ffff8fb039eda200 RCX: 00000000000da161 RDX: ffffffffc0d4d0f0 RSI: ffffffffc0d26c9b RDI: ffff8fb039eda220 RBP: 0000000000000013 R8: ffff8fb47ff6aa80 R9: 0000000000000002 R10: 0000000000000000 R11: ffffb656ca11fdc8 R12: ffff8fb27d04a3b0 R13: ffff8fc46dd98a58 R14: 0000000000000000 R15: ffff8fc4540f0000 ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018 7 [ffffb656ca11fe08] nvme_fc_start_fcp_op at ffffffffc0241568 [nvme_fc] 8 [ffffb656ca11fe50] nvme_fc_submit_async_event at ffffffffc0241901 [nvme_fc] 9 [ffffb656ca11fe68] nvme_async_event_work at ffffffffc014543d [nvme_core] 10 [ffffb656ca11fe98] process_one_work at ffffffff8b6cd437 11 [ffffb656ca11fed8] worker_thread at ffffffff8b6cdcef 12 [ffffb656ca11ff10] kthread at ffffffff8b6d3402 13 [ffffb656ca11ff50] ret_from_fork at ffffffff8c000255 -- PID: 37824 TASK: ffff8fb033063d80 CPU: 20 COMMAND: "kworker/u97:451" 0 [ffffb656ce1abc28] __schedule at ffffffff8be629e3 1 [ffffb656ce1abcc8] schedule at ffffffff8be62fe8 2 [ffffb656ce1abcd0] schedule_timeout at ffffffff8be671ed 3 [ffffb656ce1abd70] wait_for_completion at ffffffff8be639cf 4 [ffffb656ce1abdd0] flush_work at ffffffff8b6ce2d5 5 [ffffb656ce1abe70] nvme_stop_ctrl at ffffffffc0144900 [nvme_core] 6 [ffffb656ce1abe80] nvme_fc_reset_ctrl_work at ffffffffc0243445 [nvme_fc] 7 [ffffb656ce1abe98] process_one_work at ffffffff8b6cd437 8 [ffffb656ce1abed8] worker_thread at ffffffff8b6cdb50 9 [ffffb656ce1abf10] kthread at ffffffff8b6d3402 10 [ffffb656ce1abf50] ret_from_fork at ffffffff8c000255 Link: https://lore.kernel.org/r/20200806111014.28434-10-njavali@marvell.com Reviewed-by: Himanshu Madhani Signed-off-by: Quinn Tran Signed-off-by: Nilesh Javali Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/qla2xxx/qla_nvme.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 5590d6e8b576..3e2f8ce1d9a9 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -477,6 +477,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, struct nvme_private *priv = fd->private; struct qla_nvme_rport *qla_rport = rport->private; + if (!priv) { + /* nvme association has been torn down */ + return rval; + } + fcport = qla_rport->fcport; vha = fcport->vha; -- GitLab From 2d1a5f56ac7b74e78a25a43e0fa59fad6f79d570 Mon Sep 17 00:00:00 2001 From: Saurav Kashyap Date: Thu, 6 Aug 2020 04:10:13 -0700 Subject: [PATCH 0561/1304] Revert "scsi: qla2xxx: Fix crash on qla2x00_mailbox_command" [ Upstream commit de7e6194301ad31c4ce95395eb678e51a1b907e5 ] FCoE adapter initialization failed for ISP8021 with the following patch applied. In addition, reproduction of the issue the patch originally tried to address has been unsuccessful. This reverts commit 3cb182b3fa8b7a61f05c671525494697cba39c6a. Link: https://lore.kernel.org/r/20200806111014.28434-11-njavali@marvell.com Reviewed-by: Himanshu Madhani Signed-off-by: Saurav Kashyap Signed-off-by: Nilesh Javali Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/qla2xxx/qla_mbx.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index ac5d2d34aeea..07c5d7397d42 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -329,14 +329,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (time_after(jiffies, wait_time)) break; - /* - * Check if it's UNLOADING, cause we cannot poll in - * this case, or else a NULL pointer dereference - * is triggered. 
- */ - if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) - return QLA_FUNCTION_TIMEOUT; - /* Check for pending interrupts. */ qla2x00_poll(ha->rsp_q_map[0]); -- GitLab From b12151989d2930328b85419c20037a0c9769ef52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alvin=20=C5=A0ipraga?= Date: Tue, 18 Aug 2020 10:51:34 +0200 Subject: [PATCH 0562/1304] macvlan: validate setting of multiple remote source MAC addresses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 8b61fba503904acae24aeb2bd5569b4d6544d48f ] Remote source MAC addresses can be set on a 'source mode' macvlan interface via the IFLA_MACVLAN_MACADDR_DATA attribute. This commit tightens the validation of these MAC addresses to match the validation already performed when setting or adding a single MAC address via the IFLA_MACVLAN_MACADDR attribute. iproute2 uses IFLA_MACVLAN_MACADDR_DATA for its 'macvlan macaddr set' command, and IFLA_MACVLAN_MACADDR for its 'macvlan macaddr add' command, which demonstrates the inconsistent behaviour that this commit addresses: # ip link add link eth0 name macvlan0 type macvlan mode source # ip link set link dev macvlan0 type macvlan macaddr add 01:00:00:00:00:00 RTNETLINK answers: Cannot assign requested address # ip link set link dev macvlan0 type macvlan macaddr set 01:00:00:00:00:00 # ip -d link show macvlan0 5: macvlan0@eth0: mtu 1500 ... link/ether 2e:ac:fd:2d:69:f8 brd ff:ff:ff:ff:ff:ff promiscuity 0 macvlan mode source remotes (1) 01:00:00:00:00:00 numtxqueues 1 ... With this change, the 'set' command will (rightly) fail in the same way as the 'add' command. Signed-off-by: Alvin Šipraga Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/macvlan.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 349123592af0..e226a96da3a3 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1230,6 +1230,9 @@ static void macvlan_port_destroy(struct net_device *dev) static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + struct nlattr *nla, *head; + int rem, len; + if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; @@ -1277,6 +1280,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return -EADDRNOTAVAIL; } + if (data[IFLA_MACVLAN_MACADDR_DATA]) { + head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]); + len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); + + nla_for_each_attr(nla, head, len, rem) { + if (nla_type(nla) != IFLA_MACVLAN_MACADDR || + nla_len(nla) != ETH_ALEN) + return -EINVAL; + + if (!is_valid_ether_addr(nla_data(nla))) + return -EADDRNOTAVAIL; + } + } + if (data[IFLA_MACVLAN_MACADDR_COUNT]) return -EINVAL; @@ -1333,10 +1350,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]); nla_for_each_attr(nla, head, len, rem) { - if (nla_type(nla) != IFLA_MACVLAN_MACADDR || - nla_len(nla) != ETH_ALEN) - continue; - addr = nla_data(nla); ret = macvlan_hash_add_source(vlan, addr); if (ret) -- GitLab From 50b83d19ab3f9a07a70d0bb6d8efb66bff970a4f Mon Sep 17 00:00:00 2001 From: Sumera Priyadarsini Date: Wed, 19 Aug 2020 00:22:41 +0530 Subject: [PATCH 0563/1304] net: gianfar: Add of_node_put() before goto statement [ Upstream commit 989e4da042ca4a56bbaca9223d1a93639ad11e17 ] Every iteration of for_each_available_child_of_node() decrements reference count of the previous node, however when control is transferred from the middle of the loop, as in the case of a return or break or goto, there is 
no decrement thus ultimately resulting in a memory leak. Fix a potential memory leak in gianfar.c by inserting of_node_put() before the goto statement. Issue found with Coccinelle. Signed-off-by: Sumera Priyadarsini Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/freescale/gianfar.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index cf2d1e846a69..8243501c3757 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -844,8 +844,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) continue; err = gfar_parse_group(child, priv, model); - if (err) + if (err) { + of_node_put(child); goto err_grp_init; + } } } else { /* SQ_SG_MODE */ err = gfar_parse_group(np, priv, model); -- GitLab From bcaa460435cb332181fbefa4cf2136d4bdf9fd85 Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Thu, 6 Aug 2020 08:46:32 -0400 Subject: [PATCH 0564/1304] powerpc/perf: Fix soft lockups due to missed interrupt accounting [ Upstream commit 17899eaf88d689529b866371344c8f269ba79b5f ] Performance monitor interrupt handler checks if any counter has overflown and calls record_and_restart() in core-book3s which invokes perf_event_overflow() to record the sample information. Apart from creating sample, perf_event_overflow() also does the interrupt and period checks via perf_event_account_interrupt(). Currently we record information only if the SIAR (Sampled Instruction Address Register) valid bit is set (using siar_valid() check) and hence the interrupt check. But it is possible that we do sampling for some events that are not generating valid SIAR, and hence there is no chance to disable the event if interrupts are more than max_samples_per_tick. This leads to soft lockup. Fix this by adding perf_event_account_interrupt() in the invalid SIAR code path for a sampling event. 
ie if SIAR is invalid, just do interrupt check and don't record the sample information. Reported-by: Alexey Kardashevskiy Signed-off-by: Athira Rajeev Tested-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/1596717992-7321-1-git-send-email-atrajeev@linux.vnet.ibm.com Signed-off-by: Sasha Levin --- arch/powerpc/perf/core-book3s.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 4004dbdab9c7..d407b7329817 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2087,6 +2087,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val, if (perf_event_overflow(event, &data, regs)) power_pmu_stop(event, 0); + } else if (period) { + /* Account for interrupt in case of invalid SIAR */ + if (perf_event_account_interrupt(event)) + power_pmu_stop(event, 0); } } -- GitLab From 3e7f61593b781c74d764b0c9bba1451161ffac1d Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 17 Aug 2020 18:01:30 +0800 Subject: [PATCH 0565/1304] block: loop: set discard granularity and alignment for block device backed loop commit bcb21c8cc9947286211327d663ace69f07d37a76 upstream. In case of block device backend, if the backend supports write zeros, the loop device will set queue flag of QUEUE_FLAG_DISCARD. However, limits.discard_granularity isn't setup, and this way is wrong, see the following description in Documentation/ABI/testing/sysfs-block: A discard_granularity of 0 means that the device does not support discard functionality. Especially 9b15d109a6b2 ("block: improve discard bio alignment in __blkdev_issue_discard()") starts to take q->limits.discard_granularity for computing max discard sectors. And zero discard granularity may cause kernel oops, or fail discard request even though the loop queue claims discard support via QUEUE_FLAG_DISCARD. Fix the issue by setup discard granularity and alignment. 
Fixes: c52abf563049 ("loop: Better discard support for block devices") Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Acked-by: Coly Li Cc: Hannes Reinecke Cc: Xiao Ni Cc: Martin K. Petersen Cc: Evan Green Cc: Gwendal Grignou Cc: Chaitanya Kulkarni Cc: Andrzej Pietrasiewicz Cc: Christoph Hellwig Cc: Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/block/loop.c | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/drivers/block/loop.c b/drivers/block/loop.c index da68c42aed68..19042b42a8ba 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -864,6 +864,7 @@ static void loop_config_discard(struct loop_device *lo) struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct request_queue *q = lo->lo_queue; + u32 granularity, max_discard_sectors; /* * If the backing device is a block device, mirror its zeroing @@ -876,11 +877,10 @@ static void loop_config_discard(struct loop_device *lo) struct request_queue *backingq; backingq = bdev_get_queue(inode->i_bdev); - blk_queue_max_discard_sectors(q, - backingq->limits.max_write_zeroes_sectors); - blk_queue_max_write_zeroes_sectors(q, - backingq->limits.max_write_zeroes_sectors); + max_discard_sectors = backingq->limits.max_write_zeroes_sectors; + granularity = backingq->limits.discard_granularity ?: + queue_physical_block_size(backingq); /* * We use punch hole to reclaim the free space used by the @@ -889,23 +889,26 @@ static void loop_config_discard(struct loop_device *lo) * useful information. 
*/ } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) { - q->limits.discard_granularity = 0; - q->limits.discard_alignment = 0; - blk_queue_max_discard_sectors(q, 0); - blk_queue_max_write_zeroes_sectors(q, 0); + max_discard_sectors = 0; + granularity = 0; } else { - q->limits.discard_granularity = inode->i_sb->s_blocksize; - q->limits.discard_alignment = 0; - - blk_queue_max_discard_sectors(q, UINT_MAX >> 9); - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); + max_discard_sectors = UINT_MAX >> 9; + granularity = inode->i_sb->s_blocksize; } - if (q->limits.max_write_zeroes_sectors) + if (max_discard_sectors) { + q->limits.discard_granularity = granularity; + blk_queue_max_discard_sectors(q, max_discard_sectors); + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); - else + } else { + q->limits.discard_granularity = 0; + blk_queue_max_discard_sectors(q, 0); + blk_queue_max_write_zeroes_sectors(q, 0); blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); + } + q->limits.discard_alignment = 0; } static void loop_unprepare_queue(struct loop_device *lo) -- GitLab From 30028c328b82f00c19c1ef800b3026abff7ca982 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 11 Aug 2020 15:39:58 +0200 Subject: [PATCH 0566/1304] HID: i2c-hid: Always sleep 60ms after I2C_HID_PWR_ON commands commit eef4016243e94c438f177ca8226876eb873b9c75 upstream. Before this commit i2c_hid_parse() consists of the following steps: 1. Send power on cmd 2. usleep_range(1000, 5000) 3. Send reset cmd 4. Wait for reset to complete (device interrupt, or msleep(100)) 5. Send power on cmd 6. Try to read HID descriptor Notice how there is an usleep_range(1000, 5000) after the first power-on command, but not after the second power-on command. Testing has shown that at least on the BMAX Y13 laptop's i2c-hid touchpad, not having a delay after the second power-on command causes the HID descriptor to read as all zeros. 
In case we hit this on other devices too, the descriptor being all zeros can be recognized by the following message being logged many, many times: hid-generic 0018:0911:5288.0002: unknown main item tag 0x0 At the same time as the BMAX Y13's touchpad issue was debugged, Kai-Heng was working on debugging some issues with Goodix i2c-hid touchpads. It turns out that these need a delay after a PWR_ON command too, otherwise they stop working after a suspend/resume cycle. According to Goodix a delay of minimal 60ms is needed. Having multiple cases where we need a delay after sending the power-on command, seems to indicate that we should always sleep after the power-on command. This commit fixes the mentioned issues by moving the existing 1ms sleep to the i2c_hid_set_power() function and changing it to a 60ms sleep. Cc: stable@vger.kernel.org BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=208247 Reported-by: Kai-Heng Feng Reported-and-tested-by: Andrea Borgia Signed-off-by: Hans de Goede Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/hid/i2c-hid/i2c-hid-core.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index f17ebbe53abf..1f8d403d3db4 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -444,6 +444,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state) dev_err(&client->dev, "failed to change power setting.\n"); set_pwr_exit: + + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observered that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests. + * According to Goodix Windows even waits 60 ms after (other?) + * PWR_ON requests. 
Testing has confirmed that several devices + * will not work properly without a delay after a PWR_ON request. + */ + if (!ret && power_state == I2C_HID_PWR_ON) + msleep(60); + return ret; } @@ -465,15 +478,6 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) goto out_unlock; - /* - * The HID over I2C specification states that if a DEVICE needs time - * after the PWR_ON request, it should utilise CLOCK stretching. - * However, it has been observered that the Windows driver provides a - * 1ms sleep between the PWR_ON and RESET requests and that some devices - * rely on this. - */ - usleep_range(1000, 5000); - i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); -- GitLab From 77064570e4c3636da26f30ed28e4d132256424ec Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 17 Aug 2020 18:01:15 +0800 Subject: [PATCH 0567/1304] blk-mq: order adding requests to hctx->dispatch and checking SCHED_RESTART commit d7d8535f377e9ba87edbf7fbbd634ac942f3f54f upstream. SCHED_RESTART code path is relied to re-run queue for dispatch requests in hctx->dispatch. Meantime the SCHED_RSTART flag is checked when adding requests to hctx->dispatch. memory barriers have to be used for ordering the following two pair of OPs: 1) adding requests to hctx->dispatch and checking SCHED_RESTART in blk_mq_dispatch_rq_list() 2) clearing SCHED_RESTART and checking if there is request in hctx->dispatch in blk_mq_sched_restart(). Without the added memory barrier, either: 1) blk_mq_sched_restart() may miss requests added to hctx->dispatch meantime blk_mq_dispatch_rq_list() observes SCHED_RESTART, and not run queue in dispatch side or 2) blk_mq_dispatch_rq_list still sees SCHED_RESTART, and not run queue in dispatch side, meantime checking if there is request in hctx->dispatch from blk_mq_sched_restart() is missed. 
IO hang in ltp/fs_fill test is reported by kernel test robot: https://lkml.org/lkml/2020/7/26/77 Turns out it is caused by the above out-of-order OPs. And the IO hang can't be observed any more after applying this patch. Fixes: bd166ef183c2 ("blk-mq-sched: add framework for MQ capable IO schedulers") Reported-by: kernel test robot Signed-off-by: Ming Lei Reviewed-by: Christoph Hellwig Cc: Bart Van Assche Cc: Christoph Hellwig Cc: David Jeffery Cc: Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/blk-mq-sched.c | 9 +++++++++ block/blk-mq.c | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index da1de190a3b1..d89a757cbde0 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -69,6 +69,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) return; clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); + /* + * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch) + * in blk_mq_run_hw_queue(). Its pair is the barrier in + * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART, + * meantime new request added to hctx->dispatch is missed to check in + * blk_mq_run_hw_queue(). + */ + smp_mb(); + blk_mq_run_hw_queue(hctx, true); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 684acaa96db7..db2db0b70d34 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1221,6 +1221,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, list_splice_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); + /* + * Order adding requests to hctx->dispatch and checking + * SCHED_RESTART flag. The pair of this smp_mb() is the one + * in blk_mq_sched_restart(). Avoid restart code path to + * miss the new added requests to hctx->dispatch, meantime + * SCHED_RESTART is observed here. 
+ */ + smp_mb(); + /* * If SCHED_RESTART was set by the caller of this function and * it is no longer set that means that it was cleared by another -- GitLab From ee203be4dff5c1500ac2989306d0eab0aad1d0dd Mon Sep 17 00:00:00 2001 From: Marcos Paulo de Souza Date: Mon, 3 Aug 2020 16:55:01 -0300 Subject: [PATCH 0568/1304] btrfs: reset compression level for lzo on remount commit 282dd7d7718444679b046b769d872b188818ca35 upstream. Currently a user can set mount "-o compress" which will set the compression algorithm to zlib, and use the default compress level for zlib (3): relatime,compress=zlib:3,space_cache If the user remounts the fs using "-o compress=lzo", then the old compress_level is used: relatime,compress=lzo:3,space_cache But lzo does not expose any tunable compression level. The same happens if we set any compress argument with different level, also with zstd. Fix this by resetting the compress_level when compress=lzo is specified. With the fix applied, lzo is shown without compress level: relatime,compress=lzo,space_cache CC: stable@vger.kernel.org # 4.4+ Signed-off-by: Marcos Paulo de Souza Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/super.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 4d2810a32b4a..40f5b4dcb927 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -539,6 +539,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, } else if (strncmp(args[0].from, "lzo", 3) == 0) { compress_type = "lzo"; info->compress_type = BTRFS_COMPRESS_LZO; + info->compress_level = 0; btrfs_set_opt(info->mount_opt, COMPRESS); btrfs_clear_opt(info->mount_opt, NODATACOW); btrfs_clear_opt(info->mount_opt, NODATASUM); -- GitLab From b0186a11dfe7b2ee767b4e7acfea921594ecdb7f Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 14 Aug 2020 11:04:09 +0100 Subject: [PATCH 0569/1304] btrfs: fix space cache memory leak after transaction abort commit 
bbc37d6e475eee8ffa2156ec813efc6bbb43c06d upstream. If a transaction aborts it can cause a memory leak of the pages array of a block group's io_ctl structure. The following steps explain how that can happen: 1) Transaction N is committing, currently in state TRANS_STATE_UNBLOCKED and it's about to start writing out dirty extent buffers; 2) Transaction N + 1 already started and another task, task A, just called btrfs_commit_transaction() on it; 3) Block group B was dirtied (extents allocated from it) by transaction N + 1, so when task A calls btrfs_start_dirty_block_groups(), at the very beginning of the transaction commit, it starts writeback for the block group's space cache by calling btrfs_write_out_cache(), which allocates the pages array for the block group's io_ctl with a call to io_ctl_init(). Block group A is added to the io_list of transaction N + 1 by btrfs_start_dirty_block_groups(); 4) While transaction N's commit is writing out the extent buffers, it gets an IO error and aborts transaction N, also setting the file system to RO mode; 5) Task A has already returned from btrfs_start_dirty_block_groups(), is at btrfs_commit_transaction() and has set transaction N + 1 state to TRANS_STATE_COMMIT_START. Immediately after that it checks that the filesystem was turned to RO mode, due to transaction N's abort, and jumps to the "cleanup_transaction" label. After that we end up at btrfs_cleanup_one_transaction() which calls btrfs_cleanup_dirty_bgs(). That helper finds block group B in the transaction's io_list but it never releases the pages array of the block group's io_ctl, resulting in a memory leak. In fact at the point when we are at btrfs_cleanup_dirty_bgs(), the pages array points to pages that were already released by us at __btrfs_write_out_cache() through the call to io_ctl_drop_pages(). We end up freeing the pages array only after waiting for the ordered extent to complete through btrfs_wait_cache_io(), which calls io_ctl_free() to do that. 
But in the transaction abort case we don't wait for the space cache's ordered extent to complete through a call to btrfs_wait_cache_io(), so that's why we end up with a memory leak - we wait for the ordered extent to complete indirectly by shutting down the work queues and waiting for any jobs in them to complete before returning from close_ctree(). We can solve the leak simply by freeing the pages array right after releasing the pages (with the call to io_ctl_drop_pages()) at __btrfs_write_out_cache(), since we will never use it anymore after that and the pages array points to already released pages at that point, which is currently not a problem since no one will use it after that, but not a good practice anyway since it can easily lead to use-after-free issues. So fix this by freeing the pages array right after releasing the pages at __btrfs_write_out_cache(). This issue can often be reproduced with test case generic/475 from fstests and kmemleak can detect it and reports it with the following trace: unreferenced object 0xffff9bbf009fa600 (size 512): comm "fsstress", pid 38807, jiffies 4298504428 (age 22.028s) hex dump (first 32 bytes): 00 a0 7c 4d 3d ed ff ff 40 a0 7c 4d 3d ed ff ff ..|M=...@.|M=... 80 a0 7c 4d 3d ed ff ff c0 a0 7c 4d 3d ed ff ff ..|M=.....|M=... 
backtrace: [<00000000f4b5cfe2>] __kmalloc+0x1a8/0x3e0 [<0000000028665e7f>] io_ctl_init+0xa7/0x120 [btrfs] [<00000000a1f95b2d>] __btrfs_write_out_cache+0x86/0x4a0 [btrfs] [<00000000207ea1b0>] btrfs_write_out_cache+0x7f/0xf0 [btrfs] [<00000000af21f534>] btrfs_start_dirty_block_groups+0x27b/0x580 [btrfs] [<00000000c3c23d44>] btrfs_commit_transaction+0xa6f/0xe70 [btrfs] [<000000009588930c>] create_subvol+0x581/0x9a0 [btrfs] [<000000009ef2fd7f>] btrfs_mksubvol+0x3fb/0x4a0 [btrfs] [<00000000474e5187>] __btrfs_ioctl_snap_create+0x119/0x1a0 [btrfs] [<00000000708ee349>] btrfs_ioctl_snap_create_v2+0xb0/0xf0 [btrfs] [<00000000ea60106f>] btrfs_ioctl+0x12c/0x3130 [btrfs] [<000000005c923d6d>] __x64_sys_ioctl+0x83/0xb0 [<0000000043ace2c9>] do_syscall_64+0x33/0x80 [<00000000904efbce>] entry_SYSCALL_64_after_hwframe+0x44/0xa9 CC: stable@vger.kernel.org # 4.9+ Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/disk-io.c | 1 + fs/btrfs/free-space-cache.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3130844e219c..cb21ffd3bba7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4444,6 +4444,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache) cache->io_ctl.inode = NULL; iput(inode); } + ASSERT(cache->io_ctl.pages == NULL); btrfs_put_block_group(cache); } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 4c65305fd418..652b0b16e93e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1167,7 +1167,6 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root, ret = update_cache_item(trans, root, inode, path, offset, io_ctl->entries, io_ctl->bitmaps); out: - io_ctl_free(io_ctl); if (ret) { invalidate_inode_pages2(inode->i_mapping); BTRFS_I(inode)->generation = 0; @@ -1332,6 +1331,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode 
*inode, * them out later */ io_ctl_drop_pages(io_ctl); + io_ctl_free(io_ctl); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state); -- GitLab From 72f099805dbc907fbe8fa19bccdc31d3e2ee6e9e Mon Sep 17 00:00:00 2001 From: George Kennedy Date: Fri, 31 Jul 2020 12:33:11 -0400 Subject: [PATCH 0570/1304] fbcon: prevent user font height or width change from causing potential out-of-bounds access commit 39b3cffb8cf3111738ea993e2757ab382253d86a upstream. Add a check to fbcon_resize() to ensure that a possible change to user font height or user font width will not allow a font data out-of-bounds access. NOTE: must use original charcount in calculation as font charcount can change and cannot be used to determine the font data allocated size. Signed-off-by: George Kennedy Cc: stable Reported-by: syzbot+38a3699c7eaf165b97a6@syzkaller.appspotmail.com Link: https://lore.kernel.org/r/1596213192-6635-1-git-send-email-george.kennedy@oracle.com Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/core/fbcon.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index cb93a6b38160..f75557b39a61 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2152,6 +2152,9 @@ static void updatescrollmode(struct display *p, } } +#define PITCH(w) (((w) + 7) >> 3) +#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */ + static int fbcon_resize(struct vc_data *vc, unsigned int width, unsigned int height, unsigned int user) { @@ -2161,6 +2164,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, struct fb_var_screeninfo var = info->var; int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; + if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) { + int size; + int pitch = PITCH(vc->vc_font.width); + + /* + * If user font, ensure that a possible change to user font + 
* height or width will not allow a font data out-of-bounds access. + * NOTE: must use original charcount in calculation as font + * charcount can change and cannot be used to determine the + * font data allocated size. + */ + if (pitch <= 0) + return -EINVAL; + size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data)); + if (size > FNTSIZE(vc->vc_font.data)) + return -EINVAL; + } + virt_w = FBCON_SWAP(ops->rotate, width, height); virt_h = FBCON_SWAP(ops->rotate, height, width); virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width, @@ -2623,7 +2644,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, int size; int i, csum; u8 *new_data, *data = font->data; - int pitch = (font->width+7) >> 3; + int pitch = PITCH(font->width); /* Is there a reason why fbconsole couldn't handle any charcount >256? * If not this check should be changed to charcount < 256 */ @@ -2639,7 +2660,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font, if (fbcon_invalid_charcount(info, charcount)) return -EINVAL; - size = h * pitch * charcount; + size = CALC_FONTSZ(h, pitch, charcount); new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER); -- GitLab From 7c451eae80ee291315819728e8d5a2ee0930aa25 Mon Sep 17 00:00:00 2001 From: Evgeny Novikov Date: Wed, 5 Aug 2020 12:06:43 +0300 Subject: [PATCH 0571/1304] USB: lvtest: return proper error code in probe commit 531412492ce93ea29b9ca3b4eb5e3ed771f851dd upstream. lvs_rh_probe() can return some nonnegative value from usb_control_msg() when it is less than "USB_DT_HUB_NONVAR_SIZE + 2" that is considered as a failure. Make lvs_rh_probe() return -EINVAL in this case. Found by Linux Driver Verification project (linuxtesting.org). 
Signed-off-by: Evgeny Novikov Cc: stable Link: https://lore.kernel.org/r/20200805090643.3432-1-novikov@ispras.ru Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/lvstest.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c index e5c03c6d16e9..d3b161bdef74 100644 --- a/drivers/usb/misc/lvstest.c +++ b/drivers/usb/misc/lvstest.c @@ -429,7 +429,7 @@ static int lvs_rh_probe(struct usb_interface *intf, USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT); if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) { dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret); - return ret; + return ret < 0 ? ret : -EINVAL; } /* submit urb to poll interrupt endpoint */ -- GitLab From c1fe757dd3d18497eaca831ed82aa20b4186affd Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 29 Jul 2020 23:57:01 +0900 Subject: [PATCH 0572/1304] vt: defer kfree() of vc_screenbuf in vc_do_resize() commit f8d1653daec02315e06d30246cff4af72e76e54e upstream. syzbot is reporting UAF bug in set_origin() from vc_do_resize() [1], for vc_do_resize() calls kfree(vc->vc_screenbuf) before calling set_origin(). Unfortunately, in set_origin(), vc->vc_sw->con_set_origin() might access vc->vc_pos when scroll is involved in order to manipulate cursor, but vc->vc_pos refers already released vc->vc_screenbuf until vc->vc_pos gets updated based on the result of vc->vc_sw->con_set_origin(). Preserving old buffer and tolerating outdated vc members until set_origin() completes would be easier than preventing vc->vc_sw->con_set_origin() from accessing outdated vc members. 
[1] https://syzkaller.appspot.com/bug?id=6649da2081e2ebdc65c0642c214b27fe91099db3 Reported-by: syzbot Signed-off-by: Tetsuo Handa Cc: stable Link: https://lore.kernel.org/r/1596034621-4714-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 982d9684c65e..758f522f331e 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1199,7 +1199,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, unsigned int old_rows, old_row_size, first_copied_row; unsigned int new_cols, new_rows, new_row_size, new_screen_size; unsigned int user; - unsigned short *newscreen; + unsigned short *oldscreen, *newscreen; struct uni_screen *new_uniscr = NULL; WARN_CONSOLE_UNLOCKED(); @@ -1297,10 +1297,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, if (new_scr_end > new_origin) scr_memsetw((void *)new_origin, vc->vc_video_erase_char, new_scr_end - new_origin); - kfree(vc->vc_screenbuf); + oldscreen = vc->vc_screenbuf; vc->vc_screenbuf = newscreen; vc->vc_screenbuf_size = new_screen_size; set_origin(vc); + kfree(oldscreen); /* do part of a reset_terminal() */ vc->vc_top = 0; -- GitLab From 1221d11e5c35db18323ade3d4b2130bde89cc9df Mon Sep 17 00:00:00 2001 From: George Kennedy Date: Fri, 31 Jul 2020 12:33:12 -0400 Subject: [PATCH 0573/1304] vt_ioctl: change VT_RESIZEX ioctl to check for error return from vc_resize() commit bc5269ca765057a1b762e79a1cfd267cd7bf1c46 upstream. vc_resize() can return with an error after failure. Change VT_RESIZEX ioctl to save struct vc_data values that are modified and restore the original values in case of error. 
Signed-off-by: George Kennedy Cc: stable Reported-by: syzbot+38a3699c7eaf165b97a6@syzkaller.appspotmail.com Link: https://lore.kernel.org/r/1596213192-6635-2-git-send-email-george.kennedy@oracle.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/vt/vt_ioctl.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 5de81431c835..6a82030cf1ef 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -893,12 +893,22 @@ int vt_ioctl(struct tty_struct *tty, console_lock(); vcp = vc_cons[i].d; if (vcp) { + int ret; + int save_scan_lines = vcp->vc_scan_lines; + int save_font_height = vcp->vc_font.height; + if (v.v_vlin) vcp->vc_scan_lines = v.v_vlin; if (v.v_clin) vcp->vc_font.height = v.v_clin; vcp->vc_resize_user = 1; - vc_resize(vcp, v.v_cols, v.v_rows); + ret = vc_resize(vcp, v.v_cols, v.v_rows); + if (ret) { + vcp->vc_scan_lines = save_scan_lines; + vcp->vc_font.height = save_font_height; + console_unlock(); + return ret; + } } console_unlock(); } -- GitLab From 8a0d860cbdfd5557838bd0e46052907d5dcfdeb4 Mon Sep 17 00:00:00 2001 From: Tamseel Shams Date: Mon, 10 Aug 2020 08:30:21 +0530 Subject: [PATCH 0574/1304] serial: samsung: Removes the IRQ not found warning commit 8c6c378b0cbe0c9f1390986b5f8ffb5f6ff7593b upstream. In few older Samsung SoCs like s3c2410, s3c2412 and s3c2440, UART IP is having 2 interrupt lines. However, in other SoCs like s3c6400, s5pv210, exynos5433, and exynos4210 UART is having only 1 interrupt line. Due to this, "platform_get_irq(platdev, 1)" call in the driver gives the following false-positive error: "IRQ index 1 not found" on newer SoC's. This patch adds the condition to check for Tx interrupt only for the those SoC's which have 2 interrupt lines. 
Tested-by: Alim Akhtar Tested-by: Marek Szyprowski Reviewed-by: Krzysztof Kozlowski Reviewed-by: Alim Akhtar Signed-off-by: Tamseel Shams Cc: stable Link: https://lore.kernel.org/r/20200810030021.45348-1-m.shams@samsung.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/samsung.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 2a49b6d876b8..fcb89bf2524d 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1755,9 +1755,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, ourport->tx_irq = ret + 1; } - ret = platform_get_irq(platdev, 1); - if (ret > 0) - ourport->tx_irq = ret; + if (!s3c24xx_serial_has_interrupt_mask(port)) { + ret = platform_get_irq(platdev, 1); + if (ret > 0) + ourport->tx_irq = ret; + } /* * DMA is currently supported only on DT platforms, if DMA properties * are specified. -- GitLab From eec2f7d9f0352a8bfe41980632e4e67a0d5c032b Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 13 Aug 2020 12:52:40 +0200 Subject: [PATCH 0575/1304] serial: pl011: Fix oops on -EPROBE_DEFER commit 27afac93e3bd7fa89749cf11da5d86ac9cde4dba upstream. If probing of a pl011 gets deferred until after free_initmem(), an oops ensues because pl011_console_match() is called which has been freed. Fix by removing the __init attribute from the function and those it calls. Commit 10879ae5f12e ("serial: pl011: add console matching function") introduced pl011_console_match() not just for early consoles but regular preferred consoles, such as those added by acpi_parse_spcr(). Regular consoles may be registered after free_initmem() for various reasons, one being deferred probing, another being dynamic enablement of serial ports using a DeviceTree overlay. Thus, pl011_console_match() must not be declared __init and the functions it calls mustn't either. 
Stack trace for posterity: Unable to handle kernel paging request at virtual address 80c38b58 Internal error: Oops: 8000000d [#1] PREEMPT SMP ARM PC is at pl011_console_match+0x0/0xfc LR is at register_console+0x150/0x468 [<80187004>] (register_console) [<805a8184>] (uart_add_one_port) [<805b2b68>] (pl011_register_port) [<805b3ce4>] (pl011_probe) [<80569214>] (amba_probe) [<805ca088>] (really_probe) [<805ca2ec>] (driver_probe_device) [<805ca5b0>] (__device_attach_driver) [<805c8060>] (bus_for_each_drv) [<805c9dfc>] (__device_attach) [<805ca630>] (device_initial_probe) [<805c90a8>] (bus_probe_device) [<805c95a8>] (deferred_probe_work_func) Fixes: 10879ae5f12e ("serial: pl011: add console matching function") Signed-off-by: Lukas Wunner Cc: stable@vger.kernel.org # v4.10+ Cc: Aleksey Makarov Cc: Peter Hurley Cc: Russell King Cc: Christopher Covington Link: https://lore.kernel.org/r/f827ff09da55b8c57d316a1b008a137677b58921.1597315557.git.lukas@wunner.de Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/amba-pl011.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 1d501154e9f7..a8e2db9f7386 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2252,9 +2252,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) clk_disable(uap->clk); } -static void __init -pl011_console_get_options(struct uart_amba_port *uap, int *baud, - int *parity, int *bits) +static void pl011_console_get_options(struct uart_amba_port *uap, int *baud, + int *parity, int *bits) { if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) { unsigned int lcr_h, ibrd, fbrd; @@ -2287,7 +2286,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud, } } -static int __init pl011_console_setup(struct console *co, char *options) +static int pl011_console_setup(struct console *co, char *options) { struct uart_amba_port *uap; int baud = 38400; @@ 
-2355,8 +2354,8 @@ static int __init pl011_console_setup(struct console *co, char *options) * * Returns 0 if console matches; otherwise non-zero to use default matching */ -static int __init pl011_console_match(struct console *co, char *name, int idx, - char *options) +static int pl011_console_match(struct console *co, char *name, int idx, + char *options) { unsigned char iotype; resource_size_t addr; -- GitLab From e12f36220f6f89ecf78edc98a153d548352d6951 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Thu, 13 Aug 2020 12:59:54 +0200 Subject: [PATCH 0576/1304] serial: pl011: Don't leak amba_ports entry on driver register error commit 89efbe70b27dd325d8a8c177743a26b885f7faec upstream. pl011_probe() calls pl011_setup_port() to reserve an amba_ports[] entry, then calls pl011_register_port() to register the uart driver with the tty layer. If registration of the uart driver fails, the amba_ports[] entry is not released. If this happens 14 times (value of UART_NR macro), then all amba_ports[] entries will have been leaked and driver probing is no longer possible. (To be fair, that can only happen if the DeviceTree doesn't contain alias IDs since they cause the same entry to be used for a given port.) Fix it. 
Fixes: ef2889f7ffee ("serial: pl011: Move uart_register_driver call to device") Signed-off-by: Lukas Wunner Cc: stable@vger.kernel.org # v3.15+ Cc: Tushar Behera Link: https://lore.kernel.org/r/138f8c15afb2f184d8102583f8301575566064a6.1597316167.git.lukas@wunner.de Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/amba-pl011.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index a8e2db9f7386..45e4f2952143 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2593,7 +2593,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, static int pl011_register_port(struct uart_amba_port *uap) { - int ret; + int ret, i; /* Ensure interrupts from this UART are masked and cleared */ pl011_write(0, uap, REG_IMSC); @@ -2604,6 +2604,9 @@ static int pl011_register_port(struct uart_amba_port *uap) if (ret < 0) { dev_err(uap->port.dev, "Failed to register AMBA-PL011 driver\n"); + for (i = 0; i < ARRAY_SIZE(amba_ports); i++) + if (amba_ports[i] == uap) + amba_ports[i] = NULL; return ret; } } -- GitLab From ce755e4eae3e7f782d6a690ac6bd85ac9671b5e0 Mon Sep 17 00:00:00 2001 From: Valmer Huhn Date: Thu, 13 Aug 2020 12:52:55 -0400 Subject: [PATCH 0577/1304] serial: 8250_exar: Fix number of ports for Commtech PCIe cards commit c6b9e95dde7b54e6a53c47241201ab5a4035c320 upstream. The following in 8250_exar.c line 589 is used to determine the number of ports for each Exar board: nr_ports = board->num_ports ? board->num_ports : pcidev->device & 0x0f; If the number of ports a card has is not explicitly specified, it defaults to the rightmost 4 bits of the PCI device ID. This is prone to error since not all PCI device IDs contain a number which corresponds to the number of ports that card provides. This particular case involves COMMTECH_4222PCIE, COMMTECH_4224PCIE and COMMTECH_4228PCIE cards with device IDs 0x0022, 0x0020 and 0x0021. 
Currently the multiport cards receive 2, 0 and 1 port instead of 2, 4 and 8 ports respectively. To fix this, each Commtech Fastcom PCIe card is given a struct where the number of ports is explicitly specified. This ensures 'board->num_ports' is used instead of the default 'pcidev->device & 0x0f'. Fixes: d0aeaa83f0b0 ("serial: exar: split out the exar code from 8250_pci") Signed-off-by: Valmer Huhn Tested-by: Valmer Huhn Cc: stable Link: https://lore.kernel.org/r/20200813165255.GC345440@icarus.concurrent-rt.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_exar.c | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index d39162e71f59..195f58c5b477 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -638,6 +638,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = { .exit = pci_xr17v35x_exit, }; +static const struct exar8250_board pbn_fastcom35x_2 = { + .num_ports = 2, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_4 = { + .num_ports = 4, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + +static const struct exar8250_board pbn_fastcom35x_8 = { + .num_ports = 8, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + static const struct exar8250_board pbn_exar_XR17V4358 = { .num_ports = 12, .setup = pci_xr17v35x_setup, @@ -708,9 +726,9 @@ static const struct pci_device_id exar_pci_tbl[] = { EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x), EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358), EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358), - EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x), - EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x), + EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, 
pbn_fastcom35x_2), + EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_fastcom35x_4), + EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_fastcom35x_8), EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2), EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4), -- GitLab From 2eb35a11bbcc3636cdabd6228299d6a1b91cde00 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Mon, 17 Aug 2020 11:26:46 +0900 Subject: [PATCH 0578/1304] serial: 8250: change lock order in serial8250_do_startup() commit 205d300aea75623e1ae4aa43e0d265ab9cf195fd upstream. We have a number of "uart.port->desc.lock vs desc.lock->uart.port" lockdep reports coming from 8250 driver; this causes a bit of trouble to people, so let's fix it. The problem is reverse lock order in two different call paths: chain #1: serial8250_do_startup() spin_lock_irqsave(&port->lock); disable_irq_nosync(port->irq); raw_spin_lock_irqsave(&desc->lock) chain #2: __report_bad_irq() raw_spin_lock_irqsave(&desc->lock) for_each_action_of_desc() printk() spin_lock_irqsave(&port->lock); Fix this by changing the order of locks in serial8250_do_startup(): do disable_irq_nosync() first, which grabs desc->lock, and grab uart->port after that, so that chain #1 and chain #2 have same lock order. Full lockdep splat: ====================================================== WARNING: possible circular locking dependency detected 5.4.39 #55 Not tainted ====================================================== swapper/0/0 is trying to acquire lock: ffffffffab65b6c0 (console_owner){-...}, at: console_lock_spinning_enable+0x31/0x57 but task is already holding lock: ffff88810a8e34c0 (&irq_desc_lock_class){-.-.}, at: __report_bad_irq+0x5b/0xba which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #2 (&irq_desc_lock_class){-.-.}: _raw_spin_lock_irqsave+0x61/0x8d __irq_get_desc_lock+0x65/0x89 __disable_irq_nosync+0x3b/0x93 serial8250_do_startup+0x451/0x75c uart_startup+0x1b4/0x2ff uart_port_activate+0x73/0xa0 tty_port_open+0xae/0x10a uart_open+0x1b/0x26 tty_open+0x24d/0x3a0 chrdev_open+0xd5/0x1cc do_dentry_open+0x299/0x3c8 path_openat+0x434/0x1100 do_filp_open+0x9b/0x10a do_sys_open+0x15f/0x3d7 kernel_init_freeable+0x157/0x1dd kernel_init+0xe/0x105 ret_from_fork+0x27/0x50 -> #1 (&port_lock_key){-.-.}: _raw_spin_lock_irqsave+0x61/0x8d serial8250_console_write+0xa7/0x2a0 console_unlock+0x3b7/0x528 vprintk_emit+0x111/0x17f printk+0x59/0x73 register_console+0x336/0x3a4 uart_add_one_port+0x51b/0x5be serial8250_register_8250_port+0x454/0x55e dw8250_probe+0x4dc/0x5b9 platform_drv_probe+0x67/0x8b really_probe+0x14a/0x422 driver_probe_device+0x66/0x130 device_driver_attach+0x42/0x5b __driver_attach+0xca/0x139 bus_for_each_dev+0x97/0xc9 bus_add_driver+0x12b/0x228 driver_register+0x64/0xed do_one_initcall+0x20c/0x4a6 do_initcall_level+0xb5/0xc5 do_basic_setup+0x4c/0x58 kernel_init_freeable+0x13f/0x1dd kernel_init+0xe/0x105 ret_from_fork+0x27/0x50 -> #0 (console_owner){-...}: __lock_acquire+0x118d/0x2714 lock_acquire+0x203/0x258 console_lock_spinning_enable+0x51/0x57 console_unlock+0x25d/0x528 vprintk_emit+0x111/0x17f printk+0x59/0x73 __report_bad_irq+0xa3/0xba note_interrupt+0x19a/0x1d6 handle_irq_event_percpu+0x57/0x79 handle_irq_event+0x36/0x55 handle_fasteoi_irq+0xc2/0x18a do_IRQ+0xb3/0x157 ret_from_intr+0x0/0x1d cpuidle_enter_state+0x12f/0x1fd cpuidle_enter+0x2e/0x3d do_idle+0x1ce/0x2ce cpu_startup_entry+0x1d/0x1f start_kernel+0x406/0x46a secondary_startup_64+0xa4/0xb0 other info that might help us debug this: Chain exists of: console_owner --> &port_lock_key --> &irq_desc_lock_class Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&irq_desc_lock_class); lock(&port_lock_key); 
lock(&irq_desc_lock_class); lock(console_owner); *** DEADLOCK *** 2 locks held by swapper/0/0: #0: ffff88810a8e34c0 (&irq_desc_lock_class){-.-.}, at: __report_bad_irq+0x5b/0xba #1: ffffffffab65b5c0 (console_lock){+.+.}, at: console_trylock_spinning+0x20/0x181 stack backtrace: CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.4.39 #55 Hardware name: XXXXXX Call Trace: dump_stack+0xbf/0x133 ? print_circular_bug+0xd6/0xe9 check_noncircular+0x1b9/0x1c3 __lock_acquire+0x118d/0x2714 lock_acquire+0x203/0x258 ? console_lock_spinning_enable+0x31/0x57 console_lock_spinning_enable+0x51/0x57 ? console_lock_spinning_enable+0x31/0x57 console_unlock+0x25d/0x528 ? console_trylock+0x18/0x4e vprintk_emit+0x111/0x17f ? lock_acquire+0x203/0x258 printk+0x59/0x73 __report_bad_irq+0xa3/0xba note_interrupt+0x19a/0x1d6 handle_irq_event_percpu+0x57/0x79 handle_irq_event+0x36/0x55 handle_fasteoi_irq+0xc2/0x18a do_IRQ+0xb3/0x157 common_interrupt+0xf/0xf Signed-off-by: Sergey Senozhatsky Fixes: 768aec0b5bcc ("serial: 8250: fix shared interrupts issues with SMP and RT kernels") Reported-by: Guenter Roeck Reported-by: Raul Rangel BugLink: https://bugs.chromium.org/p/chromium/issues/detail?id=1114800 Link: https://lore.kernel.org/lkml/CAHQZ30BnfX+gxjPm1DUd5psOTqbyDh4EJE=2=VAMW_VDafctkA@mail.gmail.com/T/#u Reviewed-by: Andy Shevchenko Reviewed-by: Guenter Roeck Tested-by: Guenter Roeck Cc: stable Link: https://lore.kernel.org/r/20200817022646.1484638-1-sergey.senozhatsky@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_port.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 20b799219826..09f0dc3b967b 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -2259,6 +2259,10 @@ int serial8250_do_startup(struct uart_port *port) if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; + + if (port->irqflags & 
IRQF_SHARED) + disable_irq_nosync(port->irq); + /* * Test for UARTs that do not reassert THRE when the * transmitter is idle and the interrupt has already @@ -2268,8 +2272,6 @@ int serial8250_do_startup(struct uart_port *port) * allow register changes to become visible. */ spin_lock_irqsave(&port->lock, flags); - if (up->port.irqflags & IRQF_SHARED) - disable_irq_nosync(port->irq); wait_for_xmitr(up, UART_LSR_THRE); serial_port_out_sync(port, UART_IER, UART_IER_THRI); @@ -2281,9 +2283,10 @@ int serial8250_do_startup(struct uart_port *port) iir = serial_port_in(port, UART_IIR); serial_port_out(port, UART_IER, 0); + spin_unlock_irqrestore(&port->lock, flags); + if (port->irqflags & IRQF_SHARED) enable_irq(port->irq); - spin_unlock_irqrestore(&port->lock, flags); /* * If the interrupt is not reasserted, or we otherwise -- GitLab From 1f58ddc07eef098a32741e74516bb6ef18009ce1 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 10 Jun 2020 17:36:03 +0200 Subject: [PATCH 0579/1304] writeback: Protect inode->i_io_list with inode->i_lock commit b35250c0816c7cf7d0a8de92f5fafb6a7508a708 upstream. Currently, operations on inode->i_io_list are protected by wb->list_lock. In the following patches we'll need to maintain consistency between inode->i_state and inode->i_io_list so change the code so that inode->i_lock protects also all inode's i_io_list handling. 
Reviewed-by: Martijn Coenen Reviewed-by: Christoph Hellwig CC: stable@vger.kernel.org # Prerequisite for "writeback: Avoid skipping inode writeback" Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/fs-writeback.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 7bfeb1643c1f..5d0cfb979945 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -160,6 +160,7 @@ static void inode_io_list_del_locked(struct inode *inode, struct bdi_writeback *wb) { assert_spin_locked(&wb->list_lock); + assert_spin_locked(&inode->i_lock); list_del_init(&inode->i_io_list); wb_io_lists_depopulated(wb); @@ -1042,7 +1043,9 @@ void inode_io_list_del(struct inode *inode) struct bdi_writeback *wb; wb = inode_to_wb_and_lock_list(inode); + spin_lock(&inode->i_lock); inode_io_list_del_locked(inode, wb); + spin_unlock(&inode->i_lock); spin_unlock(&wb->list_lock); } @@ -1091,8 +1094,10 @@ void sb_clear_inode_writeback(struct inode *inode) * the case then the inode must have been redirtied while it was being written * out and we don't reset its dirtied_when. */ -static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) { + assert_spin_locked(&inode->i_lock); + if (!list_empty(&wb->b_dirty)) { struct inode *tail; @@ -1103,6 +1108,13 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) inode_io_list_move_locked(inode, wb, &wb->b_dirty); } +static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +{ + spin_lock(&inode->i_lock); + redirty_tail_locked(inode, wb); + spin_unlock(&inode->i_lock); +} + /* * requeue inode for re-scanning after bdi->b_io list is exhausted. */ @@ -1313,7 +1325,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * writeback is not making progress due to locked * buffers. Skip this inode for now. 
*/ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); return; } @@ -1333,7 +1345,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * retrying writeback of the dirty page/inode * that cannot be performed immediately. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } } else if (inode->i_state & I_DIRTY) { /* @@ -1341,7 +1353,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * such as delayed allocation during submission or metadata * updates after data IO completion. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } else if (inode->i_state & I_DIRTY_TIME) { inode->dirtied_when = jiffies; inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); @@ -1588,8 +1600,8 @@ static long writeback_sb_inodes(struct super_block *sb, */ spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { + redirty_tail_locked(inode, wb); spin_unlock(&inode->i_lock); - redirty_tail(inode, wb); continue; } if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { -- GitLab From be0937e03bf65b4e113213658bb3c54b5e5d4f0e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 29 May 2020 15:05:22 +0200 Subject: [PATCH 0580/1304] writeback: Avoid skipping inode writeback commit 5afced3bf28100d81fb2fe7e98918632a08feaf5 upstream. Inode's i_io_list list head is used to attach inode to several different lists - wb->{b_dirty, b_dirty_time, b_io, b_more_io}. When flush worker prepares a list of inodes to writeback e.g. for sync(2), it moves inodes to b_io list. Thus it is critical for sync(2) data integrity guarantees that inode is not requeued to any other writeback list when inode is queued for processing by flush worker. That's the reason why writeback_single_inode() does not touch i_io_list (unless the inode is completely clean) and why __mark_inode_dirty() does not touch i_io_list if I_SYNC flag is set. 
However there are two flaws in the current logic: 1) When inode has only I_DIRTY_TIME set but it is already queued in b_io list due to sync(2), concurrent __mark_inode_dirty(inode, I_DIRTY_SYNC) can still move inode back to b_dirty list resulting in skipping writeback of inode time stamps during sync(2). 2) When inode is on b_dirty_time list and writeback_single_inode() races with __mark_inode_dirty() like: writeback_single_inode() __mark_inode_dirty(inode, I_DIRTY_PAGES) inode->i_state |= I_SYNC __writeback_single_inode() inode->i_state |= I_DIRTY_PAGES; if (inode->i_state & I_SYNC) bail if (!(inode->i_state & I_DIRTY_ALL)) - not true so nothing done We end up with I_DIRTY_PAGES inode on b_dirty_time list and thus standard background writeback will not writeback this inode leading to possible dirty throttling stalls etc. (thanks to Martijn Coenen for this analysis). Fix these problems by tracking whether inode is queued in b_io or b_more_io lists in a new I_SYNC_QUEUED flag. When this flag is set, we know flush worker has queued inode and we should not touch i_io_list. On the other hand we also know that once flush worker is done with the inode it will requeue the inode to appropriate dirty list. When I_SYNC_QUEUED is not set, __mark_inode_dirty() can (and must) move inode to appropriate dirty list. 
Reported-by: Martijn Coenen Reviewed-by: Martijn Coenen Tested-by: Martijn Coenen Reviewed-by: Christoph Hellwig Fixes: 0ae45f63d4ef ("vfs: add support for a lazytime mount option") CC: stable@vger.kernel.org Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/fs-writeback.c | 17 ++++++++++++----- include/linux/fs.h | 8 ++++++-- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 5d0cfb979945..178578a010af 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -162,6 +162,7 @@ static void inode_io_list_del_locked(struct inode *inode, assert_spin_locked(&wb->list_lock); assert_spin_locked(&inode->i_lock); + inode->i_state &= ~I_SYNC_QUEUED; list_del_init(&inode->i_io_list); wb_io_lists_depopulated(wb); } @@ -1106,6 +1107,7 @@ static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) inode->dirtied_when = jiffies; } inode_io_list_move_locked(inode, wb, &wb->b_dirty); + inode->i_state &= ~I_SYNC_QUEUED; } static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) @@ -1181,8 +1183,11 @@ static int move_expired_inodes(struct list_head *delaying_queue, break; list_move(&inode->i_io_list, &tmp); moved++; + spin_lock(&inode->i_lock); if (flags & EXPIRE_DIRTY_ATIME) - set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); + inode->i_state |= I_DIRTY_TIME_EXPIRED; + inode->i_state |= I_SYNC_QUEUED; + spin_unlock(&inode->i_lock); if (sb_is_blkdev_sb(inode->i_sb)) continue; if (sb && sb != inode->i_sb) @@ -1357,6 +1362,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, } else if (inode->i_state & I_DIRTY_TIME) { inode->dirtied_when = jiffies; inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); + inode->i_state &= ~I_SYNC_QUEUED; } else { /* The inode is clean. Remove from writeback lists. 
*/ inode_io_list_del_locked(inode, wb); @@ -2220,11 +2226,12 @@ void __mark_inode_dirty(struct inode *inode, int flags) inode->i_state |= flags; /* - * If the inode is being synced, just update its dirty state. - * The unlocker will place the inode on the appropriate - * superblock list, based upon its state. + * If the inode is queued for writeback by flush worker, just + * update its dirty state. Once the flush worker is done with + * the inode it will place it on the appropriate superblock + * list, based upon its state. */ - if (inode->i_state & I_SYNC) + if (inode->i_state & I_SYNC_QUEUED) goto out_unlock_inode; /* diff --git a/include/linux/fs.h b/include/linux/fs.h index 8d568b51778b..876bfb6df06a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2050,6 +2050,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) * * I_CREATING New object's inode in the middle of setting up. * + * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists. + * Used to detect that mark_inode_dirty() should not move + * inode between dirty lists. + * * Q: What is the difference between I_WILL_FREE and I_FREEING? 
*/ #define I_DIRTY_SYNC (1 << 0) @@ -2067,11 +2071,11 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) #define I_LINKABLE (1 << 10) #define I_DIRTY_TIME (1 << 11) -#define __I_DIRTY_TIME_EXPIRED 12 -#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) +#define I_DIRTY_TIME_EXPIRED (1 << 12) #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) #define I_CREATING (1 << 15) +#define I_SYNC_QUEUED (1 << 17) #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) -- GitLab From 7c3d77a31b0633dd8d61e3ab7e3d919af6fded75 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 29 May 2020 16:08:58 +0200 Subject: [PATCH 0581/1304] writeback: Fix sync livelock due to b_dirty_time processing commit f9cae926f35e8230330f28c7b743ad088611a8de upstream. When we are processing writeback for sync(2), move_expired_inodes() didn't set any inode expiry value (older_than_this). This can result in writeback never completing if there's steady stream of inodes added to b_dirty_time list as writeback rechecks dirty lists after each writeback round whether there's more work to be done. Fix the problem by using sync(2) start time is inode expiry value when processing b_dirty_time list similarly as for ordinarily dirtied inodes. This requires some refactoring of older_than_this handling which simplifies the code noticeably as a bonus. 
Fixes: 0ae45f63d4ef ("vfs: add support for a lazytime mount option") CC: stable@vger.kernel.org Reviewed-by: Christoph Hellwig Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/fs-writeback.c | 44 ++++++++++++-------------------- include/trace/events/writeback.h | 13 +++++----- 2 files changed, 23 insertions(+), 34 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 178578a010af..15216b440880 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -45,7 +45,6 @@ struct wb_completion { struct wb_writeback_work { long nr_pages; struct super_block *sb; - unsigned long *older_than_this; enum writeback_sync_modes sync_mode; unsigned int tagged_writepages:1; unsigned int for_kupdate:1; @@ -1153,16 +1152,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) #define EXPIRE_DIRTY_ATIME 0x0001 /* - * Move expired (dirtied before work->older_than_this) dirty inodes from + * Move expired (dirtied before dirtied_before) dirty inodes from * @delaying_queue to @dispatch_queue. 
*/ static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - int flags, - struct wb_writeback_work *work) + int flags, unsigned long dirtied_before) { - unsigned long *older_than_this = NULL; - unsigned long expire_time; LIST_HEAD(tmp); struct list_head *pos, *node; struct super_block *sb = NULL; @@ -1170,16 +1166,9 @@ static int move_expired_inodes(struct list_head *delaying_queue, int do_sb_sort = 0; int moved = 0; - if ((flags & EXPIRE_DIRTY_ATIME) == 0) - older_than_this = work->older_than_this; - else if (!work->for_sync) { - expire_time = jiffies - (dirtytime_expire_interval * HZ); - older_than_this = &expire_time; - } while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); - if (older_than_this && - inode_dirtied_after(inode, *older_than_this)) + if (inode_dirtied_after(inode, dirtied_before)) break; list_move(&inode->i_io_list, &tmp); moved++; @@ -1225,18 +1214,22 @@ static int move_expired_inodes(struct list_head *delaying_queue, * | * +--> dequeue for IO */ -static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) +static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before) { int moved; + unsigned long time_expire_jif = dirtied_before; assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before); + if (!work->for_sync) + time_expire_jif = jiffies - dirtytime_expire_interval * HZ; moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, - EXPIRE_DIRTY_ATIME, work); + EXPIRE_DIRTY_ATIME, time_expire_jif); if (moved) wb_io_lists_populated(wb); - trace_writeback_queue_io(wb, work, moved); + trace_writeback_queue_io(wb, work, dirtied_before, moved); } static int write_inode(struct inode *inode, struct writeback_control *wbc) @@ -1748,7 +1741,7 @@ static long 
writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, blk_start_plug(&plug); spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) - queue_io(wb, &work); + queue_io(wb, &work, jiffies); __writeback_inodes_wb(wb, &work); spin_unlock(&wb->list_lock); blk_finish_plug(&plug); @@ -1768,7 +1761,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, * takes longer than a dirty_writeback_interval interval, then leave a * one-second gap. * - * older_than_this takes precedence over nr_to_write. So we'll only write back + * dirtied_before takes precedence over nr_to_write. So we'll only write back * all dirty pages if they are all attached to "old" mappings. */ static long wb_writeback(struct bdi_writeback *wb, @@ -1776,14 +1769,11 @@ static long wb_writeback(struct bdi_writeback *wb, { unsigned long wb_start = jiffies; long nr_pages = work->nr_pages; - unsigned long oldest_jif; + unsigned long dirtied_before = jiffies; struct inode *inode; long progress; struct blk_plug plug; - oldest_jif = jiffies; - work->older_than_this = &oldest_jif; - blk_start_plug(&plug); spin_lock(&wb->list_lock); for (;;) { @@ -1817,14 +1807,14 @@ static long wb_writeback(struct bdi_writeback *wb, * safe. 
*/ if (work->for_kupdate) { - oldest_jif = jiffies - + dirtied_before = jiffies - msecs_to_jiffies(dirty_expire_interval * 10); } else if (work->for_background) - oldest_jif = jiffies; + dirtied_before = jiffies; trace_writeback_start(wb, work); if (list_empty(&wb->b_io)) - queue_io(wb, work); + queue_io(wb, work, dirtied_before); if (work->sb) progress = writeback_sb_inodes(work->sb, wb, work); else diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 32db72c7c055..29d09755e5cf 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -360,8 +360,9 @@ DEFINE_WBC_EVENT(wbc_writepage); TRACE_EVENT(writeback_queue_io, TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before, int moved), - TP_ARGS(wb, work, moved), + TP_ARGS(wb, work, dirtied_before, moved), TP_STRUCT__entry( __array(char, name, 32) __field(unsigned long, older) @@ -371,19 +372,17 @@ TRACE_EVENT(writeback_queue_io, __field(unsigned int, cgroup_ino) ), TP_fast_assign( - unsigned long *older_than_this = work->older_than_this; strncpy(__entry->name, dev_name(wb->bdi->dev), 32); - __entry->older = older_than_this ? *older_than_this : 0; - __entry->age = older_than_this ? 
- (jiffies - *older_than_this) * 1000 / HZ : -1; + __entry->older = dirtied_before; + __entry->age = (jiffies - dirtied_before) * 1000 / HZ; __entry->moved = moved; __entry->reason = work->reason; __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); ), TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u", __entry->name, - __entry->older, /* older_than_this in jiffies */ - __entry->age, /* older_than_this in relative milliseconds */ + __entry->older, /* dirtied_before in jiffies */ + __entry->age, /* dirtied_before in relative milliseconds */ __entry->moved, __print_symbolic(__entry->reason, WB_WORK_REASON), __entry->cgroup_ino -- GitLab From 32a4f37b6849f7432cbbd00de05aa470cb01782f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 25 Aug 2020 17:22:58 +0200 Subject: [PATCH 0582/1304] XEN uses irqdesc::irq_data_common::handler_data to store a per interrupt XEN data pointer which contains XEN specific information. commit c330fb1ddc0a922f044989492b7fcca77ee1db46 upstream. handler data is meant for interrupt handlers and not for storing irq chip specific information as some devices require handler data to store internal per interrupt information, e.g. pinctrl/GPIO chained interrupt handlers. This obviously creates a conflict of interests and crashes the machine because the XEN pointer is overwritten by the driver pointer. As the XEN data is not handler specific it should be stored in irqdesc::irq_data::chip_data instead. A simple sed s/irq_[sg]et_handler_data/irq_[sg]et_chip_data/ cures that. 
Cc: stable@vger.kernel.org Reported-by: Roman Shaposhnik Signed-off-by: Thomas Gleixner Tested-by: Roman Shaposhnik Reviewed-by: Juergen Gross Link: https://lore.kernel.org/r/87lfi2yckt.fsf@nanos.tec.linutronix.de Signed-off-by: Juergen Gross Signed-off-by: Greg Kroah-Hartman --- drivers/xen/events/events_base.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 8d49b91d92cd..95e5a9300ff0 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -154,7 +154,7 @@ int get_evtchn_to_irq(unsigned evtchn) /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { - return irq_get_handler_data(irq); + return irq_get_chip_data(irq); } /* Constructors for packed IRQ information. */ @@ -375,7 +375,7 @@ static void xen_irq_init(unsigned irq) info->type = IRQT_UNBOUND; info->refcnt = -1; - irq_set_handler_data(irq, info); + irq_set_chip_data(irq, info); list_add_tail(&info->list, &xen_irq_list_head); } @@ -424,14 +424,14 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) static void xen_free_irq(unsigned irq) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (WARN_ON(!info)) return; list_del(&info->list); - irq_set_handler_data(irq, NULL); + irq_set_chip_data(irq, NULL); WARN_ON(info->refcnt > 0); @@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { int evtchn = evtchn_from_irq(irq); - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (info->refcnt > 0) { info->refcnt--; @@ -1105,7 +1105,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, void unbind_from_irqhandler(unsigned int irq, void *dev_id) { - struct irq_info *info = irq_get_handler_data(irq); + struct irq_info *info = irq_get_chip_data(irq); if (WARN_ON(!info)) return; @@ -1139,7 +1139,7 @@ int 
evtchn_make_refcounted(unsigned int evtchn) if (irq == -1) return -ENOENT; - info = irq_get_handler_data(irq); + info = irq_get_chip_data(irq); if (!info) return -ENOENT; @@ -1167,7 +1167,7 @@ int evtchn_get(unsigned int evtchn) if (irq == -1) goto done; - info = irq_get_handler_data(irq); + info = irq_get_chip_data(irq); if (!info) goto done; -- GitLab From 567e1a915e8f0897972d190fd7a7ef8e9a35954c Mon Sep 17 00:00:00 2001 From: Li Jun Date: Fri, 21 Aug 2020 12:15:47 +0300 Subject: [PATCH 0583/1304] usb: host: xhci: fix ep context print mismatch in debugfs commit 0077b1b2c8d9ad5f7a08b62fb8524cdb9938388f upstream. dci is 0 based and xhci_get_ep_ctx() will do ep index increment to get the ep context. [rename dci to ep_index -Mathias] Cc: stable # v4.15+ Fixes: 02b6fdc2a153 ("usb: xhci: Add debugfs interface for xHCI driver") Signed-off-by: Li Jun Signed-off-by: Mathias Nyman Link: https://lore.kernel.org/r/20200821091549.20556-2-mathias.nyman@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-debugfs.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 76c3f29562d2..448d7b11dec4 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -273,7 +273,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) static int xhci_endpoint_context_show(struct seq_file *s, void *unused) { - int dci; + int ep_index; dma_addr_t dma; struct xhci_hcd *xhci; struct xhci_ep_ctx *ep_ctx; @@ -282,9 +282,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus)); - for (dci = 1; dci < 32; dci++) { - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci); - dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params); + for (ep_index = 0; ep_index < 31; ep_index++) { + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); + dma = dev->out_ctx->dma + (ep_index + 1) * 
CTX_SIZE(xhci->hcc_params); seq_printf(s, "%pad: %s\n", &dma, xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info), le32_to_cpu(ep_ctx->ep_info2), -- GitLab From de24ca614bad9279707ebd1e4d4b0cc78b423631 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Fri, 21 Aug 2020 12:15:48 +0300 Subject: [PATCH 0584/1304] xhci: Do warm-reset when both CAS and XDEV_RESUME are set commit 904df64a5f4d5ebd670801d869ca0a6d6a6e8df6 upstream. Sometimes re-plugging a USB device during system sleep renders the device useless: [ 173.418345] xhci_hcd 0000:00:14.0: Get port status 2-4 read: 0x14203e2, return 0x10262 ... [ 176.496485] usb 2-4: Waited 2000ms for CONNECT [ 176.496781] usb usb2-port4: status 0000.0262 after resume, -19 [ 176.497103] usb 2-4: can't resume, status -19 [ 176.497438] usb usb2-port4: logical disconnect Because PLS equals XDEV_RESUME, the xHCI driver reports U3 to usbcore, despite the CAS bit being flagged. So prioritize CAS over XDEV_RESUME to let usbcore handle warm-reset for the port. Cc: stable Signed-off-by: Kai-Heng Feng Signed-off-by: Mathias Nyman Link: https://lore.kernel.org/r/20200821091549.20556-3-mathias.nyman@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-hub.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a58ef53e4ae1..64dc94853b8b 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -736,15 +736,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, { u32 pls = status_reg & PORT_PLS_MASK; - /* resume state is a xHCI internal state. 
- * Do not report it to usb core, instead, pretend to be U3, - * thus usb core knows it's not ready for transfer - */ - if (pls == XDEV_RESUME) { - *status |= USB_SS_PORT_LS_U3; - return; - } - /* When the CAS bit is set then warm reset * should be performed on port */ @@ -766,6 +757,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, */ pls |= USB_PORT_STAT_CONNECTION; } else { + /* + * Resume state is an xHCI internal state. Do not report it to + * usb core, instead, pretend to be U3, thus usb core knows + * it's not ready for transfer. + */ + if (pls == XDEV_RESUME) { + *status |= USB_SS_PORT_LS_U3; + return; + } + /* * If CAS bit isn't set but the Port is already at * Compliance Mode, fake a connection so the USB core -- GitLab From dc52d19b77e9ef49939dea2f8f8756d79f9533ae Mon Sep 17 00:00:00 2001 From: Ding Hui Date: Fri, 21 Aug 2020 12:15:49 +0300 Subject: [PATCH 0585/1304] xhci: Always restore EP_SOFT_CLEAR_TOGGLE even if ep reset failed commit f1ec7ae6c9f8c016db320e204cb519a1da1581b8 upstream. Some device drivers call libusb_clear_halt when target ep queue is not empty. (eg. spice client connected to qemu for usb redir) Before commit f5249461b504 ("xhci: Clear the host side toggle manually when endpoint is soft reset"), that works well. But now, we got the error log: EP not empty, refuse reset xhci_endpoint_reset failed and left ep_state's EP_SOFT_CLEAR_TOGGLE bit still set So all the subsequent urb sumbits to the ep will fail with the warn log: Can't enqueue URB while manually clearing toggle We need to clear ep_state EP_SOFT_CLEAR_TOGGLE bit after xhci_endpoint_reset, even if it failed. 
Fixes: f5249461b504 ("xhci: Clear the host side toggle manually when endpoint is soft reset") Cc: stable # v4.17+ Signed-off-by: Ding Hui Signed-off-by: Mathias Nyman Link: https://lore.kernel.org/r/20200821091549.20556-4-mathias.nyman@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f8e71c7aba6e..6f976c4cccda 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3154,10 +3154,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd, wait_for_completion(cfg_cmd->completion); - ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; xhci_free_command(xhci, cfg_cmd); cleanup: xhci_free_command(xhci, stop_cmd); + if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) + ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; } static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, -- GitLab From 517b087a70a3fc59cdba5dcf99afcb8a2f90a3c7 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 24 Aug 2020 19:35:31 +0200 Subject: [PATCH 0586/1304] PM: sleep: core: Fix the handling of pending runtime resume requests commit e3eb6e8fba65094328b8dca635d00de74ba75b45 upstream. It has been reported that system-wide suspend may be aborted in the absence of any wakeup events due to unforseen interactions of it with the runtume PM framework. One failing scenario is when there are multiple devices sharing an ACPI power resource and runtime-resume needs to be carried out for one of them during system-wide suspend (for example, because it needs to be reconfigured before the whole system goes to sleep). In that case, the runtime-resume of that device involves turning the ACPI power resource "on" which in turn causes runtime-resume requests to be queued up for all of the other devices sharing it. 
Those requests go to the runtime PM workqueue which is frozen during system-wide suspend, so they are not actually taken care of until the resume of the whole system, but the pm_runtime_barrier() call in __device_suspend() sees them and triggers system wakeup events for them which then cause the system-wide suspend to be aborted if wakeup source objects are in active use. Of course, the logic that leads to triggering those wakeup events is questionable in the first place, because clearly there are cases in which a pending runtime resume request for a device is not connected to any real wakeup events in any way (like the one above). Moreover, it is racy, because the device may be resuming already by the time the pm_runtime_barrier() runs and so if the driver doesn't take care of signaling the wakeup event as appropriate, it will be lost. However, if the driver does take care of that, the extra pm_wakeup_event() call in the core is redundant. Accordingly, drop the conditional pm_wakeup_event() call from __device_suspend() and make the latter call pm_runtime_barrier() alone. Also modify the comment next to that call to reflect the new code and extend it to mention the need to avoid unwanted interactions between runtime PM and system-wide device suspend callbacks. Fixes: 1e2ef05bb8cf8 ("PM: Limit race conditions between runtime PM and system sleep (v2)") Signed-off-by: Rafael J. 
Wysocki Acked-by: Alan Stern Reported-by: Utkarsh H Patel Tested-by: Utkarsh H Patel Tested-by: Pengfei Xu Cc: All applicable Signed-off-by: Greg Kroah-Hartman --- drivers/base/power/main.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 3b382a7e07b2..da413b95afab 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1751,13 +1751,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } /* - * If a device configured to wake up the system from sleep states - * has been suspended at run time and there's a resume request pending - * for it, this is equivalent to the device signaling wakeup, so the - * system suspend operation should be aborted. + * Wait for possible runtime PM transitions of the device in progress + * to complete and if there's a runtime resume request pending for it, + * resume it before proceeding with invoking the system-wide suspend + * callbacks for it. + * + * If the system-wide suspend callbacks below change the configuration + * of the device, they must disable runtime PM for it or otherwise + * ensure that its runtime-resume callbacks will not be confused by that + * change in case they are invoked going forward. */ - if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) - pm_wakeup_event(dev, 0); + pm_runtime_barrier(dev); if (pm_wakeup_pending()) { dev->power.direct_complete = false; -- GitLab From 9d57313ce14b6449369f04e07afd8bfe2c70a2bc Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Fri, 21 Aug 2020 13:53:42 +0300 Subject: [PATCH 0587/1304] device property: Fix the secondary firmware node handling in set_primary_fwnode() commit c15e1bdda4365a5f17cdadf22bf1c1df13884a9e upstream. When the primary firmware node pointer is removed from a device (set to NULL) the secondary firmware node pointer, when it exists, is made the primary node for the device. 
However, the secondary firmware node pointer of the original primary firmware node is never cleared (set to NULL). To avoid situation where the secondary firmware node pointer is pointing to a non-existing object, clearing it properly when the primary node is removed from a device in set_primary_fwnode(). Fixes: 97badf873ab6 ("device property: Make it possible to use secondary firmware nodes") Cc: All applicable Signed-off-by: Heikki Krogerus Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/base/core.c b/drivers/base/core.c index 928fc1532a70..b911c38ad18c 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -3333,9 +3333,9 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) */ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) { - if (fwnode) { - struct fwnode_handle *fn = dev->fwnode; + struct fwnode_handle *fn = dev->fwnode; + if (fwnode) { if (fwnode_is_primary(fn)) fn = fn->secondary; @@ -3345,8 +3345,12 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) } dev->fwnode = fwnode; } else { - dev->fwnode = fwnode_is_primary(dev->fwnode) ? - dev->fwnode->secondary : NULL; + if (fwnode_is_primary(fn)) { + dev->fwnode = fn->secondary; + fn->secondary = NULL; + } else { + dev->fwnode = NULL; + } } } EXPORT_SYMBOL_GPL(set_primary_fwnode); -- GitLab From d9eeca1ed80366ad71906f868979d451e532af1c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 30 Aug 2020 19:07:53 +0200 Subject: [PATCH 0588/1304] genirq/matrix: Deal with the sillyness of for_each_cpu() on UP commit 784a0830377d0761834e385975bc46861fea9fa0 upstream. Most of the CPU mask operations behave the same way, but for_each_cpu() and it's variants ignore the cpumask argument and claim that CPU0 is always in the mask. This is historical, inconsistent and annoying behaviour. 
The matrix allocator uses for_each_cpu() and can be called on UP with an empty cpumask. The calling code does not expect that this succeeds but until commit e027fffff799 ("x86/irq: Unbreak interrupt affinity setting") this went unnoticed. That commit added a WARN_ON() to catch cases which move an interrupt from one vector to another on the same CPU. The warning triggers on UP. Add a check for the cpumask being empty to prevent this. Fixes: 2f75d9e1c905 ("genirq: Implement bitmap matrix allocator") Reported-by: kernel test robot Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- kernel/irq/matrix.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c index 30cc217b8631..651a4ad6d711 100644 --- a/kernel/irq/matrix.c +++ b/kernel/irq/matrix.c @@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, unsigned int cpu, bit; struct cpumap *cm; + /* + * Not required in theory, but matrix_find_best_cpu() uses + * for_each_cpu() which ignores the cpumask on UP . + */ + if (cpumask_empty(msk)) + return -EINVAL; + cpu = matrix_find_best_cpu(m, msk); if (cpu == UINT_MAX) return -ENOSPC; -- GitLab From 93404fc8b630670170658bdb8d1ae8b2ab7d7634 Mon Sep 17 00:00:00 2001 From: qiuguorui1 Date: Thu, 20 Aug 2020 11:16:29 +0800 Subject: [PATCH 0589/1304] irqchip/stm32-exti: Avoid losing interrupts due to clearing pending bits by mistake commit e579076ac0a3bebb440fab101aef3c42c9f4c709 upstream. In the current code, when the eoi callback of the exti clears the pending bit of the current interrupt, it will first read the values of fpr and rpr, then logically OR the corresponding bit of the interrupt number, and finally write back to fpr and rpr. We found through experiments that if two exti interrupts, we call them int1/int2, arrive almost at the same time. in our scenario, the time difference is 30 microseconds, assuming int1 is triggered first. 
there will be an extreme scenario: both int's pending bit are set to 1, the irq handle of int1 is executed first, and eoi handle is then executed, at this moment, all pending bits are cleared, but the int 2 has not finally been reported to the cpu yet, which eventually lost int2. According to stm32's TRM description about rpr and fpr: Writing a 1 to this bit will trigger a rising edge event on event x, Writing 0 has no effect. Therefore, when clearing the pending bit, we only need to clear the pending bit of the irq. Fixes: 927abfc4461e7 ("irqchip/stm32: Add stm32mp1 support with hierarchy domain") Signed-off-by: qiuguorui1 Signed-off-by: Marc Zyngier Cc: stable@vger.kernel.org # v4.18+ Link: https://lore.kernel.org/r/20200820031629.15582-1-qiuguorui1@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/irqchip/irq-stm32-exti.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 97b27f338c30..f605470855f1 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -382,6 +382,16 @@ static void stm32_irq_ack(struct irq_data *d) irq_gc_unlock(gc); } +/* directly set the target bit without reading first. 
*/ +static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg) +{ + struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); + void __iomem *base = chip_data->host_data->base; + u32 val = BIT(d->hwirq % IRQS_PER_BANK); + + writel_relaxed(val, base + reg); +} + static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg) { struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d); @@ -415,9 +425,9 @@ static void stm32_exti_h_eoi(struct irq_data *d) raw_spin_lock(&chip_data->rlock); - stm32_exti_set_bit(d, stm32_bank->rpr_ofst); + stm32_exti_write_bit(d, stm32_bank->rpr_ofst); if (stm32_bank->fpr_ofst != UNDEF_REG) - stm32_exti_set_bit(d, stm32_bank->fpr_ofst); + stm32_exti_write_bit(d, stm32_bank->fpr_ofst); raw_spin_unlock(&chip_data->rlock); -- GitLab From 3d55a510668d303ed411d19464013615b8dd2c81 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 25 Aug 2020 11:43:45 -0400 Subject: [PATCH 0590/1304] drm/amdgpu: Fix buffer overflow in INFO ioctl commit b5b97cab55eb71daba3283c8b1d2cce456d511a1 upstream. The values for "se_num" and "sh_num" come from the user in the ioctl. They can be in the 0-255 range but if they're more than AMDGPU_GFX_MAX_SE (4) or AMDGPU_GFX_MAX_SH_PER_SE (2) then it results in an out of bounds read. 
Reported-by: Dan Carpenter Acked-by: Dan Carpenter Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 2beaaf4bee68..dd9b8feb3a66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -524,8 +524,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * in the bitfields */ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; + else if (se_num >= AMDGPU_GFX_MAX_SE) + return -EINVAL; if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) + return -EINVAL; if (info->read_mmr_reg.count > 128) return -EINVAL; -- GitLab From 513e0593f5aae022e4c2ca32392b1cebac8601d0 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 21 Aug 2020 12:05:03 +0800 Subject: [PATCH 0591/1304] drm/amd/pm: correct Vega10 swctf limit setting commit b05d71b51078fc428c6b72582126d9d75d3c1f4c upstream. Correct the Vega10 thermal swctf limit. 
Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1267 Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index aa044c1955fe..a2b2a6c67cda 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -362,6 +362,9 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = pp_table_info->tdp_table; struct amdgpu_device *adev = hwmgr->adev; int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; @@ -371,8 +374,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, if (low < range->min) low = range->min; - if (high > range->max) - high = range->max; + if (high > tdp_table->usSoftwareShutdownTemp) + high = tdp_table->usSoftwareShutdownTemp; if (low > high) return -EINVAL; -- GitLab From 2bb0a4e7760fd3b2a2df195b5a9698ca454981fe Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 21 Aug 2020 12:18:58 +0800 Subject: [PATCH 0592/1304] drm/amd/pm: correct Vega12 swctf limit setting commit e0ffd340249699ad27a6c91abdfa3e89f7823941 upstream. Correct the Vega12 thermal swctf limit. 
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c index 904eb2c9155b..601a596e94f0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c @@ -170,6 +170,8 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; @@ -179,8 +181,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, if (low < range->min) low = range->min; - if (high > range->max) - high = range->max; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; -- GitLab From df9ad14fccdc1d167bb206cc6453dc9aa3dcf0c9 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 10 Aug 2020 14:29:54 -0400 Subject: [PATCH 0593/1304] USB: yurex: Fix bad gfp argument commit f176ede3a3bde5b398a6777a7f9ff091baa2d3ff upstream. 
The syzbot fuzzer identified a bug in the yurex driver: It passes GFP_KERNEL as a memory-allocation flag to usb_submit_urb() at a time when its state is TASK_INTERRUPTIBLE, not TASK_RUNNING: do not call blocking ops when !TASK_RUNNING; state=1 set at [<00000000370c7c68>] prepare_to_wait+0xb1/0x2a0 kernel/sched/wait.c:247 WARNING: CPU: 1 PID: 340 at kernel/sched/core.c:7253 __might_sleep+0x135/0x190 kernel/sched/core.c:7253 Kernel panic - not syncing: panic_on_warn set ... CPU: 1 PID: 340 Comm: syz-executor677 Not tainted 5.8.0-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0xf6/0x16e lib/dump_stack.c:118 panic+0x2aa/0x6e1 kernel/panic.c:231 __warn.cold+0x20/0x50 kernel/panic.c:600 report_bug+0x1bd/0x210 lib/bug.c:198 handle_bug+0x41/0x80 arch/x86/kernel/traps.c:234 exc_invalid_op+0x14/0x40 arch/x86/kernel/traps.c:254 asm_exc_invalid_op+0x12/0x20 arch/x86/include/asm/idtentry.h:536 RIP: 0010:__might_sleep+0x135/0x190 kernel/sched/core.c:7253 Code: 65 48 8b 1c 25 40 ef 01 00 48 8d 7b 10 48 89 fe 48 c1 ee 03 80 3c 06 00 75 2b 48 8b 73 10 48 c7 c7 e0 9e 06 86 e8 ed 12 f6 ff <0f> 0b e9 46 ff ff ff e8 1f b2 4b 00 e9 29 ff ff ff e8 15 b2 4b 00 RSP: 0018:ffff8881cdb77a28 EFLAGS: 00010282 RAX: 0000000000000000 RBX: ffff8881c6458000 RCX: 0000000000000000 RDX: ffff8881c6458000 RSI: ffffffff8129ec93 RDI: ffffed1039b6ef37 RBP: ffffffff86fdade2 R08: 0000000000000001 R09: ffff8881db32f54f R10: 0000000000000000 R11: 0000000030343354 R12: 00000000000001f2 R13: 0000000000000000 R14: 0000000000000068 R15: ffffffff83c1b1aa slab_pre_alloc_hook.constprop.0+0xea/0x200 mm/slab.h:498 slab_alloc_node mm/slub.c:2816 [inline] slab_alloc mm/slub.c:2900 [inline] kmem_cache_alloc_trace+0x46/0x220 mm/slub.c:2917 kmalloc include/linux/slab.h:554 [inline] dummy_urb_enqueue+0x7a/0x880 drivers/usb/gadget/udc/dummy_hcd.c:1251 usb_hcd_submit_urb+0x2b2/0x22d0 
drivers/usb/core/hcd.c:1547 usb_submit_urb+0xb4e/0x13e0 drivers/usb/core/urb.c:570 yurex_write+0x3ea/0x820 drivers/usb/misc/yurex.c:495 This patch changes the call to use GFP_ATOMIC instead of GFP_KERNEL. Reported-and-tested-by: syzbot+c2c3302f9c601a4b1be2@syzkaller.appspotmail.com Signed-off-by: Alan Stern CC: Link: https://lore.kernel.org/r/20200810182954.GB307778@rowland.harvard.edu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/yurex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index be0505b8b5d4..785080f79073 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -492,7 +492,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE); dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__, dev->cntl_buffer[0]); - retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL); + retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC); if (retval >= 0) timeout = schedule_timeout(YUREX_WRITE_TIMEOUT); finish_wait(&dev->waitq, &wait); -- GitLab From 9e8bc59dfbb42f731c6487663f4c9e1f386075be Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Tue, 18 Aug 2020 19:27:47 -0700 Subject: [PATCH 0594/1304] usb: uas: Add quirk for PNY Pro Elite commit 9a469bc9f32dd33c7aac5744669d21a023a719cd upstream. PNY Pro Elite USB 3.1 Gen 2 device (SSD) doesn't respond to ATA_12 pass-through command (i.e. it just hangs). If it doesn't support this command, it should respond properly to the host. Let's just add a quirk to be able to move forward with other operations. 
Cc: stable@vger.kernel.org Signed-off-by: Thinh Nguyen Link: https://lore.kernel.org/r/2b0585228b003eedcc82db84697b31477df152e0.1597803605.git.thinhn@synopsys.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/unusual_uas.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 37157ed9a881..191bfa28917d 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -80,6 +80,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_BROKEN_FUA), +/* Reported-by: Thinh Nguyen */ +UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999, + "PNY", + "Pro Elite SSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Hans de Goede */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", -- GitLab From 9107ee9a118b6a5b3cd7ca633e4fbe5c672d0c80 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Fri, 31 Jul 2020 13:16:20 +0800 Subject: [PATCH 0595/1304] USB: quirks: Add no-lpm quirk for another Raydium touchscreen commit 5967116e8358899ebaa22702d09b0af57fef23e1 upstream. There's another Raydium touchscreen that needs the no-lpm quirk: [ 1.339149] usb 1-9: New USB device found, idVendor=2386, idProduct=350e, bcdDevice= 0.00 [ 1.339150] usb 1-9: New USB device strings: Mfr=1, Product=2, SerialNumber=0 [ 1.339151] usb 1-9: Product: Raydium Touch System [ 1.339152] usb 1-9: Manufacturer: Raydium Corporation ... 
[ 6.450497] usb 1-9: can't set config #1, error -110 BugLink: https://bugs.launchpad.net/bugs/1889446 Signed-off-by: Kai-Heng Feng Cc: stable Link: https://lore.kernel.org/r/20200731051622.28643-1-kai.heng.feng@canonical.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index c96c50faccf7..dec3ae539d20 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -465,6 +465,8 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM }, + { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM }, + /* DJI CineSSD */ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, -- GitLab From 5e8b409caa894946ae187ad1195a3356468a0e72 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 26 Aug 2020 15:46:24 -0400 Subject: [PATCH 0596/1304] USB: quirks: Ignore duplicate endpoint on Sound Devices MixPre-D commit 068834a2773b6a12805105cfadbb3d4229fc6e0a upstream. The Sound Devices MixPre-D audio card suffers from the same defect as the Sound Devices USBPre2: an endpoint shared between a normal audio interface and a vendor-specific interface, in violation of the USB spec. Since the USB core now treats duplicated endpoints as bugs and ignores them, the audio endpoint isn't available and the card can't be used for audio capture. Along the same lines as commit bdd1b147b802 ("USB: quirks: blacklist duplicate ep on Sound Devices USBPre2"), this patch adds a quirks entry saying to ignore ep5in for interface 1, leaving it available for use with standard audio interface 2. 
Reported-and-tested-by: Jean-Christophe Barnoud Signed-off-by: Alan Stern CC: Fixes: 3e4f8e21c4f2 ("USB: core: fix check for duplicate endpoints") Link: https://lore.kernel.org/r/20200826194624.GA412633@rowland.harvard.edu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index dec3ae539d20..2f068e525a37 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -370,6 +370,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0926, 0x0202), .driver_info = USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Sound Devices MixPre-D */ + { USB_DEVICE(0x0926, 0x0208), .driver_info = + USB_QUIRK_ENDPOINT_BLACKLIST }, + /* Keytouch QWERTY Panel keyboard */ { USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -511,6 +515,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { */ static const struct usb_device_id usb_endpoint_blacklist[] = { { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 }, + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 }, { } }; -- GitLab From 2948369b5ee15f98951f24268121779129b53de0 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Tue, 25 Aug 2020 23:22:31 +0200 Subject: [PATCH 0597/1304] USB: Ignore UAS for JMicron JMS567 ATA/ATAPI Bridge commit 9aa37788e7ebb3f489fb4b71ce07adadd444264a upstream. This device does not support UAS properly and a similar entry already exists in drivers/usb/storage/unusual_uas.h. Without this patch, storage_probe() defers the handling of this device to UAS, which cannot handle it either. 
Tested-by: Brice Goglin Fixes: bc3bdb12bbb3 ("usb-storage: Disable UAS on JMicron SATA enclosure") Acked-by: Alan Stern CC: Signed-off-by: Cyril Roelandt Link: https://lore.kernel.org/r/20200825212231.46309-1-tipecaml@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/unusual_devs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index f6c3681fa2e9..88275842219e 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2328,7 +2328,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, "JMicron", "USB to ATA/ATAPI Bridge", USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_BROKEN_FUA ), + US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ), /* Reported by Andrey Rahmatullin */ UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, -- GitLab From 3670e3e697a3ada2f5e342750e66fbbac1afa0b2 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Wed, 26 Aug 2020 22:49:31 +0800 Subject: [PATCH 0598/1304] usb: host: ohci-exynos: Fix error handling in exynos_ohci_probe() commit 1d4169834628d18b2392a2da92b7fbf5e8e2ce89 upstream. If the function platform_get_irq() failed, the negative value returned will not be detected here. So fix error handling in exynos_ohci_probe(). And when get irq failed, the function platform_get_irq() logs an error message, so remove redundant message here. 
Fixes: 62194244cf87 ("USB: Add Samsung Exynos OHCI diver") Signed-off-by: Zhang Shengju Cc: stable Signed-off-by: Tang Bin Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20200826144931.1828-1-tangbin@cmss.chinamobile.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ohci-exynos.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c index c0c4dcca6f3c..a4a88b6de3c4 100644 --- a/drivers/usb/host/ohci-exynos.c +++ b/drivers/usb/host/ohci-exynos.c @@ -156,9 +156,8 @@ static int exynos_ohci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(res); irq = platform_get_irq(pdev, 0); - if (!irq) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - err = -ENODEV; + if (irq < 0) { + err = irq; goto fail_io; } -- GitLab From bbd25d805c30234826265e1cc1b2955731308a0d Mon Sep 17 00:00:00 2001 From: Brooke Basile Date: Tue, 25 Aug 2020 09:05:08 -0400 Subject: [PATCH 0599/1304] USB: gadget: u_f: add overflow checks to VLA macros commit b1cd1b65afba95971fa457dfdb2c941c60d38c5b upstream. size can potentially hold an overflowed value if its assigned expression is left unchecked, leading to a smaller than needed allocation when vla_group_size() is used by callers to allocate memory. To fix this, add a test for saturation before declaring variables and an overflow check to (n) * sizeof(type). If the expression results in overflow, vla_group_size() will return SIZE_MAX. 
Reported-by: Ilja Van Sprundel Suggested-by: Kees Cook Signed-off-by: Brooke Basile Acked-by: Felipe Balbi Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/u_f.h | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index 09f90447fed5..494420af23bb 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -14,6 +14,7 @@ #define __U_F_H__ #include +#include /* Variable Length Array Macros **********************************************/ #define vla_group(groupname) size_t groupname##__next = 0 @@ -21,21 +22,36 @@ #define vla_item(groupname, type, name, n) \ size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = (n) * sizeof(type); \ - groupname##__next = offset + size; \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t align_mask = __alignof__(type) - 1; \ + size_t offset = (groupname##__next + align_mask) \ + & ~align_mask; \ + size_t size = array_size(n, sizeof(type)); \ + if (check_add_overflow(offset, size, \ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ offset; \ }) #define vla_item_with_sz(groupname, type, name, n) \ - size_t groupname##_##name##__sz = (n) * sizeof(type); \ - size_t groupname##_##name##__offset = ({ \ - size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) & ~align_mask;\ - size_t size = groupname##_##name##__sz; \ - groupname##__next = offset + size; \ - offset; \ + size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \ + size_t groupname##_##name##__offset = ({ \ + size_t offset = 0; \ + if (groupname##__next != SIZE_MAX) { \ + size_t align_mask = __alignof__(type) - 1; \ + size_t offset = (groupname##__next + align_mask) \ + & ~align_mask; \ + if (check_add_overflow(offset, 
groupname##_##name##__sz,\ + &groupname##__next)) { \ + groupname##__next = SIZE_MAX; \ + offset = 0; \ + } \ + } \ + offset; \ }) #define vla_ptr(ptr, groupname, name) \ -- GitLab From 471b23586387a32857778c511be60ab31c98dcfd Mon Sep 17 00:00:00 2001 From: Brooke Basile Date: Tue, 25 Aug 2020 09:07:27 -0400 Subject: [PATCH 0600/1304] USB: gadget: f_ncm: add bounds checks to ncm_unwrap_ntb() commit 2b74b0a04d3e9f9f08ff026e5663dce88ff94e52 upstream. Some values extracted by ncm_unwrap_ntb() could possibly lead to several different out of bounds reads of memory. Specifically the values passed to netdev_alloc_skb_ip_align() need to be checked so that memory is not overflowed. Resolve this by applying bounds checking to a number of different indexes and lengths of the structure parsing logic. Reported-by: Ilja Van Sprundel Signed-off-by: Brooke Basile Acked-by: Felipe Balbi Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_ncm.c | 81 ++++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 12 deletions(-) diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index cfca4584ae13..8d8c81d43069 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1184,12 +1184,15 @@ static int ncm_unwrap_ntb(struct gether *port, int ndp_index; unsigned dg_len, dg_len2; unsigned ndp_len; + unsigned block_len; struct sk_buff *skb2; int ret = -EINVAL; - unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize); const struct ndp_parser_opts *opts = ncm->parser_opts; unsigned crc_len = ncm->is_crc ? 
sizeof(uint32_t) : 0; int dgram_counter; + bool ndp_after_header; /* dwSignature */ if (get_unaligned_le32(tmp) != opts->nth_sign) { @@ -1208,25 +1211,37 @@ static int ncm_unwrap_ntb(struct gether *port, } tmp++; /* skip wSequence */ + block_len = get_ncm(&tmp, opts->block_length); /* (d)wBlockLength */ - if (get_ncm(&tmp, opts->block_length) > max_size) { + if (block_len > ntb_max) { INFO(port->func.config->cdev, "OUT size exceeded\n"); goto err; } ndp_index = get_ncm(&tmp, opts->ndp_index); + ndp_after_header = false; /* Run through all the NDP's in the NTB */ do { - /* NCM 3.2 */ - if (((ndp_index % 4) != 0) && - (ndp_index < opts->nth_size)) { + /* + * NCM 3.2 + * dwNdpIndex + */ + if (((ndp_index % 4) != 0) || + (ndp_index < opts->nth_size) || + (ndp_index > (block_len - + opts->ndp_size))) { INFO(port->func.config->cdev, "Bad index: %#X\n", ndp_index); goto err; } + if (ndp_index == opts->nth_size) + ndp_after_header = true; - /* walk through NDP */ + /* + * walk through NDP + * dwSignature + */ tmp = (void *)(skb->data + ndp_index); if (get_unaligned_le32(tmp) != ncm->ndp_sign) { INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); @@ -1237,14 +1252,15 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len = get_unaligned_le16(tmp++); /* * NCM 3.3.1 + * wLength * entry is 2 items * item size is 16/32 bits, opts->dgram_item_len * 2 bytes * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry * Each entry is a dgram index and a dgram length. 
*/ if ((ndp_len < opts->ndp_size - + 2 * 2 * (opts->dgram_item_len * 2)) - || (ndp_len % opts->ndplen_align != 0)) { + + 2 * 2 * (opts->dgram_item_len * 2)) || + (ndp_len % opts->ndplen_align != 0)) { INFO(port->func.config->cdev, "Bad NDP length: %#X\n", ndp_len); goto err; @@ -1261,8 +1277,21 @@ static int ncm_unwrap_ntb(struct gether *port, do { index = index2; + /* wDatagramIndex[0] */ + if ((index < opts->nth_size) || + (index > block_len - opts->dpe_size)) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index); + goto err; + } + dg_len = dg_len2; - if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */ + /* + * wDatagramLength[0] + * ethernet hdr + crc or larger than max frame size + */ + if ((dg_len < 14 + crc_len) || + (dg_len > frame_max)) { INFO(port->func.config->cdev, "Bad dgram length: %#X\n", dg_len); goto err; @@ -1286,6 +1315,37 @@ static int ncm_unwrap_ntb(struct gether *port, index2 = get_ncm(&tmp, opts->dgram_item_len); dg_len2 = get_ncm(&tmp, opts->dgram_item_len); + if (index2 == 0 || dg_len2 == 0) + break; + + /* wDatagramIndex[1] */ + if (ndp_after_header) { + if (index2 < opts->nth_size + opts->ndp_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + } else { + if (index2 < opts->nth_size + opts->dpe_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + } + if (index2 > block_len - opts->dpe_size) { + INFO(port->func.config->cdev, + "Bad index: %#X\n", index2); + goto err; + } + + /* wDatagramLength[1] */ + if ((dg_len2 < 14 + crc_len) || + (dg_len2 > frame_max)) { + INFO(port->func.config->cdev, + "Bad dgram length: %#X\n", dg_len); + goto err; + } + /* * Copy the data into a new skb. 
* This ensures the truesize is correct @@ -1302,9 +1362,6 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len -= 2 * (opts->dgram_item_len * 2); dgram_counter++; - - if (index2 == 0 || dg_len2 == 0) - break; } while (ndp_len > 2 * (opts->dgram_item_len * 2)); } while (ndp_index); -- GitLab From 0a0e5894e9936a03f4c8087f408e1e4eff4439f2 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 26 Aug 2020 22:21:19 +0300 Subject: [PATCH 0601/1304] USB: gadget: u_f: Unbreak offset calculation in VLAs commit bfd08d06d978d0304eb6f7855b548aa2cd1c5486 upstream. Inadvertently the commit b1cd1b65afba ("USB: gadget: u_f: add overflow checks to VLA macros") makes VLA macros to always return 0 due to different scope of two variables of the same name. Obviously we need to have only one. Fixes: b1cd1b65afba ("USB: gadget: u_f: add overflow checks to VLA macros") Reported-by: Marek Szyprowski Tested-by: Marek Szyprowski Signed-off-by: Andy Shevchenko Cc: Brooke Basile Cc: stable Link: https://lore.kernel.org/r/20200826192119.56450-1-andriy.shevchenko@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/u_f.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index 494420af23bb..cfa642655337 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -25,9 +25,9 @@ size_t offset = 0; \ if (groupname##__next != SIZE_MAX) { \ size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) \ - & ~align_mask; \ size_t size = array_size(n, sizeof(type)); \ + offset = (groupname##__next + align_mask) & \ + ~align_mask; \ if (check_add_overflow(offset, size, \ &groupname##__next)) { \ groupname##__next = SIZE_MAX; \ @@ -43,8 +43,8 @@ size_t offset = 0; \ if (groupname##__next != SIZE_MAX) { \ size_t align_mask = __alignof__(type) - 1; \ - size_t offset = (groupname##__next + align_mask) \ - & ~align_mask; \ + offset = (groupname##__next + 
align_mask) & \ + ~align_mask; \ if (check_add_overflow(offset, groupname##_##name##__sz,\ &groupname##__next)) { \ groupname##__next = SIZE_MAX; \ -- GitLab From 77b8ac359dfb887cbf4517c54f7fb70fa1b7530d Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sat, 1 Aug 2020 08:21:54 -0700 Subject: [PATCH 0602/1304] USB: cdc-acm: rework notification_buffer resizing commit f4b9d8a582f738c24ebeabce5cc15f4b8159d74e upstream. Clang static analysis reports this error cdc-acm.c:409:3: warning: Use of memory after it is freed acm_process_notification(acm, (unsigned char *)dr); There are three problems, the first one is that dr is not reset The variable dr is set with if (acm->nb_index) dr = (struct usb_cdc_notification *)acm->notification_buffer; But if the notification_buffer is too small it is resized with if (acm->nb_size) { kfree(acm->notification_buffer); acm->nb_size = 0; } alloc_size = roundup_pow_of_two(expected_size); /* * kmalloc ensures a valid notification_buffer after a * use of kfree in case the previous allocation was too * small. Final freeing is done on disconnect. */ acm->notification_buffer = kmalloc(alloc_size, GFP_ATOMIC); dr should point to the new acm->notification_buffer. The second problem is any data in the notification_buffer is lost when the pointer is freed. In the normal case, the current data is accumulated in the notification_buffer here. memcpy(&acm->notification_buffer[acm->nb_index], urb->transfer_buffer, copy_size); When a resize happens, anything before notification_buffer[acm->nb_index] is garbage. The third problem is the acm->nb_index is not reset on a resizing buffer error. So switch resizing to using krealloc and reassign dr and reset nb_index. 
Fixes: ea2583529cd1 ("cdc-acm: reassemble fragmented notifications") Signed-off-by: Tom Rix Cc: stable Acked-by: Oliver Neukum Link: https://lore.kernel.org/r/20200801152154.20683-1-trix@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-acm.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index ea7883e1fbe2..41453bf6fc0b 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -378,21 +378,19 @@ static void acm_ctrl_irq(struct urb *urb) if (current_size < expected_size) { /* notification is transmitted fragmented, reassemble */ if (acm->nb_size < expected_size) { - if (acm->nb_size) { - kfree(acm->notification_buffer); - acm->nb_size = 0; - } + u8 *new_buffer; alloc_size = roundup_pow_of_two(expected_size); - /* - * kmalloc ensures a valid notification_buffer after a - * use of kfree in case the previous allocation was too - * small. Final freeing is done on disconnect. - */ - acm->notification_buffer = - kmalloc(alloc_size, GFP_ATOMIC); - if (!acm->notification_buffer) + /* Final freeing is done on disconnect. */ + new_buffer = krealloc(acm->notification_buffer, + alloc_size, GFP_ATOMIC); + if (!new_buffer) { + acm->nb_index = 0; goto exit; + } + + acm->notification_buffer = new_buffer; acm->nb_size = alloc_size; + dr = (struct usb_cdc_notification *)acm->notification_buffer; } copy_size = min(current_size, -- GitLab From 2cce2472d99164c5ffb910f4dac2e81bb62341ca Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 26 Aug 2020 10:32:29 -0400 Subject: [PATCH 0603/1304] usb: storage: Add unusual_uas entry for Sony PSZ drives MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 20934c0de13b49a072fb1e0ca79fe0fe0e40eae5 upstream. The PSZ-HA* family of USB disk drives from Sony can't handle the REPORT OPCODES command when using the UAS protocol. 
This patch adds an appropriate quirks entry. Reported-and-tested-by: Till Dörges Signed-off-by: Alan Stern CC: Link: https://lore.kernel.org/r/20200826143229.GB400430@rowland.harvard.edu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/unusual_uas.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 191bfa28917d..dcdfcdfd2ad1 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -28,6 +28,13 @@ * and don't forget to CC: the USB development list */ +/* Reported-by: Till Dörges */ +UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999, + "Sony", + "PSZ-HA*", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* Reported-by: Julian Groß */ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, "LaCie", -- GitLab From 2d037285c566f07319a3c4da9093bf3b8d7c197f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 10 Aug 2020 17:31:16 -0400 Subject: [PATCH 0604/1304] btrfs: check the right error variable in btrfs_del_dir_entries_in_log [ Upstream commit fb2fecbad50964b9f27a3b182e74e437b40753ef ] With my new locking code dbench is so much faster that I tripped over a transaction abort from ENOSPC. This turned out to be because btrfs_del_dir_entries_in_log was checking for ret == -ENOSPC, but this function sets err on error, and returns err. So instead of properly marking the inode as needing a full commit, we were returning -ENOSPC and aborting in __btrfs_unlink_inode. Fix this by checking the proper variable so that we return the correct thing in the case of ENOSPC. The ENOENT needs to be checked, because btrfs_lookup_dir_item_index() can return -ENOENT if the dir item isn't in the tree log (which would happen if we hadn't fsync'ed this guy). We actually handle that case in __btrfs_unlink_inode, so it's an expected error to get back. 
Fixes: 4a500fd178c8 ("Btrfs: Metadata ENOSPC handling for tree log") CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Reviewed-by: David Sterba [ add note and comment about ENOENT ] Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/tree-log.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 090315f4ac78..3e903e6a3387 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3422,11 +3422,13 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, btrfs_free_path(path); out_unlock: mutex_unlock(&dir->log_mutex); - if (ret == -ENOSPC) { + if (err == -ENOSPC) { btrfs_set_log_full_commit(root->fs_info, trans); - ret = 0; - } else if (ret < 0) - btrfs_abort_transaction(trans, ret); + err = 0; + } else if (err < 0 && err != -ENOENT) { + /* ENOENT can be returned if the entry hasn't been fsynced yet */ + btrfs_abort_transaction(trans, err); + } btrfs_end_log_trans(root); -- GitLab From c4d7362a5cc5297bf79d3b3465c43722af5f3da7 Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Thu, 6 Aug 2020 19:46:23 -0700 Subject: [PATCH 0605/1304] usb: dwc3: gadget: Don't setup more than requested [ Upstream commit 5d187c0454ef4c5e046a81af36882d4d515922ec ] The SG list may be set up with entry size more than the requested length. Check the usb_request->length and make sure that we don't setup the TRBs to send/receive more than requested. This case may occur when the SG entry is allocated up to a certain minimum size, but the request length is less than that. It can also occur when the request is reused for a different request length. 
Cc: # v4.18+ Fixes: a31e63b608ff ("usb: dwc3: gadget: Correct handling of scattergather lists") Signed-off-by: Thinh Nguyen Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin --- drivers/usb/dwc3/gadget.c | 51 +++++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 16 deletions(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 2f5f4ca5c0d0..5d8a28efddad 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1017,26 +1017,24 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, * dwc3_prepare_one_trb - setup one TRB from one request * @dep: endpoint for which this request is prepared * @req: dwc3_request pointer + * @trb_length: buffer size of the TRB * @chain: should this TRB be chained to the next? * @node: only for isochronous endpoints. First TRB needs different type. */ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, - struct dwc3_request *req, unsigned chain, unsigned node) + struct dwc3_request *req, unsigned int trb_length, + unsigned chain, unsigned node) { struct dwc3_trb *trb; - unsigned int length; dma_addr_t dma; unsigned stream_id = req->request.stream_id; unsigned short_not_ok = req->request.short_not_ok; unsigned no_interrupt = req->request.no_interrupt; - if (req->request.num_sgs > 0) { - length = sg_dma_len(req->start_sg); + if (req->request.num_sgs > 0) dma = sg_dma_address(req->start_sg); - } else { - length = req->request.length; + else dma = req->request.dma; - } trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1048,7 +1046,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, + __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node, stream_id, short_not_ok, no_interrupt); } @@ -1058,16 +1056,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, struct scatterlist *sg = req->start_sg; struct scatterlist *s; int i; - + unsigned int length = 
req->request.length; unsigned int remaining = req->request.num_mapped_sgs - req->num_queued_sgs; + /* + * If we resume preparing the request, then get the remaining length of + * the request and resume where we left off. + */ + for_each_sg(req->request.sg, s, req->num_queued_sgs, i) + length -= sg_dma_len(s); + for_each_sg(sg, s, remaining, i) { - unsigned int length = req->request.length; unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); unsigned int rem = length % maxp; + unsigned int trb_length; unsigned chain = true; + trb_length = min_t(unsigned int, length, sg_dma_len(s)); + + length -= trb_length; + /* * IOMMU driver is coalescing the list of sgs which shares a * page boundary into one and giving it to USB driver. With @@ -1075,7 +1084,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, * sgs passed. So mark the chain bit to false if it isthe last * mapped sg. */ - if (i == remaining - 1) + if ((i == remaining - 1) || !length) chain = false; if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { @@ -1085,7 +1094,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, i); + dwc3_prepare_one_trb(dep, req, trb_length, true, i); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1096,7 +1105,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->request.short_not_ok, req->request.no_interrupt); } else { - dwc3_prepare_one_trb(dep, req, chain, i); + dwc3_prepare_one_trb(dep, req, trb_length, chain, i); } /* @@ -1111,6 +1120,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->num_queued_sgs++; + /* + * The number of pending SG entries may not correspond to the + * number of mapped SG entries. If all the data are queued, then + * don't include unused SG entries. 
+ */ + if (length == 0) { + req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs; + break; + } + if (!dwc3_calc_trbs_left(dep)) break; } @@ -1130,7 +1149,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); /* Now prepare one extra TRB to align transfer size */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1147,7 +1166,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->needs_extra_trb = true; /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, true, 0); + dwc3_prepare_one_trb(dep, req, length, true, 0); /* Now prepare one extra TRB to handle ZLP */ trb = &dep->trb_pool[dep->trb_enqueue]; @@ -1157,7 +1176,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->request.short_not_ok, req->request.no_interrupt); } else { - dwc3_prepare_one_trb(dep, req, false, 0); + dwc3_prepare_one_trb(dep, req, length, false, 0); } } -- GitLab From 7012bf6d6c3a56f2866c4c08539932be98b5585d Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Thu, 6 Aug 2020 19:46:29 -0700 Subject: [PATCH 0606/1304] usb: dwc3: gadget: Fix handling ZLP [ Upstream commit d2ee3ff79e6a3d4105e684021017d100524dc560 ] The usb_request->zero doesn't apply for isoc. Also, if we prepare a 0-length (ZLP) TRB for the OUT direction, we need to prepare an extra TRB to pad up to the MPS alignment. Use the same bounce buffer for the ZLP TRB and the extra pad TRB. 
Cc: # v4.5+ Fixes: d6e5a549cc4d ("usb: dwc3: simplify ZLP handling") Fixes: 04c03d10e507 ("usb: dwc3: gadget: handle request->zero") Signed-off-by: Thinh Nguyen Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin --- drivers/usb/dwc3/gadget.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 5d8a28efddad..9f6b43077300 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1159,6 +1159,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, req->request.short_not_ok, req->request.no_interrupt); } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && (IS_ALIGNED(req->request.length, maxp))) { struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; @@ -1168,13 +1169,23 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, /* prepare normal TRB */ dwc3_prepare_one_trb(dep, req, length, true, 0); - /* Now prepare one extra TRB to handle ZLP */ + /* Prepare one extra TRB to handle ZLP */ trb = &dep->trb_pool[dep->trb_enqueue]; req->num_trbs++; __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, - false, 1, req->request.stream_id, + !req->direction, 1, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); + + /* Prepare one more TRB to handle MPS alignment for OUT */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + } } else { dwc3_prepare_one_trb(dep, req, length, false, 0); } @@ -2347,8 +2358,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, status); if (req->needs_extra_trb) { + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); + + /* Reclaim MPS padding TRB for ZLP 
*/ + if (!req->direction && req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && + (IS_ALIGNED(req->request.length, maxp))) + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); + req->needs_extra_trb = false; } -- GitLab From 9c7514464430cb4d57182542fc15c09d07eb808d Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Thu, 6 Aug 2020 19:46:35 -0700 Subject: [PATCH 0607/1304] usb: dwc3: gadget: Handle ZLP for sg requests [ Upstream commit bc9a2e226ea95e1699f7590845554de095308b75 ] Currently dwc3 doesn't handle usb_request->zero for SG requests. This change checks and prepares extra TRBs for the ZLP for SG requests. Cc: # v4.5+ Fixes: 04c03d10e507 ("usb: dwc3: gadget: handle request->zero") Signed-off-by: Thinh Nguyen Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin --- drivers/usb/dwc3/gadget.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 9f6b43077300..7bf2573dd459 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1104,6 +1104,35 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, req->request.stream_id, req->request.short_not_ok, req->request.no_interrupt); + } else if (req->request.zero && req->request.length && + !usb_endpoint_xfer_isoc(dep->endpoint.desc) && + !rem && !chain) { + struct dwc3 *dwc = dep->dwc; + struct dwc3_trb *trb; + + req->needs_extra_trb = true; + + /* Prepare normal TRB */ + dwc3_prepare_one_trb(dep, req, trb_length, true, i); + + /* Prepare one extra TRB to handle ZLP */ + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, + !req->direction, 1, + req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + + /* Prepare one more TRB to handle MPS alignment */ + if (!req->direction) { + trb = &dep->trb_pool[dep->trb_enqueue]; + req->num_trbs++; + __dwc3_prepare_one_trb(dep, 
trb, dwc->bounce_addr, maxp, + false, 1, req->request.stream_id, + req->request.short_not_ok, + req->request.no_interrupt); + } } else { dwc3_prepare_one_trb(dep, req, trb_length, chain, i); } -- GitLab From dc828b79feea72cfbc34fd6104a8386bade6fd78 Mon Sep 17 00:00:00 2001 From: Jarkko Sakkinen Date: Mon, 31 Aug 2020 14:59:29 -0400 Subject: [PATCH 0608/1304] tpm: Unify the mismatching TPM space buffer sizes [ Upstream commit 6c4e79d99e6f42b79040f1a33cd4018f5425030b ] The size of the buffers for storing context's and sessions can vary from arch to arch as PAGE_SIZE can be anything between 4 kB and 256 kB (the maximum for PPC64). Define a fixed buffer size set to 16 kB. This should be enough for most use with three handles (that is how many we allow at the moment). Parametrize the buffer size while doing this, so that it is easier to revisit this later on if required. Cc: stable@vger.kernel.org Reported-by: Stefan Berger Fixes: 745b361e989a ("tpm: infrastructure for TPM spaces") Reviewed-by: Jerry Snitselaar Tested-by: Stefan Berger Signed-off-by: Jarkko Sakkinen Signed-off-by: Sasha Levin --- drivers/char/tpm/tpm-chip.c | 9 ++------- drivers/char/tpm/tpm.h | 6 +++++- drivers/char/tpm/tpm2-space.c | 26 ++++++++++++++++---------- drivers/char/tpm/tpmrm-dev.c | 2 +- 4 files changed, 24 insertions(+), 19 deletions(-) diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 4946c5b37d04..f79f87794273 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -276,13 +276,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->cdev.owner = THIS_MODULE; chip->cdevs.owner = THIS_MODULE; - chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.context_buf) { - rc = -ENOMEM; - goto out; - } - chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!chip->work_space.session_buf) { + rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); + if (rc) { rc = -ENOMEM; goto out; } 
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 289221d653cb..b9a30f0b8825 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -188,6 +188,7 @@ struct tpm_space { u8 *context_buf; u32 session_tbl[3]; u8 *session_buf; + u32 buf_size; }; enum tpm_chip_flags { @@ -278,6 +279,9 @@ struct tpm_output_header { #define TPM_TAG_RQU_COMMAND 193 +/* TPM2 specific constants. */ +#define TPM2_SPACE_BUFFER_SIZE 16384 /* 16 kB */ + struct stclear_flags_t { __be16 tag; u8 deactivated; @@ -595,7 +599,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type); unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal); int tpm2_probe(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); -int tpm2_init_space(struct tpm_space *space); +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size); void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space); int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, u8 *cmd); diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index d2e101b32482..9f4e22dcde27 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -43,18 +43,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) } } -int tpm2_init_space(struct tpm_space *space) +int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) { - space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->context_buf = kzalloc(buf_size, GFP_KERNEL); if (!space->context_buf) return -ENOMEM; - space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + space->session_buf = kzalloc(buf_size, GFP_KERNEL); if (space->session_buf == NULL) { kfree(space->context_buf); + /* Prevent caller getting a dangling pointer. 
*/ + space->context_buf = NULL; return -ENOMEM; } + space->buf_size = buf_size; return 0; } @@ -276,8 +279,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, sizeof(space->context_tbl)); memcpy(&chip->work_space.session_tbl, &space->session_tbl, sizeof(space->session_tbl)); - memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE); - memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE); + memcpy(chip->work_space.context_buf, space->context_buf, + space->buf_size); + memcpy(chip->work_space.session_buf, space->session_buf, + space->buf_size); rc = tpm2_load_space(chip); if (rc) { @@ -456,7 +461,7 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->context_tbl[i], - space->context_buf, PAGE_SIZE, + space->context_buf, space->buf_size, &offset); if (rc == -ENOENT) { space->context_tbl[i] = 0; @@ -474,9 +479,8 @@ static int tpm2_save_space(struct tpm_chip *chip) continue; rc = tpm2_save_context(chip, space->session_tbl[i], - space->session_buf, PAGE_SIZE, + space->session_buf, space->buf_size, &offset); - if (rc == -ENOENT) { /* handle error saving session, just forget it */ space->session_tbl[i] = 0; @@ -522,8 +526,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, sizeof(space->context_tbl)); memcpy(&space->session_tbl, &chip->work_space.session_tbl, sizeof(space->session_tbl)); - memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE); - memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE); + memcpy(space->context_buf, chip->work_space.context_buf, + space->buf_size); + memcpy(space->session_buf, chip->work_space.session_buf, + space->buf_size); return 0; } diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c index 1a0e97a5da5a..162fb16243d0 100644 --- a/drivers/char/tpm/tpmrm-dev.c +++ b/drivers/char/tpm/tpmrm-dev.c @@ -22,7 +22,7 @@ static int tpmrm_open(struct inode *inode, struct file 
*file) if (priv == NULL) return -ENOMEM; - rc = tpm2_init_space(&priv->space); + rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE); if (rc) { kfree(priv); return -ENOMEM; -- GitLab From a83adbe00b32df180a108adc86c2620521574c8d Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Wed, 29 Jul 2020 07:37:12 -0400 Subject: [PATCH 0609/1304] HID: hiddev: Fix slab-out-of-bounds write in hiddev_ioctl_usage() commit 25a097f5204675550afb879ee18238ca917cba7a upstream. `uref->usage_index` is not always being properly checked, causing hiddev_ioctl_usage() to go out of bounds under some cases. Fix it. Reported-by: syzbot+34ee1b45d88571c2fa8b@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?id=f2aebe90b8c56806b050a20b36f51ed6acabe802 Reviewed-by: Dan Carpenter Signed-off-by: Peilin Ye Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/hid/usbhid/hiddev.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index c34ef95d7cef..2dff663847c6 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -532,12 +532,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, switch (cmd) { case HIDIOCGUSAGE: + if (uref->usage_index >= field->report_count) + goto inval; uref->value = field->value[uref->usage_index]; if (copy_to_user(user_arg, uref, sizeof(*uref))) goto fault; goto goodreturn; case HIDIOCSUSAGE: + if (uref->usage_index >= field->report_count) + goto inval; field->value[uref->usage_index] = uref->value; goto goodreturn; -- GitLab From 79361df22e723d97607b4a8e871f0b3720ada7be Mon Sep 17 00:00:00 2001 From: Hector Martin Date: Sun, 16 Aug 2020 17:44:31 +0900 Subject: [PATCH 0610/1304] ALSA: usb-audio: Update documentation comment for MS2109 quirk commit 74a2a7de81a2ef20732ec02087314e92692a7a1b upstream. As the recent fix addressed the channel swap problem more properly, update the comment as well. 
Fixes: 1b7ecc241a67 ("ALSA: usb-audio: work around streaming quirk for MacroSilicon MS2109") Signed-off-by: Hector Martin Link: https://lore.kernel.org/r/20200816084431.102151-1-marcan@marcan.st Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/quirks-table.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 89b70308b551..83f72ddf4fda 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3524,8 +3524,8 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */ * they pretend to be 96kHz mono as a workaround for stereo being broken * by that... * - * They also have swapped L-R channels, but that's for userspace to deal - * with. + * They also have an issue with initial stream alignment that causes the + * channels to be swapped and out of phase, which is dealt with in quirks.c. */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | -- GitLab From c37da90efff5f183bea6ae4c2af33571f61fe317 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 3 Sep 2020 11:24:31 +0200 Subject: [PATCH 0611/1304] Linux 4.19.143 Tested-by: Shuah Khan Tested-by: Linux Kernel Functional Testing Tested-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e5e46aecf357..6fa3278df77c 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 142 +SUBLEVEL = 143 EXTRAVERSION = NAME = "People's Front" -- GitLab From abae259fdccc5e41ff302dd80a2b944ce385c970 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 29 Aug 2020 12:26:01 +0100 Subject: [PATCH 0612/1304] HID: core: Correctly handle ReportSize being zero commit bce1305c0ece3dc549663605e567655dd701752c upstream. It appears that a ReportSize value of zero is legal, even if a bit non-sensical. 
Most of the HID code seems to handle that gracefully, except when computing the total size in bytes. When fed as input to memset, this leads to some funky outcomes. Detect the corner case and correctly compute the size. Cc: stable@vger.kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Benjamin Tissoires Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hid-core.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 2c85d075daee..05122167d9d8 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1425,6 +1425,17 @@ static void hid_output_field(const struct hid_device *hid, } } +/* + * Compute the size of a report. + */ +static size_t hid_compute_report_size(struct hid_report *report) +{ + if (report->size) + return ((report->size - 1) >> 3) + 1; + + return 0; +} + /* * Create a report. 'data' has to be allocated using * hid_alloc_report_buf() so that it has proper size. @@ -1437,7 +1448,7 @@ void hid_output_report(struct hid_report *report, __u8 *data) if (report->id > 0) *data++ = report->id; - memset(data, 0, ((report->size - 1) >> 3) + 1); + memset(data, 0, hid_compute_report_size(report)); for (n = 0; n < report->maxfield; n++) hid_output_field(report->device, report->field[n], data); } @@ -1564,7 +1575,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, csize--; } - rsize = ((report->size - 1) >> 3) + 1; + rsize = hid_compute_report_size(report); if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) rsize = HID_MAX_BUFFER_SIZE - 1; -- GitLab From a47b8511d90528c77346597e2012100dfc28cd8c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 1 Sep 2020 10:52:33 +0100 Subject: [PATCH 0613/1304] HID: core: Sanitize event code and type when mapping input commit 35556bed836f8dc07ac55f69c8d17dce3e7f0e25 upstream. 
When calling into hid_map_usage(), the passed event code is blindly stored as is, even if it doesn't fit in the associated bitmap. This event code can come from a variety of sources, including devices masquerading as input devices, only a bit more "programmable". Instead of taking the event code at face value, check that it actually fits the corresponding bitmap, and if it doesn't: - spit out a warning so that we know which device is acting up - NULLify the bitmap pointer so that we catch unexpected uses Code paths that can make use of untrusted inputs can now check that the mapping was indeed correct and bail out if not. Cc: stable@vger.kernel.org Signed-off-by: Marc Zyngier Signed-off-by: Benjamin Tissoires Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hid-input.c | 4 ++++ drivers/hid/hid-multitouch.c | 2 ++ include/linux/hid.h | 42 +++++++++++++++++++++++++----------- 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 51bfe23d00bc..a9da1526c40a 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1125,6 +1125,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } mapped: + /* Mapping failed, bail out */ + if (!bit) + return; + if (device->driver->input_mapped && device->driver->input_mapped(device, hidinput, field, usage, &bit, &max) < 0) { diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 8baf10beb1d5..ccda72f748ee 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -841,6 +841,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, code = BTN_0 + ((usage->hid - 1) & HID_USAGE); hid_map_usage(hi, usage, bit, max, EV_KEY, code); + if (!*bit) + return -1; input_set_capability(hi->input, EV_KEY, code); return 1; diff --git a/include/linux/hid.h b/include/linux/hid.h index 8506637f070d..a46b6832b373 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h 
@@ -956,34 +956,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) { * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type + * + * The value pointed to by @bit will be set to NULL if either @type is + * an unhandled event type, or if @c is out of range for @type. This + * can be used as an error condition. */ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, - __u8 type, __u16 c) + __u8 type, unsigned int c) { struct input_dev *input = hidinput->input; - - usage->type = type; - usage->code = c; + unsigned long *bmap = NULL; + unsigned int limit = 0; switch (type) { case EV_ABS: - *bit = input->absbit; - *max = ABS_MAX; + bmap = input->absbit; + limit = ABS_MAX; break; case EV_REL: - *bit = input->relbit; - *max = REL_MAX; + bmap = input->relbit; + limit = REL_MAX; break; case EV_KEY: - *bit = input->keybit; - *max = KEY_MAX; + bmap = input->keybit; + limit = KEY_MAX; break; case EV_LED: - *bit = input->ledbit; - *max = LED_MAX; + bmap = input->ledbit; + limit = LED_MAX; break; } + + if (unlikely(c > limit || !bmap)) { + pr_warn_ratelimited("%s: Invalid code %d type %d\n", + input->name, c, type); + *bit = NULL; + return; + } + + usage->type = type; + usage->code = c; + *max = limit; + *bit = bmap; } /** @@ -997,7 +1012,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); - clear_bit(c, *bit); + if (*bit) + clear_bit(usage->code, *bit); } /** -- GitLab From 92d0750a38b09b5aa65c8b240aaa08f6935b19e9 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Tue, 1 Sep 2020 16:58:53 -0500 Subject: [PATCH 0614/1304] perf record/stat: Explicitly call out event modifiers in the documentation commit e48a73a312ebf19cc3d72aa74985db25c30757c1 upstream. 
Event modifiers are not mentioned in the perf record or perf stat manpages. Add them to orient new users more effectively by pointing them to the perf list manpage for details. Fixes: 2055fdaf8703 ("perf list: Document precise event sampling for AMD IBS") Signed-off-by: Kim Phillips Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexey Budankov Cc: Ian Rogers Cc: Jin Yao Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Clarke Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tony Jones Cc: stable@vger.kernel.org Link: http://lore.kernel.org/lkml/20200901215853.276234-1-kim.phillips@amd.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Greg Kroah-Hartman --- tools/perf/Documentation/perf-record.txt | 4 ++++ tools/perf/Documentation/perf-stat.txt | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 246dee081efd..edf2be251788 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -33,6 +33,10 @@ OPTIONS - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where 'param1', 'param2', etc are defined as formats for the PMU in /sys/bus/event_source/devices//format/*. diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index b10a90b6a718..239af8f71f79 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -39,6 +39,10 @@ report:: - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a hexadecimal event descriptor. + - a symbolic or raw PMU event followed by an optional colon + and a list of event modifiers, e.g., cpu-cycles:p. 
See the + linkperf:perf-list[1] man page for details on event modifiers. + - a symbolically formed event like 'pmu/param1=0x3,param2/' where param1 and param2 are defined as formats for the PMU in /sys/bus/event_source/devices//format/* -- GitLab From 04414e46923f83b1c870d126eca18c000ff04ba7 Mon Sep 17 00:00:00 2001 From: Bodo Stroesser Date: Thu, 28 May 2020 21:31:08 +0200 Subject: [PATCH 0615/1304] scsi: target: tcmu: Fix size in calls to tcmu_flush_dcache_range commit 8c4e0f212398cdd1eb4310a5981d06a723cdd24f upstream. 1) If remaining ring space before the end of the ring is smaller then the next cmd to write, tcmu writes a padding entry which fills the remaining space at the end of the ring. Then tcmu calls tcmu_flush_dcache_range() with the size of struct tcmu_cmd_entry as data length to flush. If the space filled by the padding was smaller then tcmu_cmd_entry, tcmu_flush_dcache_range() is called for an address range reaching behind the end of the vmalloc'ed ring. tcmu_flush_dcache_range() in a loop calls flush_dcache_page(virt_to_page(start)); for every page being part of the range. On x86 the line is optimized out by the compiler, as flush_dcache_page() is empty on x86. But I assume the above can cause trouble on other architectures that really have a flush_dcache_page(). For paddings only the header part of an entry is relevant due to alignment rules the header always fits in the remaining space, if padding is needed. So tcmu_flush_dcache_range() can safely be called with sizeof(entry->hdr) as the length here. 2) After it has written a command to cmd ring, tcmu calls tcmu_flush_dcache_range() using the size of a struct tcmu_cmd_entry as data length to flush. But if a command needs many iovecs, the real size of the command may be bigger then tcmu_cmd_entry, so a part of the written command is not flushed then. 
Link: https://lore.kernel.org/r/20200528193108.9085-1-bstroesser@ts.fujitsu.com Acked-by: Mike Christie Signed-off-by: Bodo Stroesser Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_user.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 91dbac7446a4..719520fe2d91 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1018,7 +1018,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) entry->hdr.cmd_id = 0; /* not used for PAD */ entry->hdr.kflags = 0; entry->hdr.uflags = 0; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, sizeof(entry->hdr)); UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); @@ -1083,7 +1083,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) cdb_off = CMDR_OFF + cmd_head + base_command_size; memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); entry->req.cdb_off = cdb_off; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + tcmu_flush_dcache_range(entry, command_size); UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); tcmu_flush_dcache_range(mb, sizeof(*mb)); -- GitLab From e6309b813b79ce1dbb902465cb502f4ed9cb6aa9 Mon Sep 17 00:00:00 2001 From: Bodo Stroesser Date: Thu, 18 Jun 2020 15:16:31 +0200 Subject: [PATCH 0616/1304] scsi: target: tcmu: Optimize use of flush_dcache_page commit 3c58f737231e2c8cbf543a09d84d8c8e80e05e43 upstream. (scatter|gather)_data_area() need to flush dcache after writing data to or before reading data from a page in uio data area. The two routines are able to handle data transfer to/from such a page in fragments and flush the cache after each fragment was copied by calling the wrapper tcmu_flush_dcache_range(). 
That means: 1) flush_dcache_page() can be called multiple times for the same page. 2) Calling flush_dcache_page() indirectly using the wrapper does not make sense, because each call of the wrapper is for one single page only and the calling routine already has the correct page pointer. Change (scatter|gather)_data_area() such that, instead of calling tcmu_flush_dcache_range() before/after each memcpy, it now calls flush_dcache_page() before unmapping a page (when writing is complete for that page) or after mapping a page (when starting to read the page). After this change only calls to tcmu_flush_dcache_range() for addresses in vmalloc'ed command ring are left over. The patch was tested on ARM with kernel 4.19.118 and 5.7.2 Link: https://lore.kernel.org/r/20200618131632.32748-2-bstroesser@ts.fujitsu.com Tested-by: JiangYu Tested-by: Daniel Meyerholt Acked-by: Mike Christie Signed-off-by: Bodo Stroesser Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_user.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 719520fe2d91..99314e516244 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -687,8 +687,10 @@ static void scatter_data_area(struct tcmu_dev *udev, from = kmap_atomic(sg_page(sg)) + sg->offset; while (sg_remaining > 0) { if (block_remaining == 0) { - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + } block_remaining = DATA_BLOCK_SIZE; dbi = tcmu_cmd_get_dbi(tcmu_cmd); @@ -733,7 +735,6 @@ static void scatter_data_area(struct tcmu_dev *udev, memcpy(to + offset, from + sg->length - sg_remaining, copy_bytes); - tcmu_flush_dcache_range(to, copy_bytes); } sg_remaining -= copy_bytes; @@ -742,8 +743,10 @@ static void scatter_data_area(struct tcmu_dev *udev, kunmap_atomic(from - sg->offset); } - if (to) + if (to) { + flush_dcache_page(page); kunmap_atomic(to); + 
} } static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, @@ -789,13 +792,13 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, dbi = tcmu_cmd_get_dbi(cmd); page = tcmu_get_block_page(udev, dbi); from = kmap_atomic(page); + flush_dcache_page(page); } copy_bytes = min_t(size_t, sg_remaining, block_remaining); if (read_len < copy_bytes) copy_bytes = read_len; offset = DATA_BLOCK_SIZE - block_remaining; - tcmu_flush_dcache_range(from, copy_bytes); memcpy(to + sg->length - sg_remaining, from + offset, copy_bytes); -- GitLab From ee385b41089d948987764f462adc246be028e32e Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 11 Aug 2020 02:50:44 +0000 Subject: [PATCH 0617/1304] tty: serial: qcom_geni_serial: Drop __init from qcom_geni_console_setup [ Upstream commit 975efc66d4e654207c17f939eb737ac591ac38fe ] When booting with heavily modularized config, the serial console may not be able to load until after init when modules that satisfy needed dependencies have time to load. 
Unfortunately, as qcom_geni_console_setup is marked as __init, the function may have been freed before we get to run it, causing boot time crashes such as: [ 6.469057] Unable to handle kernel paging request at virtual address ffffffe645d4e6cc [ 6.481623] Mem abort info: [ 6.484466] ESR = 0x86000007 [ 6.487557] EC = 0x21: IABT (current EL), IL = 32 bits [ 6.492929] SET = 0, FnV = 0g [ 6.496016] EA = 0, S1PTW = 0 [ 6.499202] swapper pgtable: 4k pages, 39-bit VAs, pgdp=000000008151e000 [ 6.501286] ufshcd-qcom 1d84000.ufshc: ufshcd_print_pwr_info:[RX, TX]: gear=[3, 3], lane[2, 2], pwr[FAST MODE, FAST MODE], rate = 2 [ 6.505977] [ffffffe645d4e6cc] pgd=000000017df9f003, p4d=000000017df9f003, pud=000000017df9f003, pmd=000000017df9c003, pte=0000000000000000 [ 6.505990] Internal error: Oops: 86000007 [#1] PREEMPT SMP [ 6.505995] Modules linked in: zl10353 zl10039 zl10036 zd1301_demod xc5000 xc4000 ves1x93 ves1820 tuner_xc2028 tuner_simple tuner_types tua9001 tua6100 1 [ 6.506152] isl6405 [ 6.518104] ufshcd-qcom 1d84000.ufshc: ufshcd_find_max_sup_active_icc_level: Regulator capability was not set, actvIccLevel=0 [ 6.530549] horus3a helene fc2580 fc0013 fc0012 fc0011 ec100 e4000 dvb_pll ds3000 drxk drxd drx39xyj dib9000 dib8000 dib7000p dib7000m dib3000mc dibx003 [ 6.624271] CPU: 7 PID: 148 Comm: kworker/7:2 Tainted: G W 5.8.0-mainline-12021-g6defd37ba1cd #3455 [ 6.624273] Hardware name: Thundercomm Dragonboard 845c (DT) [ 6.624290] Workqueue: events deferred_probe_work_func [ 6.624296] pstate: 40c00005 (nZcv daif +PAN +UAO BTYPE=--) [ 6.624307] pc : qcom_geni_console_setup+0x0/0x110 [ 6.624316] lr : try_enable_new_console+0xa0/0x140 [ 6.624318] sp : ffffffc010843a30 [ 6.624320] x29: ffffffc010843a30 x28: ffffffe645c3e7d0 [ 6.624325] x27: ffffff80f8022180 x26: ffffffc010843b28 [ 6.637937] x25: 0000000000000000 x24: ffffffe6462a2000 [ 6.637941] x23: ffffffe646398000 x22: 0000000000000000 [ 6.637945] x21: 0000000000000000 x20: ffffffe6462a5ce8 [ 6.637952] x19: ffffffe646398e38 
x18: ffffffffffffffff [ 6.680296] x17: 0000000000000000 x16: ffffffe64492b900 [ 6.680300] x15: ffffffe6461e9d08 x14: 69202930203d2064 [ 6.680305] x13: 7561625f65736162 x12: 202c363331203d20 [ 6.696434] x11: 0000000000000030 x10: 0101010101010101 [ 6.696438] x9 : 4d4d20746120304d x8 : 7f7f7f7f7f7f7f7f [ 6.707249] x7 : feff4c524c787373 x6 : 0000000000008080 [ 6.707253] x5 : 0000000000000000 x4 : 8080000000000000 [ 6.707257] x3 : 0000000000000000 x2 : ffffffe645d4e6cc [ 6.744223] qcom_geni_serial 898000.serial: dev_pm_opp_set_rate: failed to find OPP for freq 102400000 (-34) [ 6.744966] x1 : fffffffefe74e174 x0 : ffffffe6462a5ce8 [ 6.753580] qcom_geni_serial 898000.serial: dev_pm_opp_set_rate: failed to find OPP for freq 102400000 (-34) [ 6.761634] Call trace: [ 6.761639] qcom_geni_console_setup+0x0/0x110 [ 6.761645] register_console+0x29c/0x2f8 [ 6.767981] Bluetooth: hci0: Frame reassembly failed (-84) [ 6.775252] uart_add_one_port+0x438/0x500 [ 6.775258] qcom_geni_serial_probe+0x2c4/0x4a8 [ 6.775266] platform_drv_probe+0x58/0xa8 [ 6.855359] really_probe+0xec/0x398 [ 6.855362] driver_probe_device+0x5c/0xb8 [ 6.855367] __device_attach_driver+0x98/0xb8 [ 7.184945] bus_for_each_drv+0x74/0xd8 [ 7.188825] __device_attach+0xec/0x148 [ 7.192705] device_initial_probe+0x24/0x30 [ 7.196937] bus_probe_device+0x9c/0xa8 [ 7.200816] deferred_probe_work_func+0x7c/0xb8 [ 7.205398] process_one_work+0x20c/0x4b0 [ 7.209456] worker_thread+0x48/0x460 [ 7.213157] kthread+0x14c/0x158 [ 7.216432] ret_from_fork+0x10/0x18 [ 7.220049] Code: bad PC value [ 7.223139] ---[ end trace 73f3b21e251d5a70 ]--- Thus this patch removes the __init avoiding crash in such configs. 
Cc: Andy Gross Cc: Jiri Slaby Cc: Saravana Kannan Cc: Todd Kjos Cc: Amit Pundir Cc: linux-arm-msm@vger.kernel.org Cc: linux-serial@vger.kernel.org Suggested-by: Saravana Kannan Signed-off-by: John Stultz Reviewed-by: Bjorn Andersson Link: https://lore.kernel.org/r/20200811025044.70626-1-john.stultz@linaro.org Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/serial/qcom_geni_serial.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 0d405cc58e72..cd0768c3e773 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1050,7 +1050,7 @@ static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport) } #ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE -static int __init qcom_geni_console_setup(struct console *co, char *options) +static int qcom_geni_console_setup(struct console *co, char *options) { struct uart_port *uport; struct qcom_geni_serial_port *port; -- GitLab From 8bc5c9ba5a1d67aaf6957199b2ab8a24402741a9 Mon Sep 17 00:00:00 2001 From: Krishna Manikandan Date: Mon, 1 Jun 2020 16:33:22 +0530 Subject: [PATCH 0618/1304] drm/msm: add shutdown support for display platform_driver [ Upstream commit 9d5cbf5fe46e350715389d89d0c350d83289a102 ] Define shutdown callback for display drm driver, so as to disable all the CRTCS when shutdown notification is received by the driver. This change will turn off the timing engine so that no display transactions are requested while mmu translations are getting disabled during reboot sequence. 
Signed-off-by: Krishna Manikandan Changes in v2: - Remove NULL check from msm_pdev_shutdown (Stephen Boyd) - Change commit text to reflect when this issue was uncovered (Sai Prakash Ranjan) Signed-off-by: Rob Clark Signed-off-by: Sasha Levin --- drivers/gpu/drm/msm/msm_drv.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 6f81de85fb86..7f45486b6650 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1358,6 +1358,13 @@ static int msm_pdev_remove(struct platform_device *pdev) return 0; } +static void msm_pdev_shutdown(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(drm); +} + static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, @@ -1369,6 +1376,7 @@ MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, + .shutdown = msm_pdev_shutdown, .driver = { .name = "msm", .of_match_table = dt_match, -- GitLab From bb0d61385e21d231322c9f78815c2b4f967b1e84 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Thu, 20 Aug 2020 06:19:32 -0700 Subject: [PATCH 0619/1304] hwmon: (applesmc) check status earlier. [ Upstream commit cecf7560f00a8419396a2ed0f6e5d245ccb4feac ] clang static analysis reports this representative problem applesmc.c:758:10: warning: 1st function call argument is an uninitialized value left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ buffer is filled by the earlier call ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, ... This problem is reported because a goto skips the status check. Other similar problems use data from applesmc_read_key before checking the status. So move the checks to before the use. 
Signed-off-by: Tom Rix Reviewed-by: Henrik Rydberg Link: https://lore.kernel.org/r/20200820131932.10590-1-trix@redhat.com Signed-off-by: Guenter Roeck Signed-off-by: Sasha Levin --- drivers/hwmon/applesmc.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 5c677ba44014..b201129a9bea 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -760,15 +760,18 @@ static ssize_t applesmc_light_show(struct device *dev, } ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); + if (ret) + goto out; /* newer macbooks report a single 10-bit bigendian value */ if (data_length == 10) { left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2; goto out; } left = buffer[2]; + + ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); if (ret) goto out; - ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); right = buffer[2]; out: @@ -817,12 +820,11 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); - speed = ((buffer[0] << 8 | buffer[1]) >> 2); - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); + + speed = ((buffer[0] << 8 | buffer[1]) >> 2); + return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", speed); } static ssize_t applesmc_store_fan_speed(struct device *dev, @@ -858,12 +860,11 @@ static ssize_t applesmc_show_fan_manual(struct device *dev, u8 buffer[2]; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); + + manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", manual); } static ssize_t applesmc_store_fan_manual(struct device *dev, @@ -879,10 +880,11 @@ static ssize_t applesmc_store_fan_manual(struct device *dev, 
return -EINVAL; ret = applesmc_read_key(FANS_MANUAL, buffer, 2); - val = (buffer[0] << 8 | buffer[1]); if (ret) goto out; + val = (buffer[0] << 8 | buffer[1]); + if (input) val = val | (0x01 << to_index(attr)); else @@ -958,13 +960,12 @@ static ssize_t applesmc_key_count_show(struct device *dev, u32 count; ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); - count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + - ((u32)buffer[2]<<8) + buffer[3]; - if (ret) return ret; - else - return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); + + count = ((u32)buffer[0]<<24) + ((u32)buffer[1]<<16) + + ((u32)buffer[2]<<8) + buffer[3]; + return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", count); } static ssize_t applesmc_key_at_index_read_show(struct device *dev, -- GitLab From 76abdb81893fd282c7844632e20275fffce2ec41 Mon Sep 17 00:00:00 2001 From: Amit Engel Date: Wed, 19 Aug 2020 11:31:11 +0300 Subject: [PATCH 0620/1304] nvmet: Disable keep-alive timer when kato is cleared to 0h [ Upstream commit 0d3b6a8d213a30387b5104b2fb25376d18636f23 ] Based on nvme spec, when keep alive timeout is set to zero the keep-alive timer should be disabled. 
Signed-off-by: Amit Engel Signed-off-by: Sagi Grimberg Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- drivers/nvme/target/core.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 776b7e9e23b9..f28df233dfcd 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -307,6 +307,9 @@ static void nvmet_keep_alive_timer(struct work_struct *work) static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d start keep-alive timer for %d secs\n", ctrl->cntlid, ctrl->kato); @@ -316,6 +319,9 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) { + if (unlikely(ctrl->kato == 0)) + return; + pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); cancel_delayed_work_sync(&ctrl->ka_work); -- GitLab From 6cd351b7d0cfe963c30fe63ee8de05084d976b41 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Thu, 20 Aug 2020 12:36:22 +0300 Subject: [PATCH 0621/1304] drm/msm/a6xx: fix gmu start on newer firmware [ Upstream commit f5749d6181fa7df5ae741788e5d96f593d3a60b6 ] New Qualcomm firmware has changed a way it reports back the 'started' event. Support new register values. 
Signed-off-by: Dmitry Baryshkov Signed-off-by: Rob Clark Signed-off-by: Sasha Levin --- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 9cde79a7335c..739ca9c2081a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -117,12 +117,22 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) { int ret; u32 val; + u32 mask, reset_val; + + val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); + if (val <= 0x20010004) { + mask = 0xffffffff; + reset_val = 0xbabeface; + } else { + mask = 0x1ff; + reset_val = 0x100; + } gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, - val == 0xbabeface, 100, 10000); + (val & mask) == reset_val, 100, 10000); if (ret) dev_err(gmu->dev, "GMU firmware initialization timed out\n"); -- GitLab From e69973638f741a0c712bde6090b37d17e608b533 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Aug 2020 11:00:26 -0400 Subject: [PATCH 0622/1304] ceph: don't allow setlease on cephfs [ Upstream commit 496ceaf12432b3d136dcdec48424312e71359ea7 ] Leases don't currently work correctly on kcephfs, as they are not broken when caps are revoked. They could eventually be implemented similarly to how we did them in libcephfs, but for now don't allow them. 
[ idryomov: no need for simple_nosetlease() in ceph_dir_fops and ceph_snapdir_fops ] Signed-off-by: Jeff Layton Reviewed-by: Ilya Dryomov Signed-off-by: Ilya Dryomov Signed-off-by: Sasha Levin --- fs/ceph/file.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index faca455bd3c6..4ce2752c8b71 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1819,6 +1819,7 @@ const struct file_operations ceph_file_fops = { .mmap = ceph_mmap, .fsync = ceph_fsync, .lock = ceph_lock, + .setlease = simple_nosetlease, .flock = ceph_flock, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, -- GitLab From c12a4e6ac32c6d5978fecc0501b7bb9c8ddd2bae Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 20 Aug 2020 16:47:24 +0200 Subject: [PATCH 0623/1304] cpuidle: Fixup IRQ state [ Upstream commit 49d9c5936314e44d314c605c39cce0fd947f9c3a ] Match the pattern elsewhere in this file. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Steven Rostedt (VMware) Reviewed-by: Thomas Gleixner Acked-by: Rafael J. 
Wysocki Tested-by: Marco Elver Link: https://lkml.kernel.org/r/20200821085348.251340558@infradead.org Signed-off-by: Sasha Levin --- drivers/cpuidle/cpuidle.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 6df894d65d9e..2d182dc1b49e 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -148,7 +148,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, */ stop_critical_timings(); drv->states[index].enter_s2idle(dev, drv, index); - WARN_ON(!irqs_disabled()); + if (WARN_ON_ONCE(!irqs_disabled())) + local_irq_disable(); /* * timekeeping_resume() that will be called by tick_unfreeze() for the * first CPU executing it calls functions containing RCU read-side -- GitLab From 228d5227dcdc74d9157a4f36cfa52ac6c1a088f9 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Thu, 20 Aug 2020 09:48:23 +0200 Subject: [PATCH 0624/1304] s390: don't trace preemption in percpu macros [ Upstream commit 1196f12a2c960951d02262af25af0bb1775ebcc2 ] Since commit a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables") the lockdep code itself uses percpu variables. This leads to recursions because the percpu macros are calling preempt_enable() which might call trace_preempt_on(). 
Signed-off-by: Sven Schnelle Reviewed-by: Vasily Gorbik Signed-off-by: Vasily Gorbik Signed-off-by: Sasha Levin --- arch/s390/include/asm/percpu.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 0095ddb58ff6..50f6661ba566 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -29,7 +29,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ old__, new__, prev__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ prev__ = *ptr__; \ do { \ @@ -37,7 +37,7 @@ new__ = old__ op (val); \ prev__ = cmpxchg(ptr__, old__, new__); \ } while (prev__ != old__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ new__; \ }) @@ -68,7 +68,7 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ if (__builtin_constant_p(val__) && \ ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ @@ -84,7 +84,7 @@ : [val__] "d" (val__) \ : "cc"); \ } \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) @@ -95,14 +95,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : [val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ old__ + val__; \ }) @@ -114,14 +114,14 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ val__ = (val); \ pcp_op_T__ old__, *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ asm volatile( \ op " %[old__],%[val__],%[ptr__]\n" \ : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ : 
[val__] "d" (val__) \ : "cc"); \ - preempt_enable(); \ + preempt_enable_notrace(); \ } #define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan") @@ -136,10 +136,10 @@ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ ret__; \ pcp_op_T__ *ptr__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = cmpxchg(ptr__, oval, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -152,10 +152,10 @@ ({ \ typeof(pcp) *ptr__; \ typeof(pcp) ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ ptr__ = raw_cpu_ptr(&(pcp)); \ ret__ = xchg(ptr__, nval); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) @@ -171,11 +171,11 @@ typeof(pcp1) *p1__; \ typeof(pcp2) *p2__; \ int ret__; \ - preempt_disable(); \ + preempt_disable_notrace(); \ p1__ = raw_cpu_ptr(&(pcp1)); \ p2__ = raw_cpu_ptr(&(pcp2)); \ ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ - preempt_enable(); \ + preempt_enable_notrace(); \ ret__; \ }) -- GitLab From 47eb291ba65bfade197e73ee13610d97809cb087 Mon Sep 17 00:00:00 2001 From: Simon Leiner Date: Tue, 25 Aug 2020 11:31:52 +0200 Subject: [PATCH 0625/1304] xen/xenbus: Fix granting of vmalloc'd memory [ Upstream commit d742db70033c745e410523e00522ee0cfe2aa416 ] On some architectures (like ARM), virt_to_gfn cannot be used for vmalloc'd memory because of its reliance on virt_to_phys. This patch introduces a check for vmalloc'd addresses and obtains the PFN using vmalloc_to_pfn in that case. 
Signed-off-by: Simon Leiner Reviewed-by: Stefano Stabellini Link: https://lore.kernel.org/r/20200825093153.35500-1-simon@leiner.me Signed-off-by: Juergen Gross Signed-off-by: Sasha Levin --- drivers/xen/xenbus/xenbus_client.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index e94a61eaeceb..f7b553faadb1 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -365,8 +365,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, int i, j; for (i = 0; i < nr_pages; i++) { - err = gnttab_grant_foreign_access(dev->otherend_id, - virt_to_gfn(vaddr), 0); + unsigned long gfn; + + if (is_vmalloc_addr(vaddr)) + gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr)); + else + gfn = virt_to_gfn(vaddr); + + err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); if (err < 0) { xenbus_dev_fatal(dev, err, "granting access to ring page"); -- GitLab From fcaafd72ad6408bd4d463f18b289c27c16cb331e Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Thu, 6 Aug 2020 13:49:28 +0300 Subject: [PATCH 0626/1304] dmaengine: of-dma: Fix of_dma_router_xlate's of_dma_xlate handling [ Upstream commit 5b2aa9f918f6837ae943557f8cec02c34fcf80e7 ] of_dma_xlate callback can return ERR_PTR as well NULL in case of failure. If error code is returned (not NULL) then the route should be released and the router should not be registered for the channel. 
Fixes: 56f13c0d9524c ("dmaengine: of_dma: Support for DMA routers") Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200806104928.25975-1-peter.ujfalusi@ti.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/of-dma.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 91fd395c90c4..8344a60c2131 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -72,12 +72,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, return NULL; chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); - if (chan) { - chan->router = ofdma->dma_router; - chan->route_data = route_data; - } else { + if (IS_ERR_OR_NULL(chan)) { ofdma->dma_router->route_free(ofdma->dma_router->dev, route_data); + } else { + chan->router = ofdma->dma_router; + chan->route_data = route_data; } /* -- GitLab From 1046c9a3563e48eb086a8333a5ca6ab692f43638 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 22 Jul 2020 20:36:43 +0200 Subject: [PATCH 0627/1304] batman-adv: Avoid uninitialized chaddr when handling DHCP [ Upstream commit 303216e76dcab6049c9d42390b1032f0649a8206 ] The gateway client code can try to optimize the delivery of DHCP packets to avoid broadcasting them through the whole mesh. But also transmissions to the client can be optimized by looking up the destination via the chaddr of the DHCP packet. But the chaddr is currently only done when chaddr is fully inside the non-paged area of the skbuff. Otherwise it will not be initialized and the unoptimized path should have been taken. But the implementation didn't handle this correctly. It didn't retrieve the correct chaddr but still tried to perform the TT lookup with this uninitialized memory. 
Reported-by: syzbot+ab16e463b903f5a37036@syzkaller.appspotmail.com Fixes: 6c413b1c22a2 ("batman-adv: send every DHCP packet as bat-unicast") Signed-off-by: Sven Eckelmann Acked-by: Antonio Quartulli Signed-off-by: Simon Wunderlich Signed-off-by: Sasha Levin --- net/batman-adv/gateway_client.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 140c61a3f1ec..0c59fefc1371 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -714,8 +714,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET; /* store the client address if the message is going to a client */ - if (ret == BATADV_DHCP_TO_CLIENT && - pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) { + if (ret == BATADV_DHCP_TO_CLIENT) { + if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) + return BATADV_DHCP_NO; + /* check if the DHCP packet carries an Ethernet DHCP */ p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET; if (*p != BATADV_DHCP_HTYPE_ETHERNET) -- GitLab From e6723a869abe3aa171cc2f6bc1686025447fce95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Thu, 23 Jul 2020 14:28:08 +0200 Subject: [PATCH 0628/1304] batman-adv: Fix own OGM check in aggregated OGMs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit d8bf0c01642275c7dca1e5d02c34e4199c200b1f ] The own OGM check is currently misplaced and can lead to the following issues: For one thing we might receive an aggregated OGM from a neighbor node which has our own OGM in the first place. We would then not only skip our own OGM but erroneously also any other, following OGM in the aggregate. For another, we might receive an OGM aggregate which has our own OGM in a place other then the first one. 
Then we would wrongly not skip this OGM, leading to populating the originator and gateway table with ourself. Fixes: 9323158ef9f4 ("batman-adv: OGMv2 - implement originators logic") Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Sasha Levin --- net/batman-adv/bat_v_ogm.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 0458de53cb64..04a620fd1301 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -716,6 +716,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl, ogm_packet->version, ntohs(ogm_packet->tvlv_len)); + if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) { + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "Drop packet: originator packet from ourself\n"); + return; + } + /* If the throughput metric is 0, immediately drop the packet. No need * to create orig_node / neigh_node for an unusable route. */ @@ -843,11 +849,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; - ogm_packet = (struct batadv_ogm2_packet *)skb->data; - - if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) - goto free_skb; - batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES, skb->len + ETH_HLEN); -- GitLab From 3eabc73c4a3ce80559886c9386f132399f0ad8fe Mon Sep 17 00:00:00 2001 From: Jussi Kivilinna Date: Tue, 18 Aug 2020 17:46:10 +0300 Subject: [PATCH 0629/1304] batman-adv: bla: use netif_rx_ni when not in interrupt context [ Upstream commit 279e89b2281af3b1a9f04906e157992c19c9f163 ] batadv_bla_send_claim() gets called from worker thread context through batadv_bla_periodic_work(), thus netif_rx_ni needs to be used in that case. This fixes "NOHZ: local_softirq_pending 08" log messages seen when batman-adv is enabled. 
Fixes: 23721387c409 ("batman-adv: add basic bridge loop avoidance code") Signed-off-by: Jussi Kivilinna Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Sasha Levin --- net/batman-adv/bridge_loop_avoidance.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 85faf25c2912..9b8bf06ccb61 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -450,7 +450,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); - netif_rx(skb); + if (in_interrupt()) + netif_rx(skb); + else + netif_rx_ni(skb); out: if (primary_if) batadv_hardif_put(primary_if); -- GitLab From cd8c3a766bf286a5e9f4a72ec69e611ae2c88ea6 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Mon, 17 Aug 2020 19:57:26 +0800 Subject: [PATCH 0630/1304] dmaengine: at_hdmac: check return value of of_find_device_by_node() in at_dma_xlate() [ Upstream commit 0cef8e2c5a07d482ec907249dbd6687e8697677f ] The return value of of_find_device_by_node() is not checked, thus null pointer dereference will be triggered if of_find_device_by_node() failed. 
Fixes: bbe89c8e3d59 ("at_hdmac: move to generic DMA binding") Signed-off-by: Yu Kuai Link: https://lore.kernel.org/r/20200817115728.1706719-2-yukuai3@huawei.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/at_hdmac.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index dbc51154f122..86427f6ba78c 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1677,6 +1677,8 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, return NULL; dmac_pdev = of_find_device_by_node(dma_spec->np); + if (!dmac_pdev) + return NULL; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); -- GitLab From 04b3604008265fb84f8fc7d7646ee652b4546834 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 19 Aug 2020 11:26:44 -0700 Subject: [PATCH 0631/1304] MIPS: mm: BMIPS5000 has inclusive physical caches [ Upstream commit dbfc95f98f0158958d1f1e6bf06d74be38dbd821 ] When the BMIPS generic cpu-feature-overrides.h file was introduced, cpu_has_inclusive_caches/MIPS_CPU_INCLUSIVE_CACHES was not set for BMIPS5000 CPUs. Correct this when we have initialized the MIPS secondary cache successfully. 
Fixes: f337967d6d87 ("MIPS: BMIPS: Add cpu-feature-overrides.h") Signed-off-by: Florian Fainelli Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/mm/c-r4k.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 05a539d3a597..7650edd5cf7f 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1789,7 +1789,11 @@ static void setup_scache(void) printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); + + if (current_cpu_type() == CPU_BMIPS5000) + c->options |= MIPS_CPU_INCLUSIVE_CACHES; } + #else if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); -- GitLab From 3e2dae2272915679d38440726011a37e980b6245 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 19 Aug 2020 11:26:45 -0700 Subject: [PATCH 0632/1304] MIPS: BMIPS: Also call bmips_cpu_setup() for secondary cores [ Upstream commit e14f633b66902615cf7faa5d032b45ab8b6fb158 ] The initialization done by bmips_cpu_setup() typically affects both threads of a given core, on 7435 which supports 2 cores and 2 threads, logical CPU number 2 and 3 would not run this initialization. 
Fixes: 738a3f79027b ("MIPS: BMIPS: Add early CPU initialization code") Signed-off-by: Florian Fainelli Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/kernel/smp-bmips.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 5ec546b5eed1..d16e6654a655 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -240,6 +240,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle) */ static void bmips_init_secondary(void) { + bmips_cpu_setup(); + switch (current_cpu_type()) { case CPU_BMIPS4350: case CPU_BMIPS4380: -- GitLab From 88c161599f17249deed573befc5f85f4cd919e7b Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 20 Aug 2020 14:12:54 +0200 Subject: [PATCH 0633/1304] netfilter: nf_tables: add NFTA_SET_USERDATA if not null [ Upstream commit 6f03bf43ee05b31d3822def2a80f11b3591c55b3 ] Kernel sends an empty NFTA_SET_USERDATA attribute with no value if userspace adds a set with no NFTA_SET_USERDATA attribute. 
Fixes: e6d8ecac9e68 ("netfilter: nf_tables: Add new attributes into nft_set to store user data.") Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- net/netfilter/nf_tables_api.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 1b8a53081632..159ec1533c98 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3204,7 +3204,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, goto nla_put_failure; } - if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) + if (set->udata && + nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata)) goto nla_put_failure; desc = nla_nest_start(skb, NFTA_SET_DESC); -- GitLab From 3f21d1dd7cafb0230dc141e64ec5da622b3b1c46 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 20 Aug 2020 14:12:55 +0200 Subject: [PATCH 0634/1304] netfilter: nf_tables: incorrect enum nft_list_attributes definition [ Upstream commit da9125df854ea48a6240c66e8a67be06e2c12c03 ] This should be NFTA_LIST_UNSPEC instead of NFTA_LIST_UNPEC, all other similar attribute definitions are postfixed with _UNSPEC. 
Fixes: 96518518cc41 ("netfilter: add nftables") Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- include/uapi/linux/netfilter/nf_tables.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 5eac62e1b68d..cc00be102b9f 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -132,7 +132,7 @@ enum nf_tables_msg_types { * @NFTA_LIST_ELEM: list element (NLA_NESTED) */ enum nft_list_attributes { - NFTA_LIST_UNPEC, + NFTA_LIST_UNSPEC, NFTA_LIST_ELEM, __NFTA_LIST_MAX }; -- GitLab From 86c459915577b0c87287b11f4dde44f908885461 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 20 Aug 2020 21:05:50 +0200 Subject: [PATCH 0635/1304] netfilter: nf_tables: fix destination register zeroing [ Upstream commit 1e105e6afa6c3d32bfb52c00ffa393894a525c27 ] Following bug was reported via irc: nft list ruleset set knock_candidates_ipv4 { type ipv4_addr . inet_service size 65535 elements = { 127.0.0.1 . 123, 127.0.0.1 . 123 } } .. udp dport 123 add @knock_candidates_ipv4 { ip saddr . 123 } udp dport 123 add @knock_candidates_ipv4 { ip saddr . udp dport } It should not have been possible to add a duplicate set entry. After some debugging it turned out that the problem is the immediate value (123) in the second-to-last rule. Concatenations use 32bit registers, i.e. the elements are 8 bytes each, not 6 and it turns out the kernel inserted inet firewall @knock_candidates_ipv4 element 0100007f ffff7b00 : 0 [end] element 0100007f 00007b00 : 0 [end] Note the non-zero upper bits of the first element. It turns out that nft_immediate doesn't zero the destination register, but this is needed when the length isn't a multiple of 4. Furthermore, the zeroing in nft_payload is broken. We can't use [len / 4] = 0 -- if len is a multiple of 4, index is off by one. Skip zeroing in this case and use a conditional instead of (len -1) / 4. 
Fixes: 49499c3e6e18 ("netfilter: nf_tables: switch registers to 32 bit addressing") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- include/net/netfilter/nf_tables.h | 2 ++ net/netfilter/nft_payload.c | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 024636c31adc..93253ba1eeac 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -130,6 +130,8 @@ static inline u8 nft_reg_load8(u32 *sreg) static inline void nft_data_copy(u32 *dst, const struct nft_data *src, unsigned int len) { + if (len % NFT_REG32_SIZE) + dst[len / NFT_REG32_SIZE] = 0; memcpy(dst, src, len); } diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 19446a89a2a8..b1a9f330a51f 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -79,7 +79,9 @@ static void nft_payload_eval(const struct nft_expr *expr, u32 *dest = ®s->data[priv->dreg]; int offset; - dest[priv->len / NFT_REG32_SIZE] = 0; + if (priv->len % NFT_REG32_SIZE) + dest[priv->len / NFT_REG32_SIZE] = 0; + switch (priv->base) { case NFT_PAYLOAD_LL_HEADER: if (!skb_mac_header_was_set(skb)) -- GitLab From 743f4a03d469695f21a1e5bcd9ec857982ba8c7c Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Mon, 24 Aug 2020 13:44:42 +0800 Subject: [PATCH 0636/1304] net: hns: Fix memleak in hns_nic_dev_probe [ Upstream commit 100e3345c6e719d2291e1efd5de311cc24bb9c0b ] hns_nic_dev_probe allocates ndev, but not free it on two error handling paths, which may lead to memleak. Fixes: 63434888aaf1b ("net: hns: net: hns: enet adds support of acpi") Signed-off-by: Dinghao Liu Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 024b08fafd3b..4de65a9de0a6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2297,8 +2297,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->enet_ver = AE_VERSION_1; else if (acpi_dev_found(hns_enet_acpi_match[1].id)) priv->enet_ver = AE_VERSION_2; - else - return -ENXIO; + else { + ret = -ENXIO; + goto out_read_prop_fail; + } /* try to find port-idx-in-ae first */ ret = acpi_node_get_property_reference(dev->fwnode, @@ -2314,7 +2316,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->fwnode = args.fwnode; } else { dev_err(dev, "cannot read cfg data from OF or acpi\n"); - return -ENXIO; + ret = -ENXIO; + goto out_read_prop_fail; } ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); -- GitLab From c7d1a655c7b4f0fde2c64ee43e293d2df676fdbe Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Mon, 24 Aug 2020 13:58:31 +0800 Subject: [PATCH 0637/1304] net: systemport: Fix memleak in bcm_sysport_probe [ Upstream commit 7ef1fc57301f3cef7201497aa27e89ccb91737fe ] When devm_kcalloc() fails, dev should be freed just like what we've done in the subsequent error paths. Fixes: 7b78be48a8eb6 ("net: systemport: Dynamically allocate number of TX rings") Signed-off-by: Dinghao Liu Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/bcmsysport.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 6b761f6b8fd5..9a614c5cdfa2 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2441,8 +2441,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->tx_rings = devm_kcalloc(&pdev->dev, txq, sizeof(struct bcm_sysport_tx_ring), GFP_KERNEL); - if (!priv->tx_rings) - return -ENOMEM; + if (!priv->tx_rings) { + ret = -ENOMEM; + goto err_free_netdev; + } priv->is_lite = params->is_lite; priv->num_rx_desc_words = params->num_rx_desc_words; -- GitLab From fb3a780e7a76cf8efb055f8322ec039923cee41f Mon Sep 17 00:00:00 2001 From: Yuusuke Ashizuka Date: Thu, 20 Aug 2020 18:43:07 +0900 Subject: [PATCH 0638/1304] ravb: Fixed to be able to unload modules [ Upstream commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc ] When this driver is built as a module, I cannot rmmod it after insmoding it. This is because that this driver calls ravb_mdio_init() at the time of probe, and module->refcnt is incremented by alloc_mdio_bitbang() called after that. Therefore, even if ifup is not performed, the driver is in use and rmmod cannot be performed. $ lsmod Module Size Used by ravb 40960 1 $ rmmod ravb rmmod: ERROR: Module ravb is in use Call ravb_mdio_init() at open and free_mdio_bitbang() at close, thereby rmmod is possible in the ifdown state. Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper") Signed-off-by: Yuusuke Ashizuka Reviewed-by: Sergei Shtylyov Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/renesas/ravb_main.c | 110 +++++++++++------------ 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 569e698b5c80..b5066cf86c85 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1337,6 +1337,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, return error; } +/* MDIO bus init function */ +static int ravb_mdio_init(struct ravb_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + /* Bitbang init */ + priv->mdiobb.ops = &bb_ops; + + /* MII controller setting */ + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); + if (!priv->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + priv->mii_bus->name = "ravb_mii"; + priv->mii_bus->parent = dev; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register MDIO bus */ + error = of_mdiobus_register(priv->mii_bus, dev->of_node); + if (error) + goto out_free_bus; + + return 0; + +out_free_bus: + free_mdio_bitbang(priv->mii_bus); + return error; +} + +/* MDIO bus release function */ +static int ravb_mdio_release(struct ravb_private *priv) +{ + /* Unregister mdio bus */ + mdiobus_unregister(priv->mii_bus); + + /* Free bitbang info */ + free_mdio_bitbang(priv->mii_bus); + + return 0; +} + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { @@ -1345,6 +1390,13 @@ static int ravb_open(struct net_device *ndev) struct device *dev = &pdev->dev; int error; + /* MDIO bus init */ + error = ravb_mdio_init(priv); + if (error) { + netdev_err(ndev, "failed to initialize MDIO\n"); + return error; + } + napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); @@ -1422,6 +1474,7 @@ static int ravb_open(struct net_device *ndev) 
out_napi_off: napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); return error; } @@ -1721,6 +1774,8 @@ static int ravb_close(struct net_device *ndev) ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); + ravb_mdio_release(priv); + return 0; } @@ -1867,51 +1922,6 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_set_features = ravb_set_features, }; -/* MDIO bus init function */ -static int ravb_mdio_init(struct ravb_private *priv) -{ - struct platform_device *pdev = priv->pdev; - struct device *dev = &pdev->dev; - int error; - - /* Bitbang init */ - priv->mdiobb.ops = &bb_ops; - - /* MII controller setting */ - priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); - if (!priv->mii_bus) - return -ENOMEM; - - /* Hook up MII support for ethtool */ - priv->mii_bus->name = "ravb_mii"; - priv->mii_bus->parent = dev; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - /* Register MDIO bus */ - error = of_mdiobus_register(priv->mii_bus, dev->of_node); - if (error) - goto out_free_bus; - - return 0; - -out_free_bus: - free_mdio_bitbang(priv->mii_bus); - return error; -} - -/* MDIO bus release function */ -static int ravb_mdio_release(struct ravb_private *priv) -{ - /* Unregister mdio bus */ - mdiobus_unregister(priv->mii_bus); - - /* Free bitbang info */ - free_mdio_bitbang(priv->mii_bus); - - return 0; -} - static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, @@ -2138,13 +2148,6 @@ static int ravb_probe(struct platform_device *pdev) eth_hw_addr_random(ndev); } - /* MDIO bus init */ - error = ravb_mdio_init(priv); - if (error) { - dev_err(&pdev->dev, "failed to initialize MDIO\n"); - goto out_dma_free; - } - netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); @@ 
-2166,8 +2169,6 @@ static int ravb_probe(struct platform_device *pdev) out_napi_del: netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); -out_dma_free: dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); @@ -2199,7 +2200,6 @@ static int ravb_remove(struct platform_device *pdev) unregister_netdev(ndev); netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); free_netdev(ndev); platform_set_drvdata(pdev, NULL); -- GitLab From 9986cb065b9092fe99098acf410f14247f188b6e Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Sun, 23 Aug 2020 16:56:47 +0800 Subject: [PATCH 0639/1304] net: arc_emac: Fix memleak in arc_mdio_probe [ Upstream commit e2d79cd8875fa8c3cc7defa98a8cc99a1ed0c62f ] When devm_gpiod_get_optional() fails, bus should be freed just like when of_mdiobus_register() fails. Fixes: 1bddd96cba03d ("net: arc_emac: support the phy reset for emac driver") Signed-off-by: Dinghao Liu Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/arc/emac_mdio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 0187dbf3b87d..54cdafdd067d 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) if (IS_ERR(data->reset_gpio)) { error = PTR_ERR(data->reset_gpio); dev_err(priv->dev, "Failed to request gpio: %d\n", error); + mdiobus_free(bus); return error; } -- GitLab From b36678b8da827585c457f22b41e6c93a1f502710 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Tue, 25 Aug 2020 08:46:17 +0200 Subject: [PATCH 0640/1304] dmaengine: pl330: Fix burst length if burst size is smaller than bus width [ Upstream commit 0661cef675d37e2c4b66a996389ebeae8568e49e ] Move the burst len fixup after setting the generic value for it. This finally enables the fixup introduced by commit 137bd11090d8 ("dmaengine: pl330: Align DMA memcpy operations to MFIFO width"), which otherwise was overwritten by the generic value. Reported-by: kernel test robot Fixes: 137bd11090d8 ("dmaengine: pl330: Align DMA memcpy operations to MFIFO width") Signed-off-by: Marek Szyprowski Link: https://lore.kernel.org/r/20200825064617.16193-1-m.szyprowski@samsung.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/pl330.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index bc8050c025b7..c564df713efc 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2769,6 +2769,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; + desc->rqcfg.brst_len = get_burst_len(desc, len); /* * If burst size is smaller than bus width then make sure we only * transfer one at a time to avoid a burst stradling an MFIFO entry. 
@@ -2776,7 +2777,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) desc->rqcfg.brst_len = 1; - desc->rqcfg.brst_len = get_burst_len(desc, len); desc->bytes_requested = len; desc->txd.flags = flags; -- GitLab From da559d2ed21cb3c17d9b137773b2d29edd5fb385 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Tue, 25 Aug 2020 14:59:40 +0200 Subject: [PATCH 0641/1304] gtp: add GTPA_LINK info to msg sent to userspace [ Upstream commit b274e47d9e3f4dcd4ad4028a316ec22dc4533ac7 ] During a dump, this attribute is essential, it enables the userspace to know on which interface the context is linked to. Fixes: 459aa660eb1d ("gtp: add initial driver for datapath of GPRS Tunneling Protocol (GTP-U)") Signed-off-by: Nicolas Dichtel Tested-by: Gabriel Ganne Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/gtp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index d73850ebb671..f2fecb684220 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1187,6 +1187,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, goto nlmsg_failure; if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) || + nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) || nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) || nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr)) goto nla_put_failure; -- GitLab From 296802fe11fe2060fae691006172b2f7d937f184 Mon Sep 17 00:00:00 2001 From: Pavan Chebbi Date: Wed, 26 Aug 2020 01:08:32 -0400 Subject: [PATCH 0642/1304] bnxt_en: Don't query FW when netif_running() is false. [ Upstream commit c1c2d77408022a398a1a7c51cf20488c922629de ] In rare conditions like two stage OS installation, the ethtool's get_channels function may be called when the device is in D3 state, leading to uncorrectable PCI error. 
Check netif_running() first before making any query to FW which involves writing to BAR. Fixes: db4723b3cd2d ("bnxt_en: Check max_tx_scheduler_inputs value from firmware.") Signed-off-by: Pavan Chebbi Signed-off-by: Michael Chan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 63730e449e08..14fe4f9f24b8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -471,7 +471,7 @@ static void bnxt_get_channels(struct net_device *dev, int max_tx_sch_inputs; /* Get the most up-to-date max_tx_sch_inputs. */ - if (BNXT_NEW_RM(bp)) + if (netif_running(dev) && BNXT_NEW_RM(bp)) bnxt_hwrm_func_resc_qcaps(bp, false); max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; -- GitLab From 8674defc50ba2026203a99d2ce11d01ebffb03bc Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Wed, 26 Aug 2020 01:08:33 -0400 Subject: [PATCH 0643/1304] bnxt_en: Check for zero dir entries in NVRAM. [ Upstream commit dbbfa96ad920c50d58bcaefa57f5f33ceef9d00e ] If firmware goes into unstable state, HWRM_NVM_GET_DIR_INFO firmware command may return zero dir entries. Return error in such case to avoid zero length dma buffer request. Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 14fe4f9f24b8..a1cb99110092 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1877,6 +1877,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) if (rc != 0) return rc; + if (!dir_entries || !entry_length) + return -EIO; + /* Insert 2 bytes of directory info (count and size of entries) */ if (len < 2) return -EINVAL; -- GitLab From 9895dfea9610ae54be8890b98eb17fd7f1496c75 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Wed, 26 Aug 2020 01:08:35 -0400 Subject: [PATCH 0644/1304] bnxt_en: Fix PCI AER error recovery flow [ Upstream commit df3875ec550396974b1d8a518bd120d034738236 ] When a PCI error is detected the PCI state could be corrupt, save the PCI state after initialization and restore it after the slot reset. Fixes: 6316ea6db93d ("bnxt_en: Enable AER support.") Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ab4d1dacb585..7047f4237cea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -9128,6 +9128,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) (long)pci_resource_start(pdev, 0), dev->dev_addr); pcie_print_link_status(pdev); + pci_save_state(pdev); return 0; init_err_cleanup_tc: @@ -9289,6 +9290,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) "Cannot re-enable PCI device after reset.\n"); } else { pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); err = bnxt_hwrm_func_reset(bp); if (!err && netif_running(netdev)) -- GitLab From fd7b07382fff9ff6d090adfa0236d28b6b21402c Mon Sep 17 00:00:00 2001 From: Edwin Peer Date: Wed, 26 Aug 2020 01:08:37 -0400 Subject: [PATCH 0645/1304] bnxt_en: fix HWRM error when querying VF temperature [ Upstream commit 12cce90b934bf2b0ed9c339b4d5503e69954351a ] Firmware returns RESOURCE_ACCESS_DENIED for HWRM_TEMP_MONITORY_QUERY for VFs. This produces unpleasing error messages in the log when temp1_input is queried via the hwmon sysfs interface from a VF. The error is harmless and expected, so silence it and return unknown as the value. Since the device temperature is not particularly sensitive information, provide flexibility to change this policy in future by silencing the error rather than avoiding the HWRM call entirely for VFs. Fixes: cde49a42a9bb ("bnxt_en: Add hwmon sysfs support to read temperature") Cc: Marc Smith Reported-by: Marc Smith Signed-off-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7047f4237cea..df3514503dee 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6836,16 +6836,19 @@ static ssize_t bnxt_show_temp(struct device *dev, struct hwrm_temp_monitor_query_input req = {0}; struct hwrm_temp_monitor_query_output *resp; struct bnxt *bp = dev_get_drvdata(dev); - u32 temp = 0; + u32 len = 0; resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); - if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) - temp = resp->temp * 1000; /* display millidegree */ + if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ mutex_unlock(&bp->hwrm_cmd_lock); - return sprintf(buf, "%u\n", temp); + if (len) + return len; + + return sprintf(buf, "unknown\n"); } static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); -- GitLab From 017265f1421529a473c25dc46a0cee98facbb1a3 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Wed, 26 Aug 2020 14:11:58 -0700 Subject: [PATCH 0646/1304] xfs: fix boundary test in xfs_attr_shortform_verify [ Upstream commit f4020438fab05364018c91f7e02ebdd192085933 ] The boundary test for the fixed-offset parts of xfs_attr_sf_entry in xfs_attr_shortform_verify is off by one, because the variable array at the end is defined as nameval[1] not nameval[]. Hence we need to subtract 1 from the calculation. This can be shown by: # touch file # setfattr -n root.a file and verifications will fail when it's written to disk. 
This only matters for a last attribute which has a single-byte name and no value, otherwise the combination of namelen & valuelen will push endp further out and this test won't fail. Fixes: 1e1bbd8e7ee06 ("xfs: create structure verifier function for shortform xattrs") Signed-off-by: Eric Sandeen Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong Reviewed-by: Christoph Hellwig Signed-off-by: Sasha Levin --- fs/xfs/libxfs/xfs_attr_leaf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 2652d00842d6..087a5715cf20 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -935,8 +935,10 @@ xfs_attr_shortform_verify( * struct xfs_attr_sf_entry has a variable length. * Check the fixed-offset parts of the structure are * within the data buffer. + * xfs_attr_sf_entry is defined with a 1-byte variable + * array at the end, so we must subtract that off. */ - if (((char *)sfep + sizeof(*sfep)) >= endp) + if (((char *)sfep + sizeof(*sfep) - 1) >= endp) return __this_address; /* Don't allow names with known bad length. */ -- GitLab From 05163210d6abcee771d3526fa78bf6b00b2c2041 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Aug 2020 12:40:07 -0700 Subject: [PATCH 0647/1304] bnxt: don't enable NAPI until rings are ready [ Upstream commit 96ecdcc992eb7f468b2cf829b0f5408a1fad4668 ] Netpoll can try to poll napi as soon as napi_enable() is called. 
It crashes trying to access a doorbell which is still NULL: BUG: kernel NULL pointer dereference, address: 0000000000000000 CPU: 59 PID: 6039 Comm: ethtool Kdump: loaded Tainted: G S 5.9.0-rc1-00469-g5fd99b5d9950-dirty #26 RIP: 0010:bnxt_poll+0x121/0x1c0 Code: c4 20 44 89 e0 5b 5d 41 5c 41 5d 41 5e 41 5f c3 41 8b 86 a0 01 00 00 41 23 85 18 01 00 00 49 8b 96 a8 01 00 00 0d 00 00 00 24 <89> 02 41 f6 45 77 02 74 cb 49 8b ae d8 01 00 00 31 c0 c7 44 24 1a netpoll_poll_dev+0xbd/0x1a0 __netpoll_send_skb+0x1b2/0x210 netpoll_send_udp+0x2c9/0x406 write_ext_msg+0x1d7/0x1f0 console_unlock+0x23c/0x520 vprintk_emit+0xe0/0x1d0 printk+0x58/0x6f x86_vector_activate.cold+0xf/0x46 __irq_domain_activate_irq+0x50/0x80 __irq_domain_activate_irq+0x32/0x80 __irq_domain_activate_irq+0x32/0x80 irq_domain_activate_irq+0x25/0x40 __setup_irq+0x2d2/0x700 request_threaded_irq+0xfb/0x160 __bnxt_open_nic+0x3b1/0x750 bnxt_open_nic+0x19/0x30 ethtool_set_channels+0x1ac/0x220 dev_ethtool+0x11ba/0x2240 dev_ioctl+0x1cf/0x390 sock_do_ioctl+0x95/0x130 Reported-by: Rob Sherwood Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") Signed-off-by: Jakub Kicinski Reviewed-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index df3514503dee..a267380b267d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7027,15 +7027,15 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - bnxt_enable_napi(bp); - bnxt_debug_dev_init(bp); - rc = bnxt_init_nic(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); - goto open_err; + goto open_err_irq; } + bnxt_enable_napi(bp); + bnxt_debug_dev_init(bp); + if (link_re_init) { mutex_lock(&bp->link_lock); rc = bnxt_update_phy_setting(bp); @@ -7066,10 +7066,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_vf_reps_open(bp); return 0; -open_err: - bnxt_debug_dev_exit(bp); - bnxt_disable_napi(bp); - open_err_irq: bnxt_del_napi(bp); -- GitLab From 0430561c8e0f4b497b29a169445e2477c607e27b Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 26 Aug 2020 10:17:36 +0200 Subject: [PATCH 0648/1304] selftests/bpf: Fix massive output from test_maps [ Upstream commit fa4505675e093e895b7ec49a76d44f6b5ad9602e ] When stdout output from the selftests tool 'test_maps' gets redirected into e.g file or pipe, then the output lines increase a lot (from 21 to 33949 lines). This is caused by the printf that happens before the fork() call, and there are user-space buffered printf data that seems to be duplicated into the forked process. To fix this fflush() stdout before the fork loop in __run_parallel(). 
Fixes: 1a97cf1fe503 ("selftests/bpf: speedup test_maps") Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/159842985651.1050885.2154399297503372406.stgit@firesoul Signed-off-by: Sasha Levin --- tools/testing/selftests/bpf/test_maps.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 9b552c0fc47d..4e202217fae1 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -1017,6 +1017,8 @@ static void __run_parallel(int tasks, void (*fn)(int task, void *data), pid_t pid[tasks]; int i; + fflush(stdout); + for (i = 0; i < tasks; i++) { pid[i] = fork(); if (pid[i] == 0) { -- GitLab From 9ad2f018636c6741c41867f14d49d9441b50930d Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 23 Aug 2020 13:55:36 +0200 Subject: [PATCH 0649/1304] netfilter: nfnetlink: nfnetlink_unicast() reports EAGAIN instead of ENOBUFS [ Upstream commit ee921183557af39c1a0475f982d43b0fcac25e2e ] Frontend callback reports EAGAIN to nfnetlink to retry a command, this is used to signal that module autoloading is required. Unfortunately, nlmsg_unicast() reports EAGAIN in case the receiver socket buffer gets full, so it enters a busy-loop. This patch updates nfnetlink_unicast() to turn EAGAIN into ENOBUFS and to use nlmsg_unicast(). Remove the flags field in nfnetlink_unicast() since this is always MSG_DONTWAIT in the existing code which is exactly what nlmsg_unicast() passes to netlink_unicast() as parameter. 
Fixes: 96518518cc41 ("netfilter: add nftables") Reported-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- include/linux/netfilter/nfnetlink.h | 3 +- net/netfilter/nf_tables_api.c | 61 ++++++++++++++--------------- net/netfilter/nfnetlink.c | 11 ++++-- net/netfilter/nfnetlink_log.c | 3 +- net/netfilter/nfnetlink_queue.c | 2 +- 5 files changed, 40 insertions(+), 40 deletions(-) diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index cf09ab37b45b..e713476ff29d 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags); +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) { diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 159ec1533c98..5b4632826dc6 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -718,11 +718,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0, family, table); if (err < 0) - goto err; + goto err_fill_table_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_table_info: kfree_skb(skb2); return err; } @@ -1383,11 +1383,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0, family, table, chain); if (err < 0) - goto err; + goto err_fill_chain_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); 
-err: +err_fill_chain_info: kfree_skb(skb2); return err; } @@ -2488,11 +2488,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0, family, table, chain, rule); if (err < 0) - goto err; + goto err_fill_rule_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_rule_info: kfree_skb(skb2); return err; } @@ -3377,11 +3377,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0); if (err < 0) - goto err; + goto err_fill_set_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); -err: +err_fill_set_info: kfree_skb(skb2); return err; } @@ -4157,24 +4157,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, err = -ENOMEM; skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC); if (skb == NULL) - goto err1; + return err; err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid, NFT_MSG_NEWSETELEM, 0, set, &elem); if (err < 0) - goto err2; + goto err_fill_setelem; - err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT); - /* This avoids a loop in nfnetlink. */ - if (err < 0) - goto err1; + return nfnetlink_unicast(skb, ctx->net, ctx->portid); - return 0; -err2: +err_fill_setelem: kfree_skb(skb); -err1: - /* this avoids a loop in nfnetlink. */ - return err == -EAGAIN ? 
-ENOBUFS : err; + return err; } /* called with rcu_read_lock held */ @@ -5273,10 +5267,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk, nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0, family, table, obj, reset); if (err < 0) - goto err; + goto err_fill_obj_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_obj_info: kfree_skb(skb2); return err; } @@ -5933,10 +5928,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk, NFT_MSG_NEWFLOWTABLE, 0, family, flowtable); if (err < 0) - goto err; + goto err_fill_flowtable_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_flowtable_info: kfree_skb(skb2); return err; } @@ -6097,10 +6093,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk, err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid, nlh->nlmsg_seq); if (err < 0) - goto err; + goto err_fill_gen_info; - return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid); -err: + return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid); + +err_fill_gen_info: kfree_skb(skb2); return err; } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 7f2c1915763f..9bacddc761ba 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -148,10 +148,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error) } EXPORT_SYMBOL_GPL(nfnetlink_set_err); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags) +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid) { - return netlink_unicast(net->nfnl, skb, portid, flags); + int err; + + err = nlmsg_unicast(net->nfnl, skb, portid); + if (err == -EAGAIN) + err = -ENOBUFS; + + return err; } EXPORT_SYMBOL_GPL(nfnetlink_unicast); diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c 
index 332c69d27b47..25298b3eb854 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -359,8 +359,7 @@ __nfulnl_send(struct nfulnl_instance *inst) goto out; } } - nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, - MSG_DONTWAIT); + nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid); out: inst->qlen = 0; inst->skb = NULL; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index d33094f4ec41..f81a3ce0fe48 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -685,7 +685,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, *packet_id_ptr = htonl(entry->id); /* nfnetlink_unicast will either free the nskb or add it to a socket */ - err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); + err = nfnetlink_unicast(nskb, net, queue->peer_portid); if (err < 0) { if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { failopen = 1; -- GitLab From dff6a2c2828bce13f32c62029def97195f8830f6 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 21 Aug 2020 09:58:19 +0200 Subject: [PATCH 0650/1304] nvmet-fc: Fix a missed _irqsave version of spin_lock in 'nvmet_fc_fod_op_done()' [ Upstream commit 70e37988db94aba607d5491a94f80ba08e399b6b ] The way 'spin_lock()' and 'spin_lock_irqsave()' are used is not consistent in this function. Use 'spin_lock_irqsave()' also here, as there is no guarantee that interruptions are disabled at that point, according to surrounding code. 
Fixes: a97ec51b37ef ("nvmet_fc: Rework target side abort handling") Signed-off-by: Christophe JAILLET Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg Signed-off-by: Sasha Levin --- drivers/nvme/target/fc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 29b4b236afd8..77e4d184bc99 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1986,9 +1986,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod) return; if (fcpreq->fcp_error || fcpreq->transferred_length != fcpreq->transfer_length) { - spin_lock(&fod->flock); + spin_lock_irqsave(&fod->flock, flags); fod->abort = true; - spin_unlock(&fod->flock); + spin_unlock_irqrestore(&fod->flock, flags); nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); return; -- GitLab From 5154e806105266406156b3fa67d05df7a398aa6c Mon Sep 17 00:00:00 2001 From: Al Grant Date: Tue, 1 Sep 2020 12:10:14 -0300 Subject: [PATCH 0651/1304] perf tools: Correct SNOOPX field offset [ Upstream commit 39c0a53b114d0317e5c4e76b631f41d133af5cb0 ] perf_event.h has macros that define the field offsets in the data_src bitmask in perf records. The SNOOPX and REMOTE offsets were both 37. These are distinct fields, and the bitfield layout in perf_mem_data_src confirms that SNOOPX should be at offset 38. Committer notes: This was extracted from a larger patch that also contained kernel changes. 
Fixes: 52839e653b5629bd ("perf tools: Add support for printing new mem_info encodings") Signed-off-by: Al Grant Reviewed-by: Andi Kleen Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/9974f2d0-bf7f-518e-d9f7-4520e5ff1bb0@foss.arm.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/include/uapi/linux/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index f35eb72739c0..a45e7b4f0316 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -1079,7 +1079,7 @@ union perf_mem_data_src { #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ /* 1 free */ -#define PERF_MEM_SNOOPX_SHIFT 37 +#define PERF_MEM_SNOOPX_SHIFT 38 /* locked instruction */ #define PERF_MEM_LOCK_NA 0x01 /* not available */ -- GitLab From f00d82c3fb4368afb41cba89b287801a7888627c Mon Sep 17 00:00:00 2001 From: Shung-Hsi Yu Date: Mon, 31 Aug 2020 22:37:09 +0800 Subject: [PATCH 0652/1304] net: ethernet: mlx4: Fix memory allocation in mlx4_buddy_init() [ Upstream commit cbedcb044e9cc4e14bbe6658111224bb923094f4 ] On machines with much memory (> 2 TByte) and log_mtts_per_seg == 0, a max_order of 31 will be passed to mlx_buddy_init(), which results in s = BITS_TO_LONGS(1 << 31) becoming a negative value, leading to kvmalloc_array() failure when it is converted to size_t. mlx4_core 0000:b1:00.0: Failed to initialize memory region table, aborting mlx4_core: probe of 0000:b1:00.0 failed with error -12 Fix this issue by changing the left shifting operand from a signed literal to an unsigned one. Fixes: 225c7b1feef1 ("IB/mlx4: Add a driver Mellanox ConnectX InfiniBand adapters") Signed-off-by: Shung-Hsi Yu Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx4/mr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 1a11bc0e1612..cfa0bba3940f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + s = BITS_TO_LONGS(1UL << (buddy->max_order - i)); buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO); if (!buddy->bits[i]) goto err_out_free; -- GitLab From 37d933e8b41b83bb8278815e366aec5a542b7e31 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Sep 2020 11:30:48 -0400 Subject: [PATCH 0653/1304] fix regression in "epoll: Keep a reference on files added to the check list" [ Upstream commit 77f4689de17c0887775bb77896f4cc11a39bf848 ] epoll_loop_check_proc() can run into a file already committed to destruction; we can't grab a reference on those and don't need to add them to the set for reverse path check anyway. Tested-by: Marc Zyngier Fixes: a9ed4a6560b8 ("epoll: Keep a reference on files added to the check list") Signed-off-by: Al Viro Signed-off-by: Sasha Levin --- fs/eventpoll.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index f988ccd064a2..61a52bb26d12 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1891,9 +1891,9 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) * during ep_insert(). 
*/ if (list_empty(&epi->ffd.file->f_tfile_llink)) { - get_file(epi->ffd.file); - list_add(&epi->ffd.file->f_tfile_llink, - &tfile_check_list); + if (get_file_rcu(epi->ffd.file)) + list_add(&epi->ffd.file->f_tfile_llink, + &tfile_check_list); } } } -- GitLab From 2a7241fe4d340bce8c13854976f0eabf2a72d4eb Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 2 Sep 2020 14:56:31 +0300 Subject: [PATCH 0654/1304] net: gemini: Fix another missing clk_disable_unprepare() in probe [ Upstream commit eb0f3bc463d59d86402f19c59aa44e82dc3fab6d ] We recently added some calls to clk_disable_unprepare() but we missed the last error path if register_netdev() fails. I made a couple cleanups so we avoid mistakes like this in the future. First I reversed the "if (!ret)" condition and pulled the code in one indent level. Also, the "port->netdev = NULL;" is not required because "port" isn't used again outside this function so I deleted that line. Fixes: 4d5ae32f5e1e ("net: ethernet: Add a driver for Gemini gigabit ethernet") Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/cortina/gemini.c | 34 +++++++++++++-------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 16de0fa92ab7..5242687060b4 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2451,8 +2451,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) port->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(port->reset)) { dev_err(dev, "no reset\n"); - clk_disable_unprepare(port->pclk); - return PTR_ERR(port->reset); + ret = PTR_ERR(port->reset); + goto unprepare; } reset_control_reset(port->reset); usleep_range(100, 500); @@ -2507,25 +2507,25 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev) IRQF_SHARED, port_names[port->id], port); - if (ret) { - clk_disable_unprepare(port->pclk); - return ret; - } + if (ret) + goto unprepare; ret = register_netdev(netdev); - if (!ret) { + if (ret) + goto unprepare; + + netdev_info(netdev, + "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", + port->irq, &dmares->start, + &gmacres->start); + ret = gmac_setup_phy(netdev); + if (ret) netdev_info(netdev, - "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n", - port->irq, &dmares->start, - &gmacres->start); - ret = gmac_setup_phy(netdev); - if (ret) - netdev_info(netdev, - "PHY init failed, deferring to ifup time\n"); - return 0; - } + "PHY init failed, deferring to ifup time\n"); + return 0; - port->netdev = NULL; +unprepare: + clk_disable_unprepare(port->pclk); return ret; } -- GitLab From ab2413892e2d26015eae2f279f30935846ca24aa Mon Sep 17 00:00:00 2001 From: "Darrick J. 
Wong" Date: Wed, 2 Sep 2020 10:47:02 -0700 Subject: [PATCH 0655/1304] xfs: fix xfs_bmap_validate_extent_raw when checking attr fork of rt files [ Upstream commit d0c20d38af135b2b4b90aa59df7878ef0c8fbef4 ] The realtime flag only applies to the data fork, so don't use the realtime block number checks on the attr fork of a realtime file. Fixes: 30b0984d9117 ("xfs: refactor bmap record validation") Signed-off-by: Darrick J. Wong Reviewed-by: Eric Sandeen Signed-off-by: Sasha Levin --- fs/xfs/libxfs/xfs_bmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 0b7145fdb8aa..f35e1801f1c9 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -6130,7 +6130,7 @@ xfs_bmap_validate_extent( isrt = XFS_IS_REALTIME_INODE(ip); endfsb = irec->br_startblock + irec->br_blockcount - 1; - if (isrt) { + if (isrt && whichfork == XFS_DATA_FORK) { if (!xfs_verify_rtbno(mp, irec->br_startblock)) return __this_address; if (!xfs_verify_rtbno(mp, endfsb)) -- GitLab From faec94592f7364be5f1e512f6330d3b86136166b Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 4 Sep 2020 00:25:10 +0900 Subject: [PATCH 0656/1304] perf jevents: Fix suspicious code in fixregex() [ Upstream commit e62458e3940eb3dfb009481850e140fbee183b04 ] The new string should have enough space for the original string and the back slashes IMHO. 
Fixes: fbc2844e84038ce3 ("perf vendor events: Use more flexible pattern matching for CPU identification for mapfile.csv") Signed-off-by: Namhyung Kim Reviewed-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: John Garry Cc: Kajol Jain Cc: Mark Rutland Cc: Peter Zijlstra Cc: Stephane Eranian Cc: William Cohen Link: http://lore.kernel.org/lkml/20200903152510.489233-1-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/pmu-events/jevents.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index 38b5888ef7b3..c17e59404171 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -137,7 +137,7 @@ static char *fixregex(char *s) return s; /* allocate space for a new string */ - fixed = (char *) malloc(len + 1); + fixed = (char *) malloc(len + esc_count + 1); if (!fixed) return NULL; -- GitLab From aea7be6444a67e455ebcfcc0501c32bee50a47d6 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 3 Sep 2020 14:28:54 -0400 Subject: [PATCH 0657/1304] tg3: Fix soft lockup when tg3_reset_task() fails. [ Upstream commit 556699341efa98243e08e34401b3f601da91f5a3 ] If tg3_reset_task() fails, the device state is left in an inconsistent state with IFF_RUNNING still set but NAPI state not enabled. A subsequent operation, such as ifdown or AER error can cause it to soft lock up when it tries to disable NAPI state. Fix it by bringing down the device to !IFF_RUNNING state when tg3_reset_task() fails. tg3_reset_task() running from workqueue will now call tg3_close() when the reset fails. We need to modify tg3_reset_task_cancel() slightly to avoid tg3_close() calling cancel_work_sync() to cancel tg3_reset_task(). Otherwise cancel_work_sync() will wait forever for tg3_reset_task() to finish. 
Reported-by: David Christensen Reported-by: Baptiste Covolato Fixes: db2199737990 ("tg3: Schedule at most one tg3_reset_task run") Signed-off-by: Michael Chan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/tg3.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index be845df05039..6fcf9646d141 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7219,8 +7219,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp) static inline void tg3_reset_task_cancel(struct tg3 *tp) { - cancel_work_sync(&tp->reset_task); - tg3_flag_clear(tp, RESET_TASK_PENDING); + if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) + cancel_work_sync(&tp->reset_task); tg3_flag_clear(tp, TX_RECOVERY_PENDING); } @@ -11213,18 +11213,27 @@ static void tg3_reset_task(struct work_struct *work) tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); err = tg3_init_hw(tp, true); - if (err) + if (err) { + tg3_full_unlock(tp); + tp->irq_sync = 0; + tg3_napi_enable(tp); + /* Clear this flag so that tg3_reset_task_cancel() will not + * call cancel_work_sync() and wait forever. 
+ */ + tg3_flag_clear(tp, RESET_TASK_PENDING); + dev_close(tp->dev); goto out; + } tg3_netif_start(tp); -out: tg3_full_unlock(tp); if (!err) tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); +out: rtnl_unlock(); } -- GitLab From f10d77cdedbe8b4aaf2799f4cea6126b2612dd93 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 4 Sep 2020 14:10:47 +0800 Subject: [PATCH 0658/1304] x86, fakenuma: Fix invalid starting node ID [ Upstream commit ccae0f36d500aef727f98acd8d0601e6b262a513 ] Commit: cc9aec03e58f ("x86/numa_emulation: Introduce uniform split capability") uses "-1" as the starting node ID, which causes the strange kernel log as follows, when "numa=fake=32G" is added to the kernel command line: Faking node -1 at [mem 0x0000000000000000-0x0000000893ffffff] (35136MB) Faking node 0 at [mem 0x0000001840000000-0x000000203fffffff] (32768MB) Faking node 1 at [mem 0x0000000894000000-0x000000183fffffff] (64192MB) Faking node 2 at [mem 0x0000002040000000-0x000000283fffffff] (32768MB) Faking node 3 at [mem 0x0000002840000000-0x000000303fffffff] (32768MB) And finally the kernel crashes: BUG: Bad page state in process swapper pfn:00011 page:(____ptrval____) refcount:0 mapcount:1 mapping:(____ptrval____) index:0x55cd7e44b270 pfn:0x11 failed to read mapping contents, not a valid kernel address? flags: 0x5(locked|uptodate) raw: 0000000000000005 000055cd7e44af30 000055cd7e44af50 0000000100000006 raw: 000055cd7e44b270 000055cd7e44b290 0000000000000000 000055cd7e44b510 page dumped because: page still charged to cgroup page->mem_cgroup:000055cd7e44b510 Modules linked in: CPU: 0 PID: 0 Comm: swapper Not tainted 5.9.0-rc2 #1 Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0008.031920191559 03/19/2019 Call Trace: dump_stack+0x57/0x80 bad_page.cold+0x63/0x94 __free_pages_ok+0x33f/0x360 memblock_free_all+0x127/0x195 mem_init+0x23/0x1f5 start_kernel+0x219/0x4f5 secondary_startup_64+0xb6/0xc0 Fix this bug via using 0 as the starting node ID. 
This restores the original behavior before cc9aec03e58f. [ mingo: Massaged the changelog. ] Fixes: cc9aec03e58f ("x86/numa_emulation: Introduce uniform split capability") Signed-off-by: "Huang, Ying" Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20200904061047.612950-1-ying.huang@intel.com Signed-off-by: Sasha Levin --- arch/x86/mm/numa_emulation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index d71d72cf6c66..4686757a74d7 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c @@ -322,7 +322,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei, u64 addr, u64 max_addr, u64 size) { return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size, - 0, NULL, NUMA_NO_NODE); + 0, NULL, 0); } int __init setup_emu2phys_nid(int *dfl_phys_nid) -- GitLab From 519837cc18e3389a95ffbff5f165030c0b3d7d42 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 28 Aug 2020 08:06:15 +0800 Subject: [PATCH 0659/1304] iommu/vt-d: Serialize IOMMU GCMD register modifications [ Upstream commit 6e4e9ec65078093165463c13d4eb92b3e8d7b2e8 ] The VT-d spec requires (10.4.4 Global Command Register, GCMD_REG General Description) that: If multiple control fields in this register need to be modified, software must serialize the modifications through multiple writes to this register. However, in irq_remapping.c, modifications of IRE and CFI are done in one write. We need to do two separate writes with STS checking after each. It also checks the status register before writing command register to avoid unnecessary register write. 
Fixes: af8d102f999a4 ("x86/intel/irq_remapping: Clean up x2apic opt-out security warning mess") Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Cc: Andy Lutomirski Cc: Jacob Pan Cc: Kevin Tian Cc: Ashok Raj Link: https://lore.kernel.org/r/20200828000615.8281-1-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel Signed-off-by: Sasha Levin --- drivers/iommu/intel_irq_remapping.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 15a4ad31c510..9d2d03545bb0 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -479,12 +479,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu) /* Enable interrupt-remapping */ iommu->gcmd |= DMA_GCMD_IRE; - iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_IRES), sts); + /* Block compatibility-format MSIs */ + if (sts & DMA_GSTS_CFIS) { + iommu->gcmd &= ~DMA_GCMD_CFI; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, !(sts & DMA_GSTS_CFIS), sts); + } + /* * With CFI clear in the Global Command register, we should be * protected from dangerous (i.e. compatibility) interrupts -- GitLab From b0a3e33221cc9e6ba141d9d62c178c9022f56989 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 6 Jul 2020 11:33:38 -0700 Subject: [PATCH 0660/1304] thermal: ti-soc-thermal: Fix bogus thermal shutdowns for omap4430 [ Upstream commit 30d24faba0532d6972df79a1bf060601994b5873 ] We can sometimes get bogus thermal shutdowns on omap4430 at least with droid4 running idle with a battery charger connected: thermal thermal_zone0: critical temperature reached (143 C), shutting down Dumping out the register values shows we can occasionally get a 0x7f value that is outside the TRM listed values in the ADC conversion table. 
And then we get a normal value when reading again after that. Reading the register multiple times does not seem help avoiding the bogus values as they stay until the next sample is ready. Looking at the TRM chapter "18.4.10.2.3 ADC Codes Versus Temperature", we should have values from 13 to 107 listed with a total of 95 values. But looking at the omap4430_adc_to_temp array, the values are off, and the end values are missing. And it seems that the 4430 ADC table is similar to omap3630 rather than omap4460. Let's fix the issue by using values based on the omap3630 table and just ignoring invalid values. Compared to the 4430 TRM, the omap3630 table has the missing values added while the TRM table only shows every second value. Note that sometimes the ADC register values within the valid table can also be way off for about 1 out of 10 values. But it seems that those just show about 25 C too low values rather than too high values. So those do not cause a bogus thermal shutdown. Fixes: 1a31270e54d7 ("staging: omap-thermal: add OMAP4 data structures") Cc: Merlijn Wajer Cc: Pavel Machek Cc: Sebastian Reichel Signed-off-by: Tony Lindgren Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200706183338.25622-1-tony@atomide.com Signed-off-by: Sasha Levin --- .../ti-soc-thermal/omap4-thermal-data.c | 23 ++++++++++--------- .../thermal/ti-soc-thermal/omap4xxx-bandgap.h | 10 +++++--- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c index c12211eaaac4..0b9f835d931f 100644 --- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c +++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c @@ -46,20 +46,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = { /* * Temperature values in milli degree celsius - * ADC code values from 530 to 923 + * ADC code values from 13 to 107, see TRM + * "18.4.10.2.3 ADC Codes Versus Temperature". 
*/ static const int omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = { - -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000, - -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000, - -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000, - 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000, - 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000, - 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000, - 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000, - 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000, - 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000, - 117000, 118000, 120000, 122000, 123000, + -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, + -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000, + -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000, + 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500, + 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000, + 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000, + 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000, + 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000, + 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000, + 115000, 117000, 118500, 120000, 122000, 123500, 125000, }; /* OMAP4430 data */ diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h index b87c8659ec60..8a081abce4b5 100644 --- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h @@ -67,9 +67,13 @@ * and thresholds for OMAP4430. */ -/* ADC conversion table limits */ -#define OMAP4430_ADC_START_VALUE 0 -#define OMAP4430_ADC_END_VALUE 127 +/* + * ADC conversion table limits. 
Ignore values outside the TRM listed + * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter + * "18.4.10.2.3 ADC Codes Versus Temperature". + */ +#define OMAP4430_ADC_START_VALUE 13 +#define OMAP4430_ADC_END_VALUE 107 /* bandgap clock limits (no control on 4430) */ #define OMAP4430_MAX_FREQ 32768 #define OMAP4430_MIN_FREQ 32768 -- GitLab From 95968e5cbb5db7ff33288d01a95349d476f19248 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 4 Sep 2020 16:36:19 -0700 Subject: [PATCH 0661/1304] include/linux/log2.h: add missing () around n in roundup_pow_of_two() [ Upstream commit 428fc0aff4e59399ec719ffcc1f7a5d29a4ee476 ] Otherwise gcc generates warnings if the expression is complicated. Fixes: 312a0c170945 ("[PATCH] LOG2: Alter roundup_pow_of_two() so that it can use a ilog2() on a constant") Signed-off-by: Jason Gunthorpe Signed-off-by: Andrew Morton Link: https://lkml.kernel.org/r/0-v1-8a2697e3c003+41165-log_brackets_jgg@nvidia.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- include/linux/log2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/log2.h b/include/linux/log2.h index 2af7f77866d0..78496801cddf 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -177,7 +177,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n == 1) ? 1 : \ + ((n) == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ -- GitLab From da0d5ccf845fd5337ce9afaddd46e99859f78502 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Sat, 5 Sep 2020 08:12:01 -0400 Subject: [PATCH 0662/1304] ext2: don't update mtime on COW faults commit 1ef6ea0efe8e68d0299dad44c39dc6ad9e5d1f39 upstream. When running in a dax mode, if the user maps a page with MAP_PRIVATE and PROT_WRITE, the ext2 filesystem would incorrectly update ctime and mtime when the user hits a COW fault. This breaks building of the Linux kernel. How to reproduce: 1. 
extract the Linux kernel tree on dax-mounted ext2 filesystem 2. run make clean 3. run make -j12 4. run make -j12 at step 4, make would incorrectly rebuild the whole kernel (although it was already built in step 3). The reason for the breakage is that almost all object files depend on objtool. When we run objtool, it takes COW page fault on its .data section, and these faults will incorrectly update the timestamp of the objtool binary. The updated timestamp causes make to rebuild the whole tree. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/ext2/file.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 28b2609f25c1..d39d90c1b670 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) struct inode *inode = file_inode(vmf->vma->vm_file); struct ext2_inode_info *ei = EXT2_I(inode); vm_fault_t ret; + bool write = (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); - if (vmf->flags & FAULT_FLAG_WRITE) { + if (write) { sb_start_pagefault(inode->i_sb); file_update_time(vmf->vma->vm_file); } @@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops); up_read(&ei->dax_sem); - if (vmf->flags & FAULT_FLAG_WRITE) + if (write) sb_end_pagefault(inode->i_sb); return ret; } -- GitLab From 884fee7632168ab59ed49a26de430fa3ed5c6a86 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Sat, 5 Sep 2020 08:13:02 -0400 Subject: [PATCH 0663/1304] xfs: don't update mtime on COW faults commit b17164e258e3888d376a7434415013175d637377 upstream. When running in a dax mode, if the user maps a page with MAP_PRIVATE and PROT_WRITE, the xfs filesystem would incorrectly update ctime and mtime when the user hits a COW fault. This breaks building of the Linux kernel. How to reproduce: 1. 
extract the Linux kernel tree on dax-mounted xfs filesystem 2. run make clean 3. run make -j12 4. run make -j12 at step 4, make would incorrectly rebuild the whole kernel (although it was already built in step 3). The reason for the breakage is that almost all object files depend on objtool. When we run objtool, it takes COW page fault on its .data section, and these faults will incorrectly update the timestamp of the objtool binary. The updated timestamp causes make to rebuild the whole tree. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/xfs/xfs_file.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 259549698ba7..f22acfd53850 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1095,6 +1095,14 @@ __xfs_filemap_fault( return ret; } +static inline bool +xfs_is_write_fault( + struct vm_fault *vmf) +{ + return (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); +} + static vm_fault_t xfs_filemap_fault( struct vm_fault *vmf) @@ -1102,7 +1110,7 @@ xfs_filemap_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, PE_SIZE_PTE, IS_DAX(file_inode(vmf->vma->vm_file)) && - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t @@ -1115,7 +1123,7 @@ xfs_filemap_huge_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, pe_size, - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t -- GitLab From 43eadb9e37a2b0888d38e1ebe5d2206928a38e3b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 10 Aug 2020 11:42:26 -0400 Subject: [PATCH 0664/1304] btrfs: drop path before adding new uuid tree entry commit 9771a5cf937129307d9f58922d60484d58ababe7 upstream. 
With the conversion of the tree locks to rwsem I got the following lockdep splat: ====================================================== WARNING: possible circular locking dependency detected 5.8.0-rc7-00167-g0d7ba0c5b375-dirty #925 Not tainted ------------------------------------------------------ btrfs-uuid/7955 is trying to acquire lock: ffff88bfbafec0f8 (btrfs-root-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 but task is already holding lock: ffff88bfbafef2a8 (btrfs-uuid-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #1 (btrfs-uuid-00){++++}-{3:3}: down_read_nested+0x3e/0x140 __btrfs_tree_read_lock+0x39/0x180 __btrfs_read_lock_root_node+0x3a/0x50 btrfs_search_slot+0x4bd/0x990 btrfs_uuid_tree_add+0x89/0x2d0 btrfs_uuid_scan_kthread+0x330/0x390 kthread+0x133/0x150 ret_from_fork+0x1f/0x30 -> #0 (btrfs-root-00){++++}-{3:3}: __lock_acquire+0x1272/0x2310 lock_acquire+0x9e/0x360 down_read_nested+0x3e/0x140 __btrfs_tree_read_lock+0x39/0x180 __btrfs_read_lock_root_node+0x3a/0x50 btrfs_search_slot+0x4bd/0x990 btrfs_find_root+0x45/0x1b0 btrfs_read_tree_root+0x61/0x100 btrfs_get_root_ref.part.50+0x143/0x630 btrfs_uuid_tree_iterate+0x207/0x314 btrfs_uuid_rescan_kthread+0x12/0x50 kthread+0x133/0x150 ret_from_fork+0x1f/0x30 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(btrfs-uuid-00); lock(btrfs-root-00); lock(btrfs-uuid-00); lock(btrfs-root-00); *** DEADLOCK *** 1 lock held by btrfs-uuid/7955: #0: ffff88bfbafef2a8 (btrfs-uuid-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 stack backtrace: CPU: 73 PID: 7955 Comm: btrfs-uuid Kdump: loaded Not tainted 5.8.0-rc7-00167-g0d7ba0c5b375-dirty #925 Hardware name: Quanta Tioga Pass Single Side 01-0030993006/Tioga Pass Single Side, BIOS F08_3A18 12/20/2018 Call Trace: dump_stack+0x78/0xa0 check_noncircular+0x165/0x180 __lock_acquire+0x1272/0x2310 
lock_acquire+0x9e/0x360 ? __btrfs_tree_read_lock+0x39/0x180 ? btrfs_root_node+0x1c/0x1d0 down_read_nested+0x3e/0x140 ? __btrfs_tree_read_lock+0x39/0x180 __btrfs_tree_read_lock+0x39/0x180 __btrfs_read_lock_root_node+0x3a/0x50 btrfs_search_slot+0x4bd/0x990 btrfs_find_root+0x45/0x1b0 btrfs_read_tree_root+0x61/0x100 btrfs_get_root_ref.part.50+0x143/0x630 btrfs_uuid_tree_iterate+0x207/0x314 ? btree_readpage+0x20/0x20 btrfs_uuid_rescan_kthread+0x12/0x50 kthread+0x133/0x150 ? kthread_create_on_node+0x60/0x60 ret_from_fork+0x1f/0x30 This problem exists because we have two different rescan threads, btrfs_uuid_scan_kthread which creates the uuid tree, and btrfs_uuid_tree_iterate that goes through and updates or deletes any out of date roots. The problem is they both do things in different order. btrfs_uuid_scan_kthread() reads the tree_root, and then inserts entries into the uuid_root. btrfs_uuid_tree_iterate() scans the uuid_root, but then does a btrfs_get_fs_root() which can read from the tree_root. It's actually easy enough to not be holding the path in btrfs_uuid_scan_kthread() when we add a uuid entry, as we already drop it further down and re-start the search when we loop. So simply move the path release before we add our entry to the uuid tree. This also fixes a problem where we're holding a path open after we do btrfs_end_transaction(), which has it's own problems. 
CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/volumes.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 4abb2a155ac5..498ec4b10e61 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4172,6 +4172,7 @@ static int btrfs_uuid_scan_kthread(void *data) goto skip; } update_tree: + btrfs_release_path(path); if (!btrfs_is_empty_uuid(root_item.uuid)) { ret = btrfs_uuid_tree_add(trans, root_item.uuid, BTRFS_UUID_KEY_SUBVOL, @@ -4196,6 +4197,7 @@ static int btrfs_uuid_scan_kthread(void *data) } skip: + btrfs_release_path(path); if (trans) { ret = btrfs_end_transaction(trans); trans = NULL; @@ -4203,7 +4205,6 @@ static int btrfs_uuid_scan_kthread(void *data) break; } - btrfs_release_path(path); if (key.offset < (u64)-1) { key.offset++; } else if (key.type < BTRFS_ROOT_ITEM_KEY) { -- GitLab From 6df210762f80f174affd2bbcb346f2f0b233546c Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 7 Sep 2020 15:47:20 +0530 Subject: [PATCH 0665/1304] vfio/type1: Support faulting PFNMAP vmas commit 41311242221e3482b20bfed10fa4d9db98d87016 upstream. With conversion to follow_pfn(), DMA mapping a PFNMAP range depends on the range being faulted into the vma. Add support to manually provide that, in the same way as done on KVM with hva_to_pfn_remapped(). 
Reviewed-by: Peter Xu Signed-off-by: Alex Williamson [Ajay: Regenerated the patch for v4.19] Signed-off-by: Ajay Kaher Signed-off-by: Sasha Levin --- drivers/vfio/vfio_iommu_type1.c | 36 ++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 52083b710b87..05d8553635ee 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -343,6 +343,32 @@ static int put_pfn(unsigned long pfn, int prot) return 0; } +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + int ret; + + ret = follow_pfn(vma, vaddr, pfn); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(NULL, mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pfn(vma, vaddr, pfn); + } + + return ret; +} + static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, int prot, unsigned long *pfn) { @@ -382,12 +408,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, down_read(&mm->mmap_sem); +retry: vma = find_vma_intersection(mm, vaddr, vaddr + 1); if (vma && vma->vm_flags & VM_PFNMAP) { - if (!follow_pfn(vma, vaddr, pfn) && - is_invalid_reserved_pfn(*pfn)) - ret = 0; + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; } up_read(&mm->mmap_sem); -- GitLab From 6c7f2f24a886a398a539baf30f56688308f93b70 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 7 Sep 2020 15:47:21 +0530 Subject: [PATCH 0666/1304] vfio-pci: Fault mmaps to enable vma tracking commit 11c4cd07ba111a09f49625f9e4c851d83daf0a22 upstream. 
Rather than calling remap_pfn_range() when a region is mmap'd, setup a vm_ops handler to support dynamic faulting of the range on access. This allows us to manage a list of vmas actively mapping the area that we can later use to invalidate those mappings. The open callback invalidates the vma range so that all tracking is inserted in the fault handler and removed in the close handler. Reviewed-by: Peter Xu Signed-off-by: Alex Williamson [Ajay: Regenerated the patch for v4.19] Signed-off-by: Ajay Kaher Signed-off-by: Sasha Levin --- drivers/vfio/pci/vfio_pci.c | 76 ++++++++++++++++++++++++++++- drivers/vfio/pci/vfio_pci_private.h | 7 +++ 2 files changed, 81 insertions(+), 2 deletions(-) diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 66783a37f450..3cd596023c2b 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -1121,6 +1121,70 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf, return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); } +static int vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) +{ + struct vfio_pci_mmap_vma *mmap_vma; + + mmap_vma = kmalloc(sizeof(*mmap_vma), GFP_KERNEL); + if (!mmap_vma) + return -ENOMEM; + + mmap_vma->vma = vma; + + mutex_lock(&vdev->vma_lock); + list_add(&mmap_vma->vma_next, &vdev->vma_list); + mutex_unlock(&vdev->vma_lock); + + return 0; +} + +/* + * Zap mmaps on open so that we can fault them in on access and therefore + * our vma_list only tracks mappings accessed since last zap. 
+ */ +static void vfio_pci_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void vfio_pci_mmap_close(struct vm_area_struct *vma) +{ + struct vfio_pci_device *vdev = vma->vm_private_data; + struct vfio_pci_mmap_vma *mmap_vma; + + mutex_lock(&vdev->vma_lock); + list_for_each_entry(mmap_vma, &vdev->vma_list, vma_next) { + if (mmap_vma->vma == vma) { + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + break; + } + } + mutex_unlock(&vdev->vma_lock); +} + +static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct vfio_pci_device *vdev = vma->vm_private_data; + + if (vfio_pci_add_vma(vdev, vma)) + return VM_FAULT_OOM; + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, vma->vm_page_prot)) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} + +static const struct vm_operations_struct vfio_pci_mmap_ops = { + .open = vfio_pci_mmap_open, + .close = vfio_pci_mmap_close, + .fault = vfio_pci_mmap_fault, +}; + static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) { struct vfio_pci_device *vdev = device_data; @@ -1170,8 +1234,14 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff; - return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, - req_len, vma->vm_page_prot); + /* + * See remap_pfn_range(), called from vfio_pci_fault() but we can't + * change vm_flags within the fault handler. Set them now. 
+ */ + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &vfio_pci_mmap_ops; + + return 0; } static void vfio_pci_request(void *device_data, unsigned int count) @@ -1243,6 +1313,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&vdev->irqlock); mutex_init(&vdev->ioeventfds_lock); INIT_LIST_HEAD(&vdev->ioeventfds_list); + mutex_init(&vdev->vma_lock); + INIT_LIST_HEAD(&vdev->vma_list); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index cde3b5d3441a..9743c934199d 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -76,6 +76,11 @@ struct vfio_pci_dummy_resource { struct list_head res_next; }; +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + struct vfio_pci_device { struct pci_dev *pdev; void __iomem *barmap[PCI_STD_RESOURCE_END + 1]; @@ -111,6 +116,8 @@ struct vfio_pci_device { struct list_head dummy_resources_list; struct mutex ioeventfds_lock; struct list_head ioeventfds_list; + struct mutex vma_lock; + struct list_head vma_list; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) -- GitLab From da7aea6eb5608695f590dcd72523536b709d0399 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Mon, 7 Sep 2020 15:47:22 +0530 Subject: [PATCH 0667/1304] vfio-pci: Invalidate mmaps and block MMIO access on disabled memory commit abafbc551fddede3e0a08dee1dcde08fc0eb8476 upstream. Accessing the disabled memory space of a PCI device would typically result in a master abort response on conventional PCI, or an unsupported request on PCI express. The user would generally see these as a -1 response for the read return data and the write would be silently discarded, possibly with an uncorrected, non-fatal AER error triggered on the host. 
Some systems however take it upon themselves to bring down the entire system when they see something that might indicate a loss of data, such as this discarded write to a disabled memory space. To avoid this, we want to try to block the user from accessing memory spaces while they're disabled. We start with a semaphore around the memory enable bit, where writers modify the memory enable state and must be serialized, while readers make use of the memory region and can access in parallel. Writers include both direct manipulation via the command register, as well as any reset path where the internal mechanics of the reset may both explicitly and implicitly disable memory access, and manipulation of the MSI-X configuration, where the MSI-X vector table resides in MMIO space of the device. Readers include the read and write file ops to access the vfio device fd offsets as well as memory mapped access. In the latter case, we make use of our new vma list support to zap, or invalidate, those memory mappings in order to force them to be faulted back in on access. Our semaphore usage will stall user access to MMIO spaces across internal operations like reset, but the user might experience new behavior when trying to access the MMIO space while disabled via the PCI command register. Access via read or write while disabled will return -EIO and access via memory maps will result in a SIGBUS. This is expected to be compatible with known use cases and potentially provides better error handling capabilities than present in the hardware, while avoiding the more readily accessible and severe platform error responses that might otherwise occur. 
Fixes: CVE-2020-12888 Reviewed-by: Peter Xu Signed-off-by: Alex Williamson [Ajay: Regenerated the patch for v4.19] Signed-off-by: Ajay Kaher Signed-off-by: Sasha Levin --- drivers/vfio/pci/vfio_pci.c | 291 ++++++++++++++++++++++++---- drivers/vfio/pci/vfio_pci_config.c | 36 +++- drivers/vfio/pci/vfio_pci_intrs.c | 14 ++ drivers/vfio/pci/vfio_pci_private.h | 9 + drivers/vfio/pci/vfio_pci_rdwr.c | 24 ++- 5 files changed, 331 insertions(+), 43 deletions(-) diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 3cd596023c2b..9f72a6ee13b5 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -29,6 +29,7 @@ #include #include #include +#include #include "vfio_pci_private.h" @@ -181,6 +182,7 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev) static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev); static void vfio_pci_disable(struct vfio_pci_device *vdev); +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data); /* * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND @@ -623,6 +625,12 @@ int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, return 0; } +struct vfio_devices { + struct vfio_device **devices; + int cur_index; + int max_index; +}; + static long vfio_pci_ioctl(void *device_data, unsigned int cmd, unsigned long arg) { @@ -696,7 +704,7 @@ static long vfio_pci_ioctl(void *device_data, { void __iomem *io; size_t size; - u16 orig_cmd; + u16 cmd; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); info.flags = 0; @@ -716,10 +724,7 @@ static long vfio_pci_ioctl(void *device_data, * Is it really there? Enable memory decode for * implicit access in pci_map_rom(). 
*/ - pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd); - pci_write_config_word(pdev, PCI_COMMAND, - orig_cmd | PCI_COMMAND_MEMORY); - + cmd = vfio_pci_memory_lock_and_enable(vdev); io = pci_map_rom(pdev, &size); if (io) { info.flags = VFIO_REGION_INFO_FLAG_READ; @@ -727,8 +732,8 @@ static long vfio_pci_ioctl(void *device_data, } else { info.size = 0; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); - pci_write_config_word(pdev, PCI_COMMAND, orig_cmd); break; } case VFIO_PCI_VGA_REGION_INDEX: @@ -865,8 +870,16 @@ static long vfio_pci_ioctl(void *device_data, return ret; } else if (cmd == VFIO_DEVICE_RESET) { - return vdev->reset_works ? - pci_try_reset_function(vdev->pdev) : -EINVAL; + int ret; + + if (!vdev->reset_works) + return -EINVAL; + + vfio_pci_zap_and_down_write_memory_lock(vdev); + ret = pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + + return ret; } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) { struct vfio_pci_hot_reset_info hdr; @@ -946,8 +959,9 @@ static long vfio_pci_ioctl(void *device_data, int32_t *group_fds; struct vfio_pci_group_entry *groups; struct vfio_pci_group_info info; + struct vfio_devices devs = { .cur_index = 0 }; bool slot = false; - int i, count = 0, ret = 0; + int i, group_idx, mem_idx = 0, count = 0, ret = 0; minsz = offsetofend(struct vfio_pci_hot_reset, count); @@ -999,9 +1013,9 @@ static long vfio_pci_ioctl(void *device_data, * user interface and store the group and iommu ID. This * ensures the group is held across the reset. 
*/ - for (i = 0; i < hdr.count; i++) { + for (group_idx = 0; group_idx < hdr.count; group_idx++) { struct vfio_group *group; - struct fd f = fdget(group_fds[i]); + struct fd f = fdget(group_fds[group_idx]); if (!f.file) { ret = -EBADF; break; @@ -1014,8 +1028,9 @@ static long vfio_pci_ioctl(void *device_data, break; } - groups[i].group = group; - groups[i].id = vfio_external_user_iommu_id(group); + groups[group_idx].group = group; + groups[group_idx].id = + vfio_external_user_iommu_id(group); } kfree(group_fds); @@ -1034,13 +1049,63 @@ static long vfio_pci_ioctl(void *device_data, ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_validate_devs, &info, slot); - if (!ret) - /* User has access, do the reset */ - ret = pci_reset_bus(vdev->pdev); + if (ret) + goto hot_reset_release; + + devs.max_index = count; + devs.devices = kcalloc(count, sizeof(struct vfio_device *), + GFP_KERNEL); + if (!devs.devices) { + ret = -ENOMEM; + goto hot_reset_release; + } + + /* + * We need to get memory_lock for each device, but devices + * can share mmap_sem, therefore we need to zap and hold + * the vma_lock for each device, and only then get each + * memory_lock. 
+ */ + ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, + vfio_pci_try_zap_and_vma_lock_cb, + &devs, slot); + if (ret) + goto hot_reset_release; + + for (; mem_idx < devs.cur_index; mem_idx++) { + struct vfio_pci_device *tmp; + + tmp = vfio_device_data(devs.devices[mem_idx]); + + ret = down_write_trylock(&tmp->memory_lock); + if (!ret) { + ret = -EBUSY; + goto hot_reset_release; + } + mutex_unlock(&tmp->vma_lock); + } + + /* User has access, do the reset */ + ret = pci_reset_bus(vdev->pdev); hot_reset_release: - for (i--; i >= 0; i--) - vfio_group_put_external_user(groups[i].group); + for (i = 0; i < devs.cur_index; i++) { + struct vfio_device *device; + struct vfio_pci_device *tmp; + + device = devs.devices[i]; + tmp = vfio_device_data(device); + + if (i < mem_idx) + up_write(&tmp->memory_lock); + else + mutex_unlock(&tmp->vma_lock); + vfio_device_put(device); + } + kfree(devs.devices); + + for (group_idx--; group_idx >= 0; group_idx--) + vfio_group_put_external_user(groups[group_idx].group); kfree(groups); return ret; @@ -1121,8 +1186,126 @@ static ssize_t vfio_pci_write(void *device_data, const char __user *buf, return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true); } -static int vfio_pci_add_vma(struct vfio_pci_device *vdev, - struct vm_area_struct *vma) +/* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */ +static int vfio_pci_zap_and_vma_lock(struct vfio_pci_device *vdev, bool try) +{ + struct vfio_pci_mmap_vma *mmap_vma, *tmp; + + /* + * Lock ordering: + * vma_lock is nested under mmap_sem for vm_ops callback paths. + * The memory_lock semaphore is used by both code paths calling + * into this function to zap vmas and the vm_ops.fault callback + * to protect the memory enable state of the device. 
+ * + * When zapping vmas we need to maintain the mmap_sem => vma_lock + * ordering, which requires using vma_lock to walk vma_list to + * acquire an mm, then dropping vma_lock to get the mmap_sem and + * reacquiring vma_lock. This logic is derived from similar + * requirements in uverbs_user_mmap_disassociate(). + * + * mmap_sem must always be the top-level lock when it is taken. + * Therefore we can only hold the memory_lock write lock when + * vma_list is empty, as we'd need to take mmap_sem to clear + * entries. vma_list can only be guaranteed empty when holding + * vma_lock, thus memory_lock is nested under vma_lock. + * + * This enables the vm_ops.fault callback to acquire vma_lock, + * followed by memory_lock read lock, while already holding + * mmap_sem without risk of deadlock. + */ + while (1) { + struct mm_struct *mm = NULL; + + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) + return 0; + } else { + mutex_lock(&vdev->vma_lock); + } + while (!list_empty(&vdev->vma_list)) { + mmap_vma = list_first_entry(&vdev->vma_list, + struct vfio_pci_mmap_vma, + vma_next); + mm = mmap_vma->vma->vm_mm; + if (mmget_not_zero(mm)) + break; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + mm = NULL; + } + if (!mm) + return 1; + mutex_unlock(&vdev->vma_lock); + + if (try) { + if (!down_read_trylock(&mm->mmap_sem)) { + mmput(mm); + return 0; + } + } else { + down_read(&mm->mmap_sem); + } + if (mmget_still_valid(mm)) { + if (try) { + if (!mutex_trylock(&vdev->vma_lock)) { + up_read(&mm->mmap_sem); + mmput(mm); + return 0; + } + } else { + mutex_lock(&vdev->vma_lock); + } + list_for_each_entry_safe(mmap_vma, tmp, + &vdev->vma_list, vma_next) { + struct vm_area_struct *vma = mmap_vma->vma; + + if (vma->vm_mm != mm) + continue; + + list_del(&mmap_vma->vma_next); + kfree(mmap_vma); + + zap_vma_ptes(vma, vma->vm_start, + vma->vm_end - vma->vm_start); + } + mutex_unlock(&vdev->vma_lock); + } + up_read(&mm->mmap_sem); + mmput(mm); + } +} + +void 
vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device *vdev) +{ + vfio_pci_zap_and_vma_lock(vdev, false); + down_write(&vdev->memory_lock); + mutex_unlock(&vdev->vma_lock); +} + +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev) +{ + u16 cmd; + + down_write(&vdev->memory_lock); + pci_read_config_word(vdev->pdev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MEMORY)) + pci_write_config_word(vdev->pdev, PCI_COMMAND, + cmd | PCI_COMMAND_MEMORY); + + return cmd; +} + +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, u16 cmd) +{ + pci_write_config_word(vdev->pdev, PCI_COMMAND, cmd); + up_write(&vdev->memory_lock); +} + +/* Caller holds vma_lock */ +static int __vfio_pci_add_vma(struct vfio_pci_device *vdev, + struct vm_area_struct *vma) { struct vfio_pci_mmap_vma *mmap_vma; @@ -1131,10 +1314,7 @@ static int vfio_pci_add_vma(struct vfio_pci_device *vdev, return -ENOMEM; mmap_vma->vma = vma; - - mutex_lock(&vdev->vma_lock); list_add(&mmap_vma->vma_next, &vdev->vma_list); - mutex_unlock(&vdev->vma_lock); return 0; } @@ -1168,15 +1348,32 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct vfio_pci_device *vdev = vma->vm_private_data; + vm_fault_t ret = VM_FAULT_NOPAGE; + + mutex_lock(&vdev->vma_lock); + down_read(&vdev->memory_lock); + + if (!__vfio_pci_memory_enabled(vdev)) { + ret = VM_FAULT_SIGBUS; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } + + if (__vfio_pci_add_vma(vdev, vma)) { + ret = VM_FAULT_OOM; + mutex_unlock(&vdev->vma_lock); + goto up_out; + } - if (vfio_pci_add_vma(vdev, vma)) - return VM_FAULT_OOM; + mutex_unlock(&vdev->vma_lock); if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return VM_FAULT_SIGBUS; + ret = VM_FAULT_SIGBUS; - return VM_FAULT_NOPAGE; +up_out: + up_read(&vdev->memory_lock); + return ret; } static const struct vm_operations_struct vfio_pci_mmap_ops = { @@ -1315,6 +1512,7 @@ 
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&vdev->ioeventfds_list); mutex_init(&vdev->vma_lock); INIT_LIST_HEAD(&vdev->vma_list); + init_rwsem(&vdev->memory_lock); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); if (ret) { @@ -1410,12 +1608,6 @@ static struct pci_driver vfio_pci_driver = { .err_handler = &vfio_err_handlers, }; -struct vfio_devices { - struct vfio_device **devices; - int cur_index; - int max_index; -}; - static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) { struct vfio_devices *devs = data; @@ -1437,6 +1629,39 @@ static int vfio_pci_get_devs(struct pci_dev *pdev, void *data) return 0; } +static int vfio_pci_try_zap_and_vma_lock_cb(struct pci_dev *pdev, void *data) +{ + struct vfio_devices *devs = data; + struct vfio_device *device; + struct vfio_pci_device *vdev; + + if (devs->cur_index == devs->max_index) + return -ENOSPC; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -EINVAL; + + if (pci_dev_driver(pdev) != &vfio_pci_driver) { + vfio_device_put(device); + return -EBUSY; + } + + vdev = vfio_device_data(device); + + /* + * Locking multiple devices is prone to deadlock, runaway and + * unwind if we hit contention. 
+ */ + if (!vfio_pci_zap_and_vma_lock(vdev, true)) { + vfio_device_put(device); + return -EBUSY; + } + + devs->devices[devs->cur_index++] = device; + return 0; +} + /* * Attempt to do a bus/slot reset if there are devices affected by a reset for * this device that are needs_reset and all of the affected devices are unused diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 36bc8f104e42..4fe71fbce194 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -398,6 +398,14 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) *(__le32 *)(&p->write[off]) = cpu_to_le32(write); } +/* Caller should hold memory_lock semaphore */ +bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) +{ + u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); + + return cmd & PCI_COMMAND_MEMORY; +} + /* * Restore the *real* BARs after we detect a FLR or backdoor reset. * (backdoor = some device specific technique that we didn't catch) @@ -558,13 +566,18 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, new_cmd = le32_to_cpu(val); + phys_io = !!(phys_cmd & PCI_COMMAND_IO); + virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); + new_io = !!(new_cmd & PCI_COMMAND_IO); + phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY); virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY); new_mem = !!(new_cmd & PCI_COMMAND_MEMORY); - phys_io = !!(phys_cmd & PCI_COMMAND_IO); - virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO); - new_io = !!(new_cmd & PCI_COMMAND_IO); + if (!new_mem) + vfio_pci_zap_and_down_write_memory_lock(vdev); + else + down_write(&vdev->memory_lock); /* * If the user is writing mem/io enable (new_mem/io) and we @@ -581,8 +594,11 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, } count = vfio_default_config_write(vdev, pos, count, perm, offset, val); - if (count < 0) + if (count < 0) { + if (offset == PCI_COMMAND) + 
up_write(&vdev->memory_lock); return count; + } /* * Save current memory/io enable bits in vconfig to allow for @@ -593,6 +609,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos, *virt_cmd &= cpu_to_le16(~mask); *virt_cmd |= cpu_to_le16(new_cmd & mask); + + up_write(&vdev->memory_lock); } /* Emulate INTx disable */ @@ -830,8 +848,11 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_EXP_DEVCAP, &cap); - if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) + if (!ret && (cap & PCI_EXP_DEVCAP_FLR)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } /* @@ -909,8 +930,11 @@ static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos, pos - offset + PCI_AF_CAP, &cap); - if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) + if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP)) { + vfio_pci_zap_and_down_write_memory_lock(vdev); pci_try_reset_function(vdev->pdev); + up_write(&vdev->memory_lock); + } } return count; diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 94594dc63c41..bdfdd506bc58 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -252,6 +252,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) struct pci_dev *pdev = vdev->pdev; unsigned int flag = msix ? 
PCI_IRQ_MSIX : PCI_IRQ_MSI; int ret; + u16 cmd; if (!is_irq_none(vdev)) return -EINVAL; @@ -261,13 +262,16 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) return -ENOMEM; /* return the number of supported vectors if we can't get all: */ + cmd = vfio_pci_memory_lock_and_enable(vdev); ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag); if (ret < nvec) { if (ret > 0) pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); kfree(vdev->ctx); return ret; } + vfio_pci_memory_unlock_and_restore(vdev, cmd); vdev->num_ctx = nvec; vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX : @@ -290,6 +294,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, struct pci_dev *pdev = vdev->pdev; struct eventfd_ctx *trigger; int irq, ret; + u16 cmd; if (vector < 0 || vector >= vdev->num_ctx) return -EINVAL; @@ -298,7 +303,11 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, if (vdev->ctx[vector].trigger) { irq_bypass_unregister_producer(&vdev->ctx[vector].producer); + + cmd = vfio_pci_memory_lock_and_enable(vdev); free_irq(irq, vdev->ctx[vector].trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); + kfree(vdev->ctx[vector].name); eventfd_ctx_put(vdev->ctx[vector].trigger); vdev->ctx[vector].trigger = NULL; @@ -326,6 +335,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, * such a reset it would be unsuccessful. To avoid this, restore the * cached value of the message prior to enabling. 
*/ + cmd = vfio_pci_memory_lock_and_enable(vdev); if (msix) { struct msi_msg msg; @@ -335,6 +345,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, ret = request_irq(irq, vfio_msihandler, 0, vdev->ctx[vector].name, trigger); + vfio_pci_memory_unlock_and_restore(vdev, cmd); if (ret) { kfree(vdev->ctx[vector].name); eventfd_ctx_put(trigger); @@ -379,6 +390,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) { struct pci_dev *pdev = vdev->pdev; int i; + u16 cmd; for (i = 0; i < vdev->num_ctx; i++) { vfio_virqfd_disable(&vdev->ctx[i].unmask); @@ -387,7 +399,9 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + cmd = vfio_pci_memory_lock_and_enable(vdev); pci_free_irq_vectors(pdev); + vfio_pci_memory_unlock_and_restore(vdev, cmd); /* * Both disable paths above use pci_intx_for_msi() to clear DisINTx diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index 9743c934199d..17d2bae5b013 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -118,6 +118,7 @@ struct vfio_pci_device { struct list_head ioeventfds_list; struct mutex vma_lock; struct list_head vma_list; + struct rw_semaphore memory_lock; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) @@ -156,6 +157,14 @@ extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev, unsigned int type, unsigned int subtype, const struct vfio_pci_regops *ops, size_t size, u32 flags, void *data); + +extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev, + u16 cmd); + #ifdef CONFIG_VFIO_PCI_IGD extern int vfio_pci_igd_init(struct vfio_pci_device *vdev); #else 
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index a6029d0a5524..3d0ec2bbe131 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -165,6 +165,7 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, size_t x_start = 0, x_end = 0; resource_size_t end; void __iomem *io; + struct resource *res = &vdev->pdev->resource[bar]; ssize_t done; if (pci_resource_start(pdev, bar)) @@ -180,6 +181,14 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, count = min(count, (size_t)(end - pos)); + if (res->flags & IORESOURCE_MEM) { + down_read(&vdev->memory_lock); + if (!__vfio_pci_memory_enabled(vdev)) { + up_read(&vdev->memory_lock); + return -EIO; + } + } + if (bar == PCI_ROM_RESOURCE) { /* * The ROM can fill less space than the BAR, so we start the @@ -187,13 +196,17 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, * filling large ROM BARs much faster. */ io = pci_map_rom(pdev, &x_start); - if (!io) - return -ENOMEM; + if (!io) { + done = -ENOMEM; + goto out; + } x_end = end; } else { int ret = vfio_pci_setup_barmap(vdev, bar); - if (ret) - return ret; + if (ret) { + done = ret; + goto out; + } io = vdev->barmap[bar]; } @@ -210,6 +223,9 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf, if (bar == PCI_ROM_RESOURCE) pci_unmap_rom(pdev, io); +out: + if (res->flags & IORESOURCE_MEM) + up_read(&vdev->memory_lock); return done; } -- GitLab From 2ca6e25f06070fb5a8f8429f4e7a53ff538d1673 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 15 Aug 2018 18:26:53 +0300 Subject: [PATCH 0668/1304] btrfs: Remove redundant extent_buffer_get in get_old_root [ Upstream commit 6c122e2a0c515cfb3f3a9cefb5dad4cb62109c78 ] get_old_root is used only by btrfs_search_old_slot to initialise the path structure. 
The old root is always a cloned buffer (either via alloc dummy or via btrfs_clone_extent_buffer) and its reference count is 2: 1 from allocation, 1 from extent_buffer_get call in get_old_root. This latter explicit ref count acquire operation is in fact unnecessary since the semantic is such that the newly allocated buffer is handed over to the btrfs_path for lifetime management. Considering this just remove the extra extent_buffer_get in get_old_root. Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/ctree.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index c9943d70e2cb..6c13d7d83f5c 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1421,7 +1421,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (!eb) return NULL; - extent_buffer_get(eb); btrfs_tree_read_lock(eb); if (old_root) { btrfs_set_header_bytenr(eb, eb->start); -- GitLab From 88814d0bc8cdbf651e03a62de1d4530d337b8952 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 15 Aug 2018 18:26:54 +0300 Subject: [PATCH 0669/1304] btrfs: Remove extraneous extent_buffer_get from tree_mod_log_rewind [ Upstream commit 24cee18a1c1d7c731ea5987e0c99daea22ae7f4a ] When a rewound buffer is created it already has a ref count of 1 and the dummy flag set. Then another ref is taken bumping the count to 2. Finally when this buffer is released from btrfs_release_path the extra reference is decremented by the special handling code in free_extent_buffer. However, this special code is in fact redundant since a ref count of 1 is still correct since the buffer is only accessed via btrfs_path struct. This paves the way forward of removing the special handling in free_extent_buffer. 
Signed-off-by: Nikolay Borisov Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/ctree.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 6c13d7d83f5c..12b1a1c80c1b 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1347,7 +1347,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); - extent_buffer_get(eb_rewin); btrfs_tree_read_lock(eb_rewin); __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) > -- GitLab From 4689380be50c038b584933171c2f1dbca90a56be Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 10 Aug 2020 11:42:31 -0400 Subject: [PATCH 0670/1304] btrfs: set the lockdep class for log tree extent buffers [ Upstream commit d3beaa253fd6fa40b8b18a216398e6e5376a9d21 ] These are special extent buffers that get rewound in order to lookup the state of the tree at a specific point in time. As such they do not go through the normal initialization paths that set their lockdep class, so handle them appropriately when they are created and before they are locked. 
CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/ctree.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 12b1a1c80c1b..8007b6aacec6 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1347,6 +1347,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin), + eb_rewin, btrfs_header_level(eb_rewin)); btrfs_tree_read_lock(eb_rewin); __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) > @@ -1420,7 +1422,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq) if (!eb) return NULL; - btrfs_tree_read_lock(eb); if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); @@ -1428,6 +1429,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq) btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } + btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, + btrfs_header_level(eb)); + btrfs_tree_read_lock(eb); if (tm) __tree_mod_log_rewind(fs_info, eb, time_seq, tm); else -- GitLab From 61135a9c74c8f79fce637c28f6109ab58467d5bd Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 15 May 2019 14:38:18 +0900 Subject: [PATCH 0671/1304] uaccess: Add non-pagefault user-space read functions [ Upstream commit 3d7081822f7f9eab867d9bcc8fd635208ec438e0 ] Add probe_user_read(), strncpy_from_unsafe_user() and strnlen_unsafe_user() which allows caller to access user-space in IRQ context. Current probe_kernel_read() and strncpy_from_unsafe() are not available for user-space memory, because it sets KERNEL_DS while accessing data. 
On some arch, user address space and kernel address space can co-exist, but others can not. In that case, setting KERNEL_DS means given address is treated as a kernel address space. Also strnlen_user() is only available from user context since it can sleep if pagefault is enabled. To access user-space memory without pagefault, we need these new functions which set USER_DS while accessing the data. Link: http://lkml.kernel.org/r/155789869802.26965.4940338412595759063.stgit@devnote2 Acked-by: Ingo Molnar Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Sasha Levin --- include/linux/uaccess.h | 14 +++++ mm/maccess.c | 122 ++++++++++++++++++++++++++++++++++++++-- 2 files changed, 130 insertions(+), 6 deletions(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index d55b68b113de..db88f36540e9 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -242,6 +242,17 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to, extern long probe_kernel_read(void *dst, const void *src, size_t size); extern long __probe_kernel_read(void *dst, const void *src, size_t size); +/* + * probe_user_read(): safely attempt to read from a location in user space + * @dst: pointer to the buffer that shall take the data + * @src: address to read from + * @size: size of the data chunk + * + * Safely read from address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. 
+ */ +extern long probe_user_read(void *dst, const void __user *src, size_t size); + /* * probe_kernel_write(): safely attempt to write to a location * @dst: address to write to @@ -255,6 +266,9 @@ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, + long count); +extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); /** * probe_kernel_address(): safely attempt to read from a location diff --git a/mm/maccess.c b/mm/maccess.c index ec00be51a24f..80d70cb5cc0b 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -5,8 +5,20 @@ #include #include +static __always_inline long +probe_read_common(void *dst, const void __user *src, size_t size) +{ + long ret; + + pagefault_disable(); + ret = __copy_from_user_inatomic(dst, src, size); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + /** - * probe_kernel_read(): safely attempt to read from a location + * probe_kernel_read(): safely attempt to read from a kernel-space location * @dst: pointer to the buffer that shall take the data * @src: address to read from * @size: size of the data chunk @@ -29,16 +41,40 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_from_user_inatomic(dst, - (__force const void __user *)src, size); - pagefault_enable(); + ret = probe_read_common(dst, (__force const void __user *)src, size); set_fs(old_fs); - return ret ? -EFAULT : 0; + return ret; } EXPORT_SYMBOL_GPL(probe_kernel_read); +/** + * probe_user_read(): safely attempt to read from a user-space location + * @dst: pointer to the buffer that shall take the data + * @src: address to read from. This must be a user address. 
+ * @size: size of the data chunk + * + * Safely read from user address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. + */ + +long __weak probe_user_read(void *dst, const void __user *src, size_t size) + __attribute__((alias("__probe_user_read"))); + +long __probe_user_read(void *dst, const void __user *src, size_t size) +{ + long ret = -EFAULT; + mm_segment_t old_fs = get_fs(); + + set_fs(USER_DS); + if (access_ok(VERIFY_READ, src, size)) + ret = probe_read_common(dst, src, size); + set_fs(old_fs); + + return ret; +} +EXPORT_SYMBOL_GPL(probe_user_read); + /** * probe_kernel_write(): safely attempt to write to a location * @dst: address to write to @@ -66,6 +102,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) } EXPORT_SYMBOL_GPL(probe_kernel_write); + /** * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. * @dst: Destination address, in kernel space. This buffer must be at @@ -105,3 +142,76 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) return ret ? -EFAULT : src - unsafe_addr; } + +/** + * strncpy_from_unsafe_user: - Copy a NUL terminated string from unsafe user + * address. + * @dst: Destination address, in kernel space. This buffer must be at + * least @count bytes long. + * @unsafe_addr: Unsafe user address. + * @count: Maximum number of bytes to copy, including the trailing NUL. + * + * Copies a NUL-terminated string from unsafe user address to kernel buffer. + * + * On success, returns the length of the string INCLUDING the trailing NUL. + * + * If access fails, returns -EFAULT (some data may have been copied + * and the trailing NUL added). + * + * If @count is smaller than the length of the string, copies @count-1 bytes, + * sets the last byte of @dst buffer to NUL and returns @count. 
+ */ +long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, + long count) +{ + mm_segment_t old_fs = get_fs(); + long ret; + + if (unlikely(count <= 0)) + return 0; + + set_fs(USER_DS); + pagefault_disable(); + ret = strncpy_from_user(dst, unsafe_addr, count); + pagefault_enable(); + set_fs(old_fs); + + if (ret >= count) { + ret = count; + dst[ret - 1] = '\0'; + } else if (ret > 0) { + ret++; + } + + return ret; +} + +/** + * strnlen_unsafe_user: - Get the size of a user string INCLUDING final NUL. + * @unsafe_addr: The string to measure. + * @count: Maximum count (including NUL) + * + * Get the size of a NUL-terminated string in user space without pagefault. + * + * Returns the size of the string INCLUDING the terminating NUL. + * + * If the string is too long, returns a number larger than @count. User + * has to check the return value against "> count". + * On exception (or invalid count), returns 0. + * + * Unlike strnlen_user, this can be used from IRQ handler etc. because + * it disables pagefaults. + */ +long strnlen_unsafe_user(const void __user *unsafe_addr, long count) +{ + mm_segment_t old_fs = get_fs(); + int ret; + + set_fs(USER_DS); + pagefault_disable(); + ret = strnlen_user(unsafe_addr, count); + pagefault_enable(); + set_fs(old_fs); + + return ret; +} -- GitLab From cfb4721fce554cb596ea86af116d3d68a4d91254 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:17:56 +0100 Subject: [PATCH 0672/1304] uaccess: Add non-pagefault user-space write function [ Upstream commit 1d1585ca0f48fe7ed95c3571f3e4a82b2b5045dc ] Commit 3d7081822f7f ("uaccess: Add non-pagefault user-space read functions") missed to add probe write function, therefore factor out a probe_write_common() helper with most logic of probe_kernel_write() except setting KERNEL_DS, and add a new probe_user_write() helper so it can be used from BPF side. 
Again, on some archs, the user address space and kernel address space can co-exist and be overlapping, so in such case, setting KERNEL_DS would mean that the given address is treated as being in kernel address space. Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Cc: Masami Hiramatsu Link: https://lore.kernel.org/bpf/9df2542e68141bfa3addde631441ee45503856a8.1572649915.git.daniel@iogearbox.net Signed-off-by: Sasha Levin --- include/linux/uaccess.h | 12 +++++++++++ mm/maccess.c | 45 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 53 insertions(+), 4 deletions(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index db88f36540e9..db9b0dd0a7a3 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -265,6 +265,18 @@ extern long probe_user_read(void *dst, const void __user *src, size_t size); extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); +/* + * probe_user_write(): safely attempt to write to a location in user space + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ +extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); +extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); + extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, long count); diff --git a/mm/maccess.c b/mm/maccess.c index 80d70cb5cc0b..6e41ba452e5e 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -17,6 +17,18 @@ probe_read_common(void *dst, const void __user *src, size_t size) return ret ? 
-EFAULT : 0; } +static __always_inline long +probe_write_common(void __user *dst, const void *src, size_t size) +{ + long ret; + + pagefault_disable(); + ret = __copy_to_user_inatomic(dst, src, size); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + /** * probe_kernel_read(): safely attempt to read from a kernel-space location * @dst: pointer to the buffer that shall take the data @@ -84,6 +96,7 @@ EXPORT_SYMBOL_GPL(probe_user_read); * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. */ + long __weak probe_kernel_write(void *dst, const void *src, size_t size) __attribute__((alias("__probe_kernel_write"))); @@ -93,15 +106,39 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); - pagefault_enable(); + ret = probe_write_common((__force void __user *)dst, src, size); set_fs(old_fs); - return ret ? -EFAULT : 0; + return ret; } EXPORT_SYMBOL_GPL(probe_kernel_write); +/** + * probe_user_write(): safely attempt to write to a user-space location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ + +long __weak probe_user_write(void __user *dst, const void *src, size_t size) + __attribute__((alias("__probe_user_write"))); + +long __probe_user_write(void __user *dst, const void *src, size_t size) +{ + long ret = -EFAULT; + mm_segment_t old_fs = get_fs(); + + set_fs(USER_DS); + if (access_ok(VERIFY_WRITE, dst, size)) + ret = probe_write_common(dst, src, size); + set_fs(old_fs); + + return ret; +} +EXPORT_SYMBOL_GPL(probe_user_write); /** * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. 
-- GitLab From 7c9bf5c3ed15fd6ad777663c636a6f4a5d9edf2c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 10 Aug 2020 11:42:27 -0400 Subject: [PATCH 0673/1304] btrfs: fix potential deadlock in the search ioctl [ Upstream commit a48b73eca4ceb9b8a4b97f290a065335dbcd8a04 ] With the conversion of the tree locks to rwsem I got the following lockdep splat: ====================================================== WARNING: possible circular locking dependency detected 5.8.0-rc7-00165-g04ec4da5f45f-dirty #922 Not tainted ------------------------------------------------------ compsize/11122 is trying to acquire lock: ffff889fabca8768 (&mm->mmap_lock#2){++++}-{3:3}, at: __might_fault+0x3e/0x90 but task is already holding lock: ffff889fe720fe40 (btrfs-fs-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #2 (btrfs-fs-00){++++}-{3:3}: down_write_nested+0x3b/0x70 __btrfs_tree_lock+0x24/0x120 btrfs_search_slot+0x756/0x990 btrfs_lookup_inode+0x3a/0xb4 __btrfs_update_delayed_inode+0x93/0x270 btrfs_async_run_delayed_root+0x168/0x230 btrfs_work_helper+0xd4/0x570 process_one_work+0x2ad/0x5f0 worker_thread+0x3a/0x3d0 kthread+0x133/0x150 ret_from_fork+0x1f/0x30 -> #1 (&delayed_node->mutex){+.+.}-{3:3}: __mutex_lock+0x9f/0x930 btrfs_delayed_update_inode+0x50/0x440 btrfs_update_inode+0x8a/0xf0 btrfs_dirty_inode+0x5b/0xd0 touch_atime+0xa1/0xd0 btrfs_file_mmap+0x3f/0x60 mmap_region+0x3a4/0x640 do_mmap+0x376/0x580 vm_mmap_pgoff+0xd5/0x120 ksys_mmap_pgoff+0x193/0x230 do_syscall_64+0x50/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #0 (&mm->mmap_lock#2){++++}-{3:3}: __lock_acquire+0x1272/0x2310 lock_acquire+0x9e/0x360 __might_fault+0x68/0x90 _copy_to_user+0x1e/0x80 copy_to_sk.isra.32+0x121/0x300 search_ioctl+0x106/0x200 btrfs_ioctl_tree_search_v2+0x7b/0xf0 btrfs_ioctl+0x106f/0x30a0 ksys_ioctl+0x83/0xc0 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x50/0x90 
entry_SYSCALL_64_after_hwframe+0x44/0xa9 other info that might help us debug this: Chain exists of: &mm->mmap_lock#2 --> &delayed_node->mutex --> btrfs-fs-00 Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(btrfs-fs-00); lock(&delayed_node->mutex); lock(btrfs-fs-00); lock(&mm->mmap_lock#2); *** DEADLOCK *** 1 lock held by compsize/11122: #0: ffff889fe720fe40 (btrfs-fs-00){++++}-{3:3}, at: __btrfs_tree_read_lock+0x39/0x180 stack backtrace: CPU: 17 PID: 11122 Comm: compsize Kdump: loaded Not tainted 5.8.0-rc7-00165-g04ec4da5f45f-dirty #922 Hardware name: Quanta Tioga Pass Single Side 01-0030993006/Tioga Pass Single Side, BIOS F08_3A18 12/20/2018 Call Trace: dump_stack+0x78/0xa0 check_noncircular+0x165/0x180 __lock_acquire+0x1272/0x2310 lock_acquire+0x9e/0x360 ? __might_fault+0x3e/0x90 ? find_held_lock+0x72/0x90 __might_fault+0x68/0x90 ? __might_fault+0x3e/0x90 _copy_to_user+0x1e/0x80 copy_to_sk.isra.32+0x121/0x300 ? btrfs_search_forward+0x2a6/0x360 search_ioctl+0x106/0x200 btrfs_ioctl_tree_search_v2+0x7b/0xf0 btrfs_ioctl+0x106f/0x30a0 ? __do_sys_newfstat+0x5a/0x70 ? ksys_ioctl+0x83/0xc0 ksys_ioctl+0x83/0xc0 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x50/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xa9 The problem is we're doing a copy_to_user() while holding tree locks, which can deadlock if we have to do a page fault for the copy_to_user(). This exists even without my locking changes, so it needs to be fixed. Rework the search ioctl to do the pre-fault and then copy_to_user_nofault for the copying. 
CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Filipe Manana Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/extent_io.c | 8 ++++---- fs/btrfs/extent_io.h | 6 +++--- fs/btrfs/ioctl.c | 27 ++++++++++++++++++++------- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index fbcd18d96c52..82d597b16152 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -5377,9 +5377,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv, } } -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dstv, - unsigned long start, unsigned long len) +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dstv, + unsigned long start, unsigned long len) { size_t cur; size_t offset; @@ -5400,7 +5400,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb, cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); - if (copy_to_user(dst, kaddr + offset, cur)) { + if (probe_user_write(dst, kaddr + offset, cur)) { ret = -EFAULT; break; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index a3598b24441e..d5089cadd7c4 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -448,9 +448,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, void read_extent_buffer(const struct extent_buffer *eb, void *dst, unsigned long start, unsigned long len); -int read_extent_buffer_to_user(const struct extent_buffer *eb, - void __user *dst, unsigned long start, - unsigned long len); +int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, + void __user *dst, unsigned long start, + unsigned long len); void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src); void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb, const void *src); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 
a5ae02bf3652..85990755edd9 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2079,9 +2079,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, sh.len = item_len; sh.transid = found_transid; - /* copy search result header */ - if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { - ret = -EFAULT; + /* + * Copy search result header. If we fault then loop again so we + * can fault in the pages and -EFAULT there if there's a + * problem. Otherwise we'll fault and then copy the buffer in + * properly this next time through + */ + if (probe_user_write(ubuf + *sk_offset, &sh, sizeof(sh))) { + ret = 0; goto out; } @@ -2089,10 +2094,14 @@ static noinline int copy_to_sk(struct btrfs_path *path, if (item_len) { char __user *up = ubuf + *sk_offset; - /* copy the item */ - if (read_extent_buffer_to_user(leaf, up, - item_off, item_len)) { - ret = -EFAULT; + /* + * Copy the item, same behavior as above, but reset the + * * sk_offset so we copy the full thing again. + */ + if (read_extent_buffer_to_user_nofault(leaf, up, + item_off, item_len)) { + ret = 0; + *sk_offset -= sizeof(sh); goto out; } @@ -2180,6 +2189,10 @@ static noinline int search_ioctl(struct inode *inode, key.offset = sk->min_offset; while (1) { + ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset); + if (ret) + break; + ret = btrfs_search_forward(root, &key, path, sk->min_transid); if (ret != 0) { if (ret > 0) -- GitLab From 3d7de9fe191d4a86ba40f7a549bb265e05635f84 Mon Sep 17 00:00:00 2001 From: Daniele Palmas Date: Wed, 9 Oct 2019 11:07:18 +0200 Subject: [PATCH 0674/1304] net: usb: qmi_wwan: add Telit 0x1050 composition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e0ae2c578d3909e60e9448207f5d83f785f1129f ] This patch adds support for Telit FN980 0x1050 composition 0x1050: tty, adb, rmnet, tty, tty, tty, tty Signed-off-by: Daniele Palmas Acked-by: Bjørn Mork Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- 
drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index ea3c89118614..41fbb8669845 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1262,6 +1262,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ -- GitLab From 669f22290ec9204145b38a6eedcb8461c279769b Mon Sep 17 00:00:00 2001 From: Rogan Dawes Date: Wed, 17 Jul 2019 11:14:33 +0200 Subject: [PATCH 0675/1304] usb: qmi_wwan: add D-Link DWM-222 A2 device ID [ Upstream commit 7d6053097311643545a8118100175a39bd6fa637 ] Signed-off-by: Rogan Dawes Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 41fbb8669845..af58bf54aa9b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1227,6 +1227,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ + {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ -- GitLab From a69e790bce633b69759817cc53d22e0c8ab1c657 Mon Sep 17 00:00:00 2001 From: Tong Zhang Date: Mon, 24 Aug 2020 18:45:41 -0400 Subject: [PATCH 0676/1304] ALSA: ca0106: fix error code handling commit ee0761d1d8222bcc5c86bf10849dc86cf008557c upstream. 
snd_ca0106_spi_write() returns 1 on error, snd_ca0106_pcm_power_dac() is returning the error code directly, and the caller is expecting an negative error code Signed-off-by: Tong Zhang Cc: Link: https://lore.kernel.org/r/20200824224541.1260307-1-ztong0001@gmail.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/ca0106/ca0106_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c index cd27b5536654..675b812e96d6 100644 --- a/sound/pci/ca0106/ca0106_main.c +++ b/sound/pci/ca0106/ca0106_main.c @@ -551,7 +551,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id, else /* Power down */ chip->spi_dac_reg[reg] |= bit; - return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]); + if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0) + return -ENXIO; } return 0; } -- GitLab From 569e1b621797a9cdba8369f45fc5612ce1bec323 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 1 Sep 2020 15:18:02 +0200 Subject: [PATCH 0677/1304] ALSA: pcm: oss: Remove superfluous WARN_ON() for mulaw sanity check commit 949a1ebe8cea7b342085cb6a4946b498306b9493 upstream. The PCM OSS mulaw plugin has a check of the format of the counter part whether it's a linear format. The check is with snd_BUG_ON() that emits WARN_ON() when the debug config is set, and it confuses syzkaller as if it were a serious issue. Let's drop snd_BUG_ON() for avoiding that. While we're at it, correct the error code to a more suitable, EINVAL. 
Reported-by: syzbot+23b22dc2e0b81cbfcc95@syzkaller.appspotmail.com Cc: Link: https://lore.kernel.org/r/20200901131802.18157-1-tiwai@suse.de Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/oss/mulaw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c index 3788906421a7..fe27034f2846 100644 --- a/sound/core/oss/mulaw.c +++ b/sound/core/oss/mulaw.c @@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug, snd_BUG(); return -EINVAL; } - if (snd_BUG_ON(!snd_pcm_format_linear(format->format))) - return -ENXIO; + if (!snd_pcm_format_linear(format->format)) + return -EINVAL; err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion", src_format, dst_format, -- GitLab From 374843e4df7503ad71fd9db89df1a964958a9607 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Wed, 26 Aug 2020 20:03:06 +0300 Subject: [PATCH 0678/1304] ALSA: hda/hdmi: always check pin power status in i915 pin fixup commit 858e0ad9301d1270c02b5aca97537d2d6ee9dd68 upstream. When system is suspended with active audio playback to HDMI/DP, two alternative sequences can happen at resume: a) monitor is detected first and ALSA prepare follows normal stream setup sequence, or b) ALSA prepare is called first, but monitor is not yet detected, so PCM is restarted without a pin, In case of (b), on i915 systems, haswell_verify_D0() is not called at resume and the pin power state may be incorrect. Result is lack of audio after resume with no error reported back to user-space. Fix the problem by always verifying converter and pin state in the i915_pin_cvt_fixup(). 
BugLink: https://github.com/thesofproject/linux/issues/2388 Signed-off-by: Kai Vehmanen Cc: Link: https://lore.kernel.org/r/20200826170306.701566-1-kai.vehmanen@linux.intel.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_hdmi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 419d099b5582..b8e5f2b19ff8 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -2574,6 +2574,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec, hda_nid_t cvt_nid) { if (per_pin) { + haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid); snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id); intel_verify_pin_cvt_connect(codec, per_pin); -- GitLab From 3319b83f6cc68b709ec43eb90d8617be2d7fa834 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sun, 23 Aug 2020 16:55:45 +0900 Subject: [PATCH 0679/1304] ALSA: firewire-digi00x: exclude Avid Adrenaline from detection commit acd46a6b6de88569654567810acad2b0a0a25cea upstream. Avid Adrenaline is reported that ALSA firewire-digi00x driver is bound to. However, as long as he investigated, the design of this model is hardly similar to the one of Digi 00x family. It's better to exclude the model from modalias of ALSA firewire-digi00x driver. This commit changes device entries so that the model is excluded. 
$ python3 crpp < ~/git/am-config-rom/misc/avid-adrenaline.img ROM header and bus information block ----------------------------------------------------------------- 400 04203a9c bus_info_length 4, crc_length 32, crc 15004 404 31333934 bus_name "1394" 408 e064a002 irmc 1, cmc 1, isc 1, bmc 0, cyc_clk_acc 100, max_rec 10 (2048) 40c 00a07e01 company_id 00a07e | 410 00085257 device_id 0100085257 | EUI-64 00a07e0100085257 root directory ----------------------------------------------------------------- 414 0005d08c directory_length 5, crc 53388 418 0300a07e vendor 41c 8100000c --> descriptor leaf at 44c 420 0c008380 node capabilities 424 8d000002 --> eui-64 leaf at 42c 428 d1000004 --> unit directory at 438 eui-64 leaf at 42c ----------------------------------------------------------------- 42c 0002410f leaf_length 2, crc 16655 430 00a07e01 company_id 00a07e | 434 00085257 device_id 0100085257 | EUI-64 00a07e0100085257 unit directory at 438 ----------------------------------------------------------------- 438 0004d6c9 directory_length 4, crc 54985 43c 1200a02d specifier id: 1394 TA 440 13014001 version: Vender Unique and AV/C 444 17000001 model 448 81000009 --> descriptor leaf at 46c descriptor leaf at 44c ----------------------------------------------------------------- 44c 00077205 leaf_length 7, crc 29189 450 00000000 textual descriptor 454 00000000 minimal ASCII 458 41766964 "Avid" 45c 20546563 " Tec" 460 686e6f6c "hnol" 464 6f677900 "ogy" 468 00000000 descriptor leaf at 46c ----------------------------------------------------------------- 46c 000599a5 leaf_length 5, crc 39333 470 00000000 textual descriptor 474 00000000 minimal ASCII 478 41647265 "Adre" 47c 6e616c69 "nali" 480 6e650000 "ne" Reported-by: Simon Wood Fixes: 9edf723fd858 ("ALSA: firewire-digi00x: add skeleton for Digi 002/003 family") Cc: # 4.4+ Signed-off-by: Takashi Sakamoto Link: https://lore.kernel.org/r/20200823075545.56305-1-o-takashi@sakamocchi.jp Signed-off-by: Takashi Iwai Signed-off-by: Greg 
Kroah-Hartman --- sound/firewire/digi00x/digi00x.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index ef689997d6a5..bf53e342788e 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -15,6 +15,7 @@ MODULE_LICENSE("GPL v2"); #define VENDOR_DIGIDESIGN 0x00a07e #define MODEL_CONSOLE 0x000001 #define MODEL_RACK 0x000002 +#define SPEC_VERSION 0x000001 static int name_card(struct snd_dg00x *dg00x) { @@ -185,14 +186,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = { /* Both of 002/003 use the same ID. */ { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_CONSOLE, }, { .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_VERSION | IEEE1394_MATCH_MODEL_ID, .vendor_id = VENDOR_DIGIDESIGN, + .version = SPEC_VERSION, .model_id = MODEL_RACK, }, {} -- GitLab From c0a7b7fe0e0f7baa7c1779e401d293d176307c51 Mon Sep 17 00:00:00 2001 From: Dan Crawford Date: Sat, 29 Aug 2020 12:49:46 +1000 Subject: [PATCH 0680/1304] ALSA: hda - Fix silent audio output and corrupted input on MSI X570-A PRO commit 15cbff3fbbc631952c346744f862fb294504b5e2 upstream. Following Christian Lachner's patch for Gigabyte X570-based motherboards, also patch the MSI X570-A PRO motherboard; the ALC1220 codec requires the same workaround for Clevo laptops to enforce the DAC/mixer connection path. Set up a quirk entry for that. I suspect most if all X570 motherboards will require similar patches. 
[ The entries reordered in the SSID order -- tiwai ] Related buglink: https://bugzilla.kernel.org/show_bug.cgi?id=205275 Signed-off-by: Dan Crawford Cc: Link: https://lore.kernel.org/r/20200829024946.5691-1-dnlcrwfrd@gmail.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 9c5b3d19bfa7..8092fd5617fa 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2452,6 +2452,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), + SND_PCI_QUIRK(0x1462, 0x9c37, "MSI X570-A PRO", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), -- GitLab From 814f95011c5197683335df082761482667d5ba51 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Sat, 8 Aug 2020 13:38:02 +0200 Subject: [PATCH 0681/1304] media: rc: do not access device via sysfs after rc_unregister_device() commit a2e2d73fa28136598e84db9d021091f1b98cbb1a upstream. Device drivers do not expect to have change_protocol or wakeup re-programming to be accesed after rc_unregister_device(). This can cause the device driver to access deallocated resources. 
Cc: # 4.16+ Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/rc/rc-main.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index c30affbd43a9..c4e7aa23aac7 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1245,6 +1245,10 @@ static ssize_t store_protocols(struct device *device, } mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } old_protocols = *current_protocols; new_protocols = old_protocols; @@ -1383,6 +1387,10 @@ static ssize_t store_filter(struct device *device, return -EINVAL; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } new_filter = *filter; if (fattr->mask) @@ -1497,6 +1505,10 @@ static ssize_t store_wakeup_protocols(struct device *device, int i; mutex_lock(&dev->lock); + if (!dev->registered) { + mutex_unlock(&dev->lock); + return -ENODEV; + } allowed = dev->allowed_wakeup_protocols; -- GitLab From fd1c0e39c65dfc2c7a5274dd4d6de50c3c355025 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Sat, 8 Aug 2020 13:19:12 +0200 Subject: [PATCH 0682/1304] media: rc: uevent sysfs file races with rc_unregister_device() commit 4f0835d6677dc69263f90f976524cb92b257d9f4 upstream. Only report uevent file contents if device still registered, else we might read freed memory. 
Reported-by: syzbot+ceef16277388d6f24898@syzkaller.appspotmail.com Cc: Hillf Danton Cc: # 4.16+ Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/rc/rc-main.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index c4e7aa23aac7..cf3df733d960 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1568,25 +1568,25 @@ static void rc_dev_release(struct device *device) kfree(dev); } -#define ADD_HOTPLUG_VAR(fmt, val...) \ - do { \ - int err = add_uevent_var(env, fmt, val); \ - if (err) \ - return err; \ - } while (0) - static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env) { struct rc_dev *dev = to_rc_dev(device); + int ret = 0; - if (dev->rc_map.name) - ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name); - if (dev->driver_name) - ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name); - if (dev->device_name) - ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name); + mutex_lock(&dev->lock); - return 0; + if (!dev->registered) + ret = -ENODEV; + if (ret == 0 && dev->rc_map.name) + ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name); + if (ret == 0 && dev->driver_name) + ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name); + if (ret == 0 && dev->device_name) + ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name); + + mutex_unlock(&dev->lock); + + return ret; } /* @@ -1970,14 +1970,14 @@ void rc_unregister_device(struct rc_dev *dev) del_timer_sync(&dev->timer_keyup); del_timer_sync(&dev->timer_repeat); - rc_free_rx_device(dev); - mutex_lock(&dev->lock); if (dev->users && dev->close) dev->close(dev); dev->registered = false; mutex_unlock(&dev->lock); + rc_free_rx_device(dev); + /* * lirc device should be freed with dev->registered = false, so * that userspace polling will get notified. 
-- GitLab From b0a689f84d53a8b923302cfab10527ada27d962c Mon Sep 17 00:00:00 2001 From: Max Staudt Date: Thu, 27 Aug 2020 17:49:00 +0200 Subject: [PATCH 0683/1304] affs: fix basic permission bits to actually work commit d3a84a8d0dde4e26bc084b36ffcbdc5932ac85e2 upstream. The basic permission bits (protection bits in AmigaOS) have been broken in Linux' AFFS - it would only set bits, but never delete them. Also, contrary to the documentation, the Archived bit was not handled. Let's fix this for good, and set the bits such that Linux and classic AmigaOS can coexist in the most peaceful manner. Also, update the documentation to represent the current state of things. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable@vger.kernel.org Signed-off-by: Max Staudt Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- Documentation/filesystems/affs.txt | 16 ++++++++++------ fs/affs/amigaffs.c | 27 +++++++++++++++++++++++++++ fs/affs/file.c | 26 +++++++++++++++++++++++++- 3 files changed, 62 insertions(+), 7 deletions(-) diff --git a/Documentation/filesystems/affs.txt b/Documentation/filesystems/affs.txt index 71b63c2b9841..a8f1a58e3692 100644 --- a/Documentation/filesystems/affs.txt +++ b/Documentation/filesystems/affs.txt @@ -93,13 +93,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows: - R maps to r for user, group and others. On directories, R implies x. - - If both W and D are allowed, w will be set. + - W maps to w. - E maps to x. - - H and P are always retained and ignored under Linux. + - D is ignored. - - A is always reset when a file is written to. + - H, S and P are always retained and ignored under Linux. + + - A is cleared when a file is written to. User id and group id will be used unless set[gu]id are given as mount options. Since most of the Amiga file systems are single user systems @@ -111,11 +113,13 @@ Linux -> Amiga: The Linux rwxrwxrwx file mode is handled as follows: - - r permission will set R for user, group and others. 
+ - r permission will allow R for user, group and others. + + - w permission will allow W for user, group and others. - - w permission will set W and D for user, group and others. + - x permission of the user will allow E for plain files. - - x permission of the user will set E for plain files. + - D will be allowed for user, group and others. - All other flags (suid, sgid, ...) are ignored and will not be retained. diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index 14a6c1b90c9f..9a1e761b64a2 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode) u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; + /* + * First, clear all RWED bits for owner, group, other. + * Then, recalculate them afresh. + * + * We'll always clear the delete-inhibit bit for the owner, as that is + * the classic single-user mode AmigaOS protection bit and we need to + * stay compatible with all scenarios. + * + * Since multi-user AmigaOS is an extension, we'll only set the + * delete-allow bit if any of the other bits in the same user class + * (group/other) are used. + */ + prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD + | FIBF_NOWRITE | FIBF_NODELETE + | FIBF_GRP_EXECUTE | FIBF_GRP_READ + | FIBF_GRP_WRITE | FIBF_GRP_DELETE + | FIBF_OTR_EXECUTE | FIBF_OTR_READ + | FIBF_OTR_WRITE | FIBF_OTR_DELETE); + + /* Classic single-user AmigaOS flags. These are inverted. */ if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; if (!(mode & 0400)) prot |= FIBF_NOREAD; if (!(mode & 0200)) prot |= FIBF_NOWRITE; + + /* Multi-user extended flags. Not inverted. 
*/ if (mode & 0010) prot |= FIBF_GRP_EXECUTE; if (mode & 0040) prot |= FIBF_GRP_READ; if (mode & 0020) prot |= FIBF_GRP_WRITE; + if (mode & 0070) + prot |= FIBF_GRP_DELETE; + if (mode & 0001) prot |= FIBF_OTR_EXECUTE; if (mode & 0004) prot |= FIBF_OTR_READ; if (mode & 0002) prot |= FIBF_OTR_WRITE; + if (mode & 0007) + prot |= FIBF_OTR_DELETE; AFFS_I(inode)->i_protect = prot; } diff --git a/fs/affs/file.c b/fs/affs/file.c index a85817f54483..ba084b0b214b 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -428,6 +428,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, return ret; } +static int affs_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + int ret; + + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); + + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + + return ret; +} + static sector_t _affs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,affs_get_block); @@ -437,7 +455,7 @@ const struct address_space_operations affs_aops = { .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, - .write_end = generic_write_end, + .write_end = affs_write_end, .direct_IO = affs_direct_IO, .bmap = _affs_bmap }; @@ -794,6 +812,12 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + err_first_bh: unlock_page(page); put_page(page); -- GitLab From 
b48bcb664b657ae94b19c0728978c88e012f7a37 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 17 Aug 2020 18:00:55 +0800 Subject: [PATCH 0684/1304] block: allow for_each_bvec to support zero len bvec commit 7e24969022cbd61ddc586f14824fc205661bb124 upstream. Block layer usually doesn't support or allow zero-length bvec. Since commit 1bdc76aea115 ("iov_iter: use bvec iterator to implement iterate_bvec()"), iterate_bvec() switches to bvec iterator. However, Al mentioned that 'Zero-length segments are not disallowed' in iov_iter. Fixes for_each_bvec() so that it can move on after seeing one zero length bvec. Fixes: 1bdc76aea115 ("iov_iter: use bvec iterator to implement iterate_bvec()") Reported-by: syzbot Signed-off-by: Ming Lei Tested-by: Tetsuo Handa Cc: Al Viro Cc: Matthew Wilcox Cc: Link: https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2262077.html Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- include/linux/bvec.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/bvec.h b/include/linux/bvec.h index fe7a22dd133b..bc1f16e9f3f4 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -119,11 +119,18 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv, return true; } +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) +{ + iter->bi_bvec_done = 0; + iter->bi_idx++; +} + #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + (bvl).bv_len ? 
(void)bvec_iter_advance((bio_vec), &(iter), \ + (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) /* for iterating one bio from start to end */ #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ -- GitLab From a8bb7740aa313994bfa4c21cba399f65985a8a35 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 2 Sep 2020 12:32:45 -0400 Subject: [PATCH 0685/1304] libata: implement ATA_HORKAGE_MAX_TRIM_128M and apply to Sandisks commit 3b5455636fe26ea21b4189d135a424a6da016418 upstream. All three generations of Sandisk SSDs lock up hard intermittently. Experiments showed that disabling NCQ lowered the failure rate significantly and the kernel has been disabling NCQ for some models of SD7's and 8's, which is obviously undesirable. Karthik worked with Sandisk to root cause the hard lockups to trim commands larger than 128M. This patch implements ATA_HORKAGE_MAX_TRIM_128M which limits max trim size to 128M and applies it to all three generations of Sandisk SSDs. Signed-off-by: Tejun Heo Cc: Karthik Shivaram Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/ata/libata-core.c | 5 ++--- drivers/ata/libata-scsi.c | 8 +++++++- include/linux/libata.h | 1 + 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 6b372fa58382..fead7243930c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4492,9 +4492,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, - /* Some Sandisk SSDs lock up hard with NCQ enabled. 
Reported on - SD7SN6S256G and SD8SN8U256G */ - { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* Sandisk SD7/8/9s lock up hard on large trims */ + { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, }, /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 6c2c2b07f029..e7af41d95490 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2391,6 +2391,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) { + struct ata_device *dev = args->dev; u16 min_io_sectors; rbuf[1] = 0xb0; @@ -2416,7 +2417,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * with the unmap bit set. */ if (ata_id_has_trim(args->id)) { - put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]); + u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM; + + if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M) + max_blocks = 128 << (20 - SECTOR_SHIFT); + + put_unaligned_be64(max_blocks, &rbuf[36]); put_unaligned_be32(1, &rbuf[28]); } diff --git a/include/linux/libata.h b/include/linux/libata.h index ed1453c15041..afc1d72161ba 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -439,6 +439,7 @@ enum { ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ -- GitLab From 154096e9966123f198cfc2f959bc559c562fc6b7 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 24 Aug 2020 11:09:47 -0400 Subject: [PATCH 0686/1304] dm writecache: handle DAX to partitions on persistent memory correctly commit f9e040efcc28309e5c592f7e79085a9a52e31f58 
upstream. The function dax_direct_access doesn't take partitions into account, it always maps pages from the beginning of the device. Therefore, persistent_memory_claim() must get the partition offset using get_start_sect() and add it to the page offsets passed to dax_direct_access(). Signed-off-by: Mikulas Patocka Fixes: 48debafe4f2f ("dm: add writecache target") Cc: stable@vger.kernel.org # 4.18+ Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-writecache.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index cc028353f9d5..776aaf5951e4 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -226,6 +226,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) pfn_t pfn; int id; struct page **pages; + sector_t offset; wc->memory_vmapped = false; @@ -244,9 +245,16 @@ static int persistent_memory_claim(struct dm_writecache *wc) goto err1; } + offset = get_start_sect(wc->ssd_dev->bdev); + if (offset & (PAGE_SIZE / 512 - 1)) { + r = -EINVAL; + goto err1; + } + offset >>= PAGE_SHIFT - 9; + id = dax_read_lock(); - da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn); + da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn); if (da < 0) { wc->memory_map = NULL; r = da; @@ -268,7 +276,7 @@ static int persistent_memory_claim(struct dm_writecache *wc) i = 0; do { long daa; - daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i, + daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i, NULL, &pfn); if (daa <= 0) { r = daa ? daa : -EINVAL; -- GitLab From 67f03c3d6829cf9c4b4015b24326a2cb7b8d6664 Mon Sep 17 00:00:00 2001 From: Ye Bin Date: Tue, 1 Sep 2020 14:25:42 +0800 Subject: [PATCH 0687/1304] dm cache metadata: Avoid returning cmd->bm wild pointer on error commit d16ff19e69ab57e08bf908faaacbceaf660249de upstream. 
Maybe __create_persistent_data_objects() caller will use PTR_ERR as a pointer, it will lead to some strange things. Signed-off-by: Ye Bin Cc: stable@vger.kernel.org Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-cache-metadata.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 151aa95775be..af6d4f898e4c 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, CACHE_MAX_CONCURRENT_LOCKS); if (IS_ERR(cmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(cmd->bm); + r = PTR_ERR(cmd->bm); + cmd->bm = NULL; + return r; } r = __open_or_format_metadata(cmd, may_format_device); - if (r) + if (r) { dm_block_manager_destroy(cmd->bm); + cmd->bm = NULL; + } return r; } -- GitLab From 2c00ee626ed48c6ba1f10b8350a8c118846db9ce Mon Sep 17 00:00:00 2001 From: Ye Bin Date: Tue, 1 Sep 2020 14:25:43 +0800 Subject: [PATCH 0688/1304] dm thin metadata: Avoid returning cmd->bm wild pointer on error commit 219403d7e56f9b716ad80ab87db85d29547ee73e upstream. Maybe __create_persistent_data_objects() caller will use PTR_ERR as a pointer, it will lead to some strange things. 
Signed-off-by: Ye Bin Cc: stable@vger.kernel.org Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-thin-metadata.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 6a26afcc1fd6..85077f4d257a 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -698,12 +698,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f THIN_MAX_CONCURRENT_LOCKS); if (IS_ERR(pmd->bm)) { DMERR("could not create block manager"); - return PTR_ERR(pmd->bm); + r = PTR_ERR(pmd->bm); + pmd->bm = NULL; + return r; } r = __open_or_format_metadata(pmd, format_device); - if (r) + if (r) { dm_block_manager_destroy(pmd->bm); + pmd->bm = NULL; + } return r; } -- GitLab From af2cf2c5a268071dffd53f493ef51fd85891cc2e Mon Sep 17 00:00:00 2001 From: Eugeniu Rosca Date: Fri, 4 Sep 2020 16:35:30 -0700 Subject: [PATCH 0689/1304] mm: slub: fix conversion of freelist_corrupted() commit dc07a728d49cf025f5da2c31add438d839d076c0 upstream. Commit 52f23478081ae0 ("mm/slub.c: fix corrupted freechain in deactivate_slab()") suffered an update when picked up from LKML [1]. Specifically, relocating 'freelist = NULL' into 'freelist_corrupted()' created a no-op statement. Fix it by sticking to the behavior intended in the original patch [1]. In addition, make freelist_corrupted() immune to passing NULL instead of &freelist. The issue has been spotted via static analysis and code review. 
[1] https://lore.kernel.org/linux-mm/20200331031450.12182-1-dongli.zhang@oracle.com/ Fixes: 52f23478081ae0 ("mm/slub.c: fix corrupted freechain in deactivate_slab()") Signed-off-by: Eugeniu Rosca Signed-off-by: Andrew Morton Cc: Dongli Zhang Cc: Joe Jin Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Link: https://lkml.kernel.org/r/20200824130643.10291-1-erosca@de.adit-jv.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/slub.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index 882a1e0ae89c..dfc9b4267603 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -646,12 +646,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...) } static bool freelist_corrupted(struct kmem_cache *s, struct page *page, - void *freelist, void *nextfree) + void **freelist, void *nextfree) { if ((s->flags & SLAB_CONSISTENCY_CHECKS) && - !check_valid_pointer(s, page, nextfree)) { - object_err(s, page, freelist, "Freechain corrupt"); - freelist = NULL; + !check_valid_pointer(s, page, nextfree) && freelist) { + object_err(s, page, *freelist, "Freechain corrupt"); + *freelist = NULL; slab_fix(s, "Isolate corrupted freechain"); return true; } @@ -1343,7 +1343,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) {} static bool freelist_corrupted(struct kmem_cache *s, struct page *page, - void *freelist, void *nextfree) + void **freelist, void *nextfree) { return false; } @@ -2037,7 +2037,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, * 'freelist' is already corrupted. So isolate all objects * starting at 'freelist'. 
*/ - if (freelist_corrupted(s, page, freelist, nextfree)) + if (freelist_corrupted(s, page, &freelist, nextfree)) break; do { -- GitLab From 3290c6ffef87e5acf213e90cb5013bf744e5b607 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 21 Aug 2020 15:07:05 +0100 Subject: [PATCH 0690/1304] KVM: arm64: Add kvm_extable for vaxorcism code commit e9ee186bb735bfc17fa81dbc9aebf268aee5b41e upstream. KVM has a one instruction window where it will allow an SError exception to be consumed by the hypervisor without treating it as a hypervisor bug. This is used to consume asynchronous external abort that were caused by the guest. As we are about to add another location that survives unexpected exceptions, generalise this code to make it behave like the host's extable. KVM's version has to be mapped to EL2 to be accessible on nVHE systems. The SError vaxorcism code is a one instruction window, so has two entries in the extable. Because the KVM code is copied for VHE and nVHE, we end up with four entries, half of which correspond with code that isn't mapped. Signed-off-by: James Morse Reviewed-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Andre Przywara Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/kvm_asm.h | 15 ++++++++++ arch/arm64/kernel/vmlinux.lds.S | 8 +++++ arch/arm64/kvm/hyp/entry.S | 16 ++++++---- arch/arm64/kvm/hyp/hyp-entry.S | 51 +++++++++++++++++++------------- arch/arm64/kvm/hyp/switch.c | 31 +++++++++++++++++++ 5 files changed, 95 insertions(+), 26 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 102b5a5c47b6..ed701f5c2d9a 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -111,6 +111,21 @@ extern u32 __init_stage2_translation(void); kern_hyp_va \vcpu .endm +/* + * KVM extable for unexpected exceptions. + * In the same format _asm_extable, but output to a different section so that + * it can be mapped to EL2. The KVM version is not sorted. 
The caller must + * ensure: + * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented + * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup. + */ +.macro _kvm_extable, from, to + .pushsection __kvm_ex_table, "a" + .align 3 + .long (\from - .), (\to - .) + .popsection +.endm + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index d6050c6e65bc..69e7c8d4a00f 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -24,6 +24,13 @@ ENTRY(_text) jiffies = jiffies_64; + +#define HYPERVISOR_EXTABLE \ + . = ALIGN(SZ_8); \ + __start___kvm_ex_table = .; \ + *(__kvm_ex_table) \ + __stop___kvm_ex_table = .; + #define HYPERVISOR_TEXT \ /* \ * Align to 4 KB so that \ @@ -39,6 +46,7 @@ jiffies = jiffies_64; __hyp_idmap_text_end = .; \ __hyp_text_start = .; \ *(.hyp.text) \ + HYPERVISOR_EXTABLE \ __hyp_text_end = .; #define IDMAP_TEXT \ diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index fad1e164fe48..c21d279a5aab 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -148,18 +148,22 @@ alternative_endif // This is our single instruction exception window. A pending // SError is guaranteed to occur at the earliest when we unmask // it, and at the latest just after the ISB. - .global abort_guest_exit_start abort_guest_exit_start: isb - .global abort_guest_exit_end abort_guest_exit_end: + msr daifset, #4 // Mask aborts + ret - // If the exception took place, restore the EL1 exception - // context so that we can report some information. - // Merge the exception code with the SError pending bit. - tbz x0, #ARM_EXIT_WITH_SERROR_BIT, 1f + _kvm_extable abort_guest_exit_start, 9997f + _kvm_extable abort_guest_exit_end, 9997f +9997: + msr daifset, #4 // Mask aborts + mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) + + // restore the EL1 exception context so that we can report some + // information. 
Merge the exception code with the SError pending bit. msr elr_el2, x2 msr esr_el2, x3 msr spsr_el2, x4 diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 24b4fbafe3e4..68c73c5d780c 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -26,6 +26,30 @@ #include #include +.macro save_caller_saved_regs_vect + /* x0 and x1 were saved in the vector entry */ + stp x2, x3, [sp, #-16]! + stp x4, x5, [sp, #-16]! + stp x6, x7, [sp, #-16]! + stp x8, x9, [sp, #-16]! + stp x10, x11, [sp, #-16]! + stp x12, x13, [sp, #-16]! + stp x14, x15, [sp, #-16]! + stp x16, x17, [sp, #-16]! +.endm + +.macro restore_caller_saved_regs_vect + ldp x16, x17, [sp], #16 + ldp x14, x15, [sp], #16 + ldp x12, x13, [sp], #16 + ldp x10, x11, [sp], #16 + ldp x8, x9, [sp], #16 + ldp x6, x7, [sp], #16 + ldp x4, x5, [sp], #16 + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +.endm + .text .pushsection .hyp.text, "ax" @@ -163,27 +187,14 @@ el1_error: b __guest_exit el2_error: - ldp x0, x1, [sp], #16 + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! + + bl kvm_unexpected_el2_exception + + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect - /* - * Only two possibilities: - * 1) Either we come from the exit path, having just unmasked - * PSTATE.A: change the return code to an EL2 fault, and - * carry on, as we're already in a sane state to handle it. - * 2) Or we come from anywhere else, and that's a bug: we panic. - * - * For (1), x0 contains the original return code and x1 doesn't - * contain anything meaningful at that stage. We can reuse them - * as temp registers. - * For (2), who cares? 
- */ - mrs x0, elr_el2 - adr x1, abort_guest_exit_start - cmp x0, x1 - adr x1, abort_guest_exit_end - ccmp x0, x1, #4, ne - b.ne __hyp_panic - mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT) eret ENTRY(__hyp_do_panic) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 3cdefd84af54..a4f8dfe5d779 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -34,6 +35,9 @@ #include #include +extern struct exception_table_entry __start___kvm_ex_table; +extern struct exception_table_entry __stop___kvm_ex_table; + /* Check whether the FP regs were dirtied while in the host-side run loop: */ static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu) { @@ -663,3 +667,30 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) unreachable(); } + +asmlinkage void __hyp_text kvm_unexpected_el2_exception(void) +{ + unsigned long addr, fixup; + struct kvm_cpu_context *host_ctxt; + struct exception_table_entry *entry, *end; + unsigned long elr_el2 = read_sysreg(elr_el2); + + entry = hyp_symbol_addr(__start___kvm_ex_table); + end = hyp_symbol_addr(__stop___kvm_ex_table); + host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state); + + while (entry < end) { + addr = (unsigned long)&entry->insn + entry->insn; + fixup = (unsigned long)&entry->fixup + entry->fixup; + + if (addr != elr_el2) { + entry++; + continue; + } + + write_sysreg(fixup, elr_el2); + return; + } + + hyp_panic(host_ctxt); +} -- GitLab From 204f38310ff14ff2ed68428afa3aca012920faa9 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 2 Sep 2020 11:08:19 +0100 Subject: [PATCH 0691/1304] KVM: arm64: Defer guest entry when an asynchronous exception is pending commit 5dcd0fdbb492d49dac6bf21c436dfcb5ded0a895 upstream. SError that occur during world-switch's entry to the guest will be accounted to the guest, as the exception is masked until we enter the guest... 
but we want to attribute the SError as precisely as possible. Reading DISR_EL1 before guest entry requires free registers, and using ESB+DISR_EL1 to consume and read back the ESR would leave KVM holding a host SError... We would rather leave the SError pending and let the host take it once we exit world-switch. To do this, we need to defer guest-entry if an SError is pending. Read the ISR to see if SError (or an IRQ) is pending. If so fake an exit. Place this check between __guest_enter()'s save of the host registers, and restore of the guest's. SError that occur between here and the eret into the guest must have affected the guest's registers, which we can naturally attribute to the guest. The dsb is needed to ensure any previous writes have been done before we read ISR_EL1. On systems without the v8.2 RAS extensions this doesn't give us anything as we can't contain errors, and the ESR bits to describe the severity are all implementation-defined. Replace this with a nop for these systems. Cc: stable@vger.kernel.org # v4.19 Signed-off-by: James Morse Signed-off-by: Marc Zyngier Signed-off-by: Andre Przywara Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kvm/hyp/entry.S | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index c21d279a5aab..fc83e932afbe 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -17,6 +17,7 @@ #include +#include #include #include #include @@ -62,6 +63,20 @@ ENTRY(__guest_enter) // Store the host regs save_callee_saved_regs x1 + // Now the host state is stored if we have a pending RAS SError it must + // affect the host. If any asynchronous exception is pending we defer + // the guest entry. The DSB isn't necessary before v8.2 as any SError + // would be fatal. 
+alternative_if ARM64_HAS_RAS_EXTN + dsb nshst + isb +alternative_else_nop_endif + mrs x1, isr_el1 + cbz x1, 1f + mov x0, #ARM_EXCEPTION_IRQ + ret + +1: add x18, x0, #VCPU_CONTEXT // Restore guest regs x0-x17 -- GitLab From 4df1ff5f836b21c65e799b507e649c3ece7a6c89 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 2 Sep 2020 11:08:20 +0100 Subject: [PATCH 0692/1304] KVM: arm64: Survive synchronous exceptions caused by AT instructions commit 88a84ccccb3966bcc3f309cdb76092a9892c0260 upstream. KVM doesn't expect any synchronous exceptions when executing, any such exception leads to a panic(). AT instructions access the guest page tables, and can cause a synchronous external abort to be taken. The arm-arm is unclear on what should happen if the guest has configured the hardware update of the access-flag, and a memory type in TCR_EL1 that does not support atomic operations. B2.2.6 "Possible implementation restrictions on using atomic instructions" from DDI0487F.a lists synchronous external abort as a possible behaviour of atomic instructions that target memory that isn't writeback cacheable, but the page table walker may behave differently. Make KVM robust to synchronous exceptions caused by AT instructions. Add a get_user() style helper for AT instructions that returns -EFAULT if an exception was generated. While KVM's version of the exception table mixes synchronous and asynchronous exceptions, only one of these can occur at each location. Re-enter the guest when the AT instructions take an exception on the assumption the guest will take the same exception. This isn't guaranteed to make forward progress, as the AT instructions may always walk the page tables, but guest execution may use the translation cached in the TLB. This isn't a problem, as since commit 5dcd0fdbb492 ("KVM: arm64: Defer guest entry when an asynchronous exception is pending"), KVM will return to the host to process IRQs allowing the rest of the system to keep running. 
Cc: stable@vger.kernel.org # v4.19 Signed-off-by: James Morse Reviewed-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Andre Przywara Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/kvm_asm.h | 28 ++++++++++++++++++++++++++++ arch/arm64/kvm/hyp/hyp-entry.S | 12 ++++++++++-- arch/arm64/kvm/hyp/switch.c | 8 ++++---- 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index ed701f5c2d9a..e3c0dba5bdde 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -87,6 +87,34 @@ extern u32 __init_stage2_translation(void); *__hyp_this_cpu_ptr(sym); \ }) +#define __KVM_EXTABLE(from, to) \ + " .pushsection __kvm_ex_table, \"a\"\n" \ + " .align 3\n" \ + " .long (" #from " - .), (" #to " - .)\n" \ + " .popsection\n" + + +#define __kvm_at(at_op, addr) \ +( { \ + int __kvm_at_err = 0; \ + u64 spsr, elr; \ + asm volatile( \ + " mrs %1, spsr_el2\n" \ + " mrs %2, elr_el2\n" \ + "1: at "at_op", %3\n" \ + " isb\n" \ + " b 9f\n" \ + "2: msr spsr_el2, %1\n" \ + " msr elr_el2, %2\n" \ + " mov %w0, %4\n" \ + "9:\n" \ + __KVM_EXTABLE(1b, 2b) \ + : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr) \ + : "r" (addr), "i" (-EFAULT)); \ + __kvm_at_err; \ +} ) + + #else /* __ASSEMBLY__ */ .macro hyp_adr_this_cpu reg, sym, tmp diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 68c73c5d780c..ea063312bca1 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -186,6 +186,15 @@ el1_error: mov x0, #ARM_EXCEPTION_EL1_SERROR b __guest_exit +el2_sync: + save_caller_saved_regs_vect + stp x29, x30, [sp, #-16]! + bl kvm_unexpected_el2_exception + ldp x29, x30, [sp], #16 + restore_caller_saved_regs_vect + + eret + el2_error: save_caller_saved_regs_vect stp x29, x30, [sp, #-16]! 
@@ -223,7 +232,6 @@ ENDPROC(\label) invalid_vector el2t_irq_invalid invalid_vector el2t_fiq_invalid invalid_vector el2t_error_invalid - invalid_vector el2h_sync_invalid invalid_vector el2h_irq_invalid invalid_vector el2h_fiq_invalid invalid_vector el1_fiq_invalid @@ -251,7 +259,7 @@ ENTRY(__kvm_hyp_vector) invalid_vect el2t_fiq_invalid // FIQ EL2t invalid_vect el2t_error_invalid // Error EL2t - invalid_vect el2h_sync_invalid // Synchronous EL2h + valid_vect el2_sync // Synchronous EL2h invalid_vect el2h_irq_invalid // IRQ EL2h invalid_vect el2h_fiq_invalid // FIQ EL2h valid_vect el2_error // Error EL2h diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index a4f8dfe5d779..f146bff53edf 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -268,10 +268,10 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) * saved the guest context yet, and we may return early... */ par = read_sysreg(par_el1); - asm volatile("at s1e1r, %0" : : "r" (far)); - isb(); - - tmp = read_sysreg(par_el1); + if (!__kvm_at("s1e1r", far)) + tmp = read_sysreg(par_el1); + else + tmp = 1; /* back to the guest */ write_sysreg(par, par_el1); if (unlikely(tmp & 1)) -- GitLab From dcaf364fcad9182f6456ffe5b8df0563115fe5ad Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 2 Sep 2020 11:08:21 +0100 Subject: [PATCH 0693/1304] KVM: arm64: Set HCR_EL2.PTW to prevent AT taking synchronous exception commit 71a7f8cb1ca4ca7214a700b1243626759b6c11d4 upstream. AT instructions do a translation table walk and return the result, or the fault in PAR_EL1. KVM uses these to find the IPA when the value is not provided by the CPU in HPFAR_EL1. If a translation table walk causes an external abort it is taken as an exception, even if it was due to an AT instruction. 
(DDI0487F.a's D5.2.11 "Synchronous faults generated by address translation instructions") While we previously made KVM resilient to exceptions taken due to AT instructions, the device access causes mismatched attributes, and may occur speculatively. Prevent this, by forbidding a walk through memory described as device at stage2. Now such AT instructions will report a stage2 fault. Such a fault will cause KVM to restart the guest. If the AT instructions always walk the page tables, but guest execution uses the translation cached in the TLB, the guest can't make forward progress until the TLB entry is evicted. This isn't a problem, as since commit 5dcd0fdbb492 ("KVM: arm64: Defer guest entry when an asynchronous exception is pending"), KVM will return to the host to process IRQs allowing the rest of the system to keep running. Cc: stable@vger.kernel.org # v4.19 Signed-off-by: James Morse Reviewed-by: Marc Zyngier Signed-off-by: Catalin Marinas Signed-off-by: Andre Przywara Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/kvm_arm.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 8b284cbf8162..a3b6f58d188c 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -83,11 +83,12 @@ * IMO: Override CPSR.I and enable signaling with VI * FMO: Override CPSR.F and enable signaling with VF * SWIO: Turn set/way invalidates into set/way clean+invalidate + * PTW: Take a stage2 fault if a stage1 walk steps in device memory */ #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \ - HCR_FMO | HCR_IMO) + HCR_FMO | HCR_IMO | HCR_PTW ) #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF) #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) -- GitLab From 
81fb345971c404c796ba28b1bae0b555c35d1376 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Thu, 25 Jun 2020 11:04:23 -0600 Subject: [PATCH 0694/1304] vfio/pci: Fix SR-IOV VF handling with MMIO blocking commit ebfa440ce38b7e2e04c3124aa89c8a9f4094cf21 upstream. SR-IOV VFs do not implement the memory enable bit of the command register, therefore this bit is not set in config space after pci_enable_device(). This leads to an unintended difference between PF and VF in hand-off state to the user. We can correct this by setting the initial value of the memory enable bit in our virtualized config space. There's really no need however to ever fault a user on a VF though as this would only indicate an error in the user's management of the enable bit, versus a PF where the same access could trigger hardware faults. Fixes: abafbc551fdd ("vfio-pci: Invalidate mmaps and block MMIO access on disabled memory") Signed-off-by: Alex Williamson Signed-off-by: Greg Kroah-Hartman --- drivers/vfio/pci/vfio_pci_config.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 4fe71fbce194..a1a26465d224 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -401,9 +401,15 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write) /* Caller should hold memory_lock semaphore */ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev) { + struct pci_dev *pdev = vdev->pdev; u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); - return cmd & PCI_COMMAND_MEMORY; + /* + * SR-IOV VF memory enable is handled by the MSE bit in the + * PF SR-IOV capability, there's therefore no need to trigger + * faults based on the virtual value. 
+ */ + return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY); } /* @@ -1732,6 +1738,15 @@ int vfio_config_init(struct vfio_pci_device *vdev) vconfig[PCI_INTERRUPT_PIN]); vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */ + + /* + * VFs do no implement the memory enable bit of the COMMAND + * register therefore we'll not have it set in our initial + * copy of config space after pci_enable_device(). For + * consistency with PFs, set the virtual enable bit here. + */ + *(__le16 *)&vconfig[PCI_COMMAND] |= + cpu_to_le16(PCI_COMMAND_MEMORY); } if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) -- GitLab From c5927539f17dd0c15f5760003d25ad83c107e053 Mon Sep 17 00:00:00 2001 From: Mrinal Pandey Date: Fri, 4 Sep 2020 16:35:52 -0700 Subject: [PATCH 0695/1304] checkpatch: fix the usage of capture group ( ... ) commit 13e45417cedbfc44b1926124b1846f5ee8c6ba4a upstream. The usage of "capture group (...)" in the immediate condition after `&&` results in `$1` being uninitialized. This issues a warning "Use of uninitialized value $1 in regexp compilation at ./scripts/checkpatch.pl line 2638". I noticed this bug while running checkpatch on the set of commits from v5.7 to v5.8-rc1 of the kernel on the commits with a diff content in their commit message. This bug was introduced in the script by commit e518e9a59ec3 ("checkpatch: emit an error when there's a diff in a changelog"). It has been in the script since then. The author intended to store the match made by capture group in variable `$1`. This should have contained the name of the file as `[\w/]+` matched. However, this couldn't be accomplished due to usage of capture group and `$1` in the same regular expression. Fix this by placing the capture group in the condition before `&&`. Thus, `$1` can be initialized to the text that capture group matches thereby setting it to the desired and required value. 
Fixes: e518e9a59ec3 ("checkpatch: emit an error when there's a diff in a changelog") Signed-off-by: Mrinal Pandey Signed-off-by: Andrew Morton Tested-by: Lukas Bulwahn Reviewed-by: Lukas Bulwahn Cc: Joe Perches Link: https://lkml.kernel.org/r/20200714032352.f476hanaj2dlmiot@mrinalpandey Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- scripts/checkpatch.pl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 161b0224d6ae..7eb944cbbaea 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2541,8 +2541,8 @@ sub process { # Check if the commit log has what seems like a diff which can confuse patch if ($in_commit_log && !$commit_log_has_diff && - (($line =~ m@^\s+diff\b.*a/[\w/]+@ && - $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) || + (($line =~ m@^\s+diff\b.*a/([\w/]+)@ && + $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) || $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ || $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) { ERROR("DIFF_IN_COMMIT_MSG", -- GitLab From 221ea9a3da9169dc3c9a364a5f938e215db6419e Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Fri, 4 Sep 2020 16:36:13 -0700 Subject: [PATCH 0696/1304] mm/hugetlb: fix a race between hugetlb sysctl handlers commit 17743798d81238ab13050e8e2833699b54e15467 upstream. There is a race between the assignment of `table->data` and write value to the pointer of `table->data` in the __do_proc_doulongvec_minmax() on the other thread. CPU0: CPU1: proc_sys_write hugetlb_sysctl_handler proc_sys_call_handler hugetlb_sysctl_handler_common hugetlb_sysctl_handler table->data = &tmp; hugetlb_sysctl_handler_common table->data = &tmp; proc_doulongvec_minmax do_proc_doulongvec_minmax sysctl_head_finish __do_proc_doulongvec_minmax unuse_table i = table->data; *i = val; // corrupt CPU1's stack Fix this by duplicating the `table`, and only update the duplicate of it. 
And introduce a helper of proc_hugetlb_doulongvec_minmax() to simplify the code. The following oops was seen: BUG: kernel NULL pointer dereference, address: 0000000000000000 #PF: supervisor instruction fetch in kernel mode #PF: error_code(0x0010) - not-present page Code: Bad RIP value. ... Call Trace: ? set_max_huge_pages+0x3da/0x4f0 ? alloc_pool_huge_page+0x150/0x150 ? proc_doulongvec_minmax+0x46/0x60 ? hugetlb_sysctl_handler_common+0x1c7/0x200 ? nr_hugepages_store+0x20/0x20 ? copy_fd_bitmaps+0x170/0x170 ? hugetlb_sysctl_handler+0x1e/0x20 ? proc_sys_call_handler+0x2f1/0x300 ? unregister_sysctl_table+0xb0/0xb0 ? __fd_install+0x78/0x100 ? proc_sys_write+0x14/0x20 ? __vfs_write+0x4d/0x90 ? vfs_write+0xef/0x240 ? ksys_write+0xc0/0x160 ? __ia32_sys_read+0x50/0x50 ? __close_fd+0x129/0x150 ? __x64_sys_write+0x43/0x50 ? do_syscall_64+0x6c/0x200 ? entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: e5ff215941d5 ("hugetlb: multiple hstates for multiple page sizes") Signed-off-by: Muchun Song Signed-off-by: Andrew Morton Reviewed-by: Mike Kravetz Cc: Andi Kleen Link: http://lkml.kernel.org/r/20200828031146.43035-1-songmuchun@bytedance.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/hugetlb.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 8a5708f31aa0..27e49c5ec219 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2918,6 +2918,22 @@ static unsigned int cpuset_mems_nr(unsigned int *array) } #ifdef CONFIG_SYSCTL +static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, + void *buffer, size_t *length, + loff_t *ppos, unsigned long *out) +{ + struct ctl_table dup_table; + + /* + * In order to avoid races with __do_proc_doulongvec_minmax(), we + * can duplicate the @table and alter the duplicate of it. 
+ */ + dup_table = *table; + dup_table.data = out; + + return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); +} + static int hugetlb_sysctl_handler_common(bool obey_mempolicy, struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) @@ -2929,9 +2945,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy, if (!hugepages_supported()) return -EOPNOTSUPP; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; @@ -2975,9 +2990,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write, if (write && hstate_is_gigantic(h)) return -EINVAL; - table->data = &tmp; - table->maxlen = sizeof(unsigned long); - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos); + ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, + &tmp); if (ret) goto out; -- GitLab From 1b097d5d41a19cca34116ce8ebef566c9c61c482 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 19 Aug 2020 10:46:48 +0200 Subject: [PATCH 0697/1304] cfg80211: regulatory: reject invalid hints commit 47caf685a6854593348f216e0b489b71c10cbe03 upstream. Reject invalid hints early in order to not cause a kernel WARN later if they're restored to or similar. 
Reported-by: syzbot+d451401ffd00a60677ee@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?extid=d451401ffd00a60677ee Link: https://lore.kernel.org/r/20200819084648.13956-1-johannes@sipsolutions.net Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/wireless/reg.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 32f575857e41..935aebf15010 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2936,6 +2936,9 @@ int regulatory_hint_user(const char *alpha2, if (WARN_ON(!alpha2)) return -EINVAL; + if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) + return -EINVAL; + request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; -- GitLab From 4a986bc9bef88e885c1e0202d424af39c77e3ab5 Mon Sep 17 00:00:00 2001 From: Himadri Pandya Date: Thu, 27 Aug 2020 12:23:55 +0530 Subject: [PATCH 0698/1304] net: usb: Fix uninit-was-stored issue in asix_read_phy_addr() commit a092b7233f0e000cc6f2c71a49e2ecc6f917a5fc upstream. The buffer size is 2 Bytes and we expect to receive the same amount of data. But sometimes we receive less data and run into uninit-was-stored issue upon read. Hence modify the error check on the return value to match with the buffer size as a prevention. Reported-and-tested by: syzbot+a7e220df5a81d1ab400e@syzkaller.appspotmail.com Signed-off-by: Himadri Pandya Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/asix_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 023b8d0bf175..8d27786acad9 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -309,7 +309,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal) netdev_dbg(dev->net, "asix_get_phy_addr()\n"); - if (ret < 0) { + if (ret < 2) { netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret); goto out; } -- GitLab From 67957f12548c785d0e0b14fd104d2297f3a71835 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 9 Sep 2020 19:04:32 +0200 Subject: [PATCH 0699/1304] Linux 4.19.144 Tested-by: Shuah Khan Tested-by: Jon Hunter Tested-by: Linux Kernel Functional Testing Tested-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6fa3278df77c..ba9d0b4476e1 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 143 +SUBLEVEL = 144 EXTRAVERSION = NAME = "People's Front" -- GitLab From 3b165ab697b5740b541978aa3114060bcf646958 Mon Sep 17 00:00:00 2001 From: Martijn Coenen Date: Fri, 21 Aug 2020 14:25:44 +0200 Subject: [PATCH 0700/1304] FROMGIT: binder: print warnings when detecting oneway spamming. The most common cause of the binder transaction buffer filling up is a client rapidly firing oneway transactions into a process, before it has a chance to handle them. Yet the root cause of this is often hard to debug, because either the system or the app will stop, and by that time binder debug information we dump in bugreports is no longer relevant. 
This change warns as soon as a process dips below 80% of its oneway space (less than 100kB available in the configuration), when any one process is responsible for either more than 50 transactions, or more than 50% of the oneway space. Signed-off-by: Martijn Coenen Signed-off-by: Martijn Coenen Acked-by: Todd Kjos Link: https://lore.kernel.org/r/20200821122544.1277051-1-maco@android.com Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 261e7818f06ec51e488e007f787ccd7e77272918 git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git/ char-misc-next) Signed-off-by: Martijn Coenen Bug: 147795659 Change-Id: Idc2b03ddc779880ca4716fdae47a70df43211f25 --- drivers/android/binder.c | 2 +- drivers/android/binder_alloc.c | 55 +++++++++++++++++++++++-- drivers/android/binder_alloc.h | 5 ++- drivers/android/binder_alloc_selftest.c | 2 +- 4 files changed, 58 insertions(+), 6 deletions(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 90a13e370ac6..555d9e915f53 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3238,7 +3238,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, - !reply && (t->flags & TF_ONE_WAY)); + !reply && (t->flags & TF_ONE_WAY), current->tgid); if (IS_ERR(t->buffer)) { /* * -ESRCH indicates VMA cleared. The target is dying. 
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index e68f9910c3b3..f0ff5fc9d7ea 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -347,12 +347,50 @@ static inline struct vm_area_struct *binder_alloc_get_vma( return vma; } +static void debug_low_async_space_locked(struct binder_alloc *alloc, int pid) +{ + /* + * Find the amount and size of buffers allocated by the current caller; + * The idea is that once we cross the threshold, whoever is responsible + * for the low async space is likely to try to send another async txn, + * and at some point we'll catch them in the act. This is more efficient + * than keeping a map per pid. + */ + struct rb_node *n = alloc->free_buffers.rb_node; + struct binder_buffer *buffer; + size_t total_alloc_size = 0; + size_t num_buffers = 0; + + for (n = rb_first(&alloc->allocated_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + if (buffer->pid != pid) + continue; + if (!buffer->async_transaction) + continue; + total_alloc_size += binder_alloc_buffer_size(alloc, buffer) + + sizeof(struct binder_buffer); + num_buffers++; + } + + /* + * Warn if this pid has more than 50 transactions, or more than 50% of + * async space (which is 25% of total buffer size). + */ + if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%d: pid %d spamming oneway? 
%zd buffers allocated for a total size of %zd\n", + alloc->pid, pid, num_buffers, total_alloc_size); + } +} + static struct binder_buffer *binder_alloc_new_buf_locked( struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async) + int is_async, + int pid) { struct rb_node *n = alloc->free_buffers.rb_node; struct binder_buffer *buffer; @@ -495,11 +533,20 @@ static struct binder_buffer *binder_alloc_new_buf_locked( buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; buffer->extra_buffers_size = extra_buffers_size; + buffer->pid = pid; if (is_async) { alloc->free_async_space -= size + sizeof(struct binder_buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_alloc_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); + if (alloc->free_async_space < alloc->buffer_size / 10) { + /* + * Start detecting spammers once we have less than 20% + * of async space left (which is less than 10% of total + * buffer size). + */ + debug_low_async_space_locked(alloc, pid); + } } return buffer; @@ -517,6 +564,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( * @offsets_size: user specified buffer offset * @extra_buffers_size: size of extra space for meta-data (eg, security context) * @is_async: buffer for async transaction + * @pid: pid to attribute allocation to (used for debugging) * * Allocate a new buffer given the requested sizes. Returns * the kernel version of the buffer pointer. 
The size allocated @@ -529,13 +577,14 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async) + int is_async, + int pid) { struct binder_buffer *buffer; mutex_lock(&alloc->mutex); buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, - extra_buffers_size, is_async); + extra_buffers_size, is_async, pid); mutex_unlock(&alloc->mutex); return buffer; } diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index b60d161b7a7a..3daa3e211267 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -41,6 +41,7 @@ struct binder_transaction; * @offsets_size: size of array of offsets * @extra_buffers_size: size of space for other objects (like sg lists) * @user_data: user pointer to base of buffer space + * @pid: pid to attribute the buffer to (caller) * * Bookkeeping structure for binder transaction buffers */ @@ -60,6 +61,7 @@ struct binder_buffer { size_t offsets_size; size_t extra_buffers_size; void __user *user_data; + int pid; }; /** @@ -126,7 +128,8 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async); + int is_async, + int pid); extern void binder_alloc_init(struct binder_alloc *alloc); extern int binder_alloc_shrinker_init(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index b72708918b06..c839c490fde3 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -128,7 +128,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc, int i; for (i = 0; i < BUFFER_NUM; i++) { - buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0); + buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0); if (IS_ERR(buffers[i]) || 
!check_buffer_pages_allocated(alloc, buffers[i], sizes[i])) { -- GitLab From 9244e838ac0d3d98887d44ea68bedf08a94ebabf Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sun, 23 Aug 2020 16:55:37 +0900 Subject: [PATCH 0701/1304] ALSA; firewire-tascam: exclude Tascam FE-8 from detection Tascam FE-8 is known to support communication by asynchronous transaction only. The support can be implemented in userspace application and snd-firewire-ctl-services project has the support. However, ALSA firewire-tascam driver is bound to the model. This commit changes device entries so that the model is excluded. In a commit 53b3ffee7885 ("ALSA: firewire-tascam: change device probing processing"), I addressed to the concern that version field in configuration differs depending on installed firmware. However, as long as I checked, the version number is fixed. It's safe to return version number back to modalias. Fixes: 53b3ffee7885 ("ALSA: firewire-tascam: change device probing processing") Cc: # 4.4+ Signed-off-by: Takashi Sakamoto Link: https://lore.kernel.org/r/20200823075537.56255-1-o-takashi@sakamocchi.jp Signed-off-by: Takashi Iwai --- sound/firewire/tascam/tascam.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index d3fdc463a884..1e61cdce2895 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -225,11 +225,39 @@ static void snd_tscm_remove(struct fw_unit *unit) } static const struct ieee1394_device_id snd_tscm_id_table[] = { + // Tascam, FW-1884. 
{ .match_flags = IEEE1394_MATCH_VENDOR_ID | - IEEE1394_MATCH_SPECIFIER_ID, + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, .vendor_id = 0x00022e, .specifier_id = 0x00022e, + .version = 0x800000, + }, + // Tascam, FE-8 (.version = 0x800001) + // This kernel module doesn't support FE-8 because the most of features + // can be implemented in userspace without any specific support of this + // module. + // + // .version = 0x800002 is unknown. + // + // Tascam, FW-1082. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800003, + }, + // Tascam, FW-1804. + { + .match_flags = IEEE1394_MATCH_VENDOR_ID | + IEEE1394_MATCH_SPECIFIER_ID | + IEEE1394_MATCH_VERSION, + .vendor_id = 0x00022e, + .specifier_id = 0x00022e, + .version = 0x800004, }, /* FE-08 requires reverse-engineering because it just has faders. */ {} -- GitLab From 732fd460bb72fd51607311009b7d474a6e0e47f3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 31 Aug 2020 11:20:02 -0600 Subject: [PATCH 0702/1304] block: ensure bdi->io_pages is always initialized [ Upstream commit de1b0ee490eafdf65fac9eef9925391a8369f2dc ] If a driver leaves the limit settings as the defaults, then we don't initialize bdi->io_pages. This means that file systems may need to work around bdi->io_pages == 0, which is somewhat messy. Initialize the default value just like we do for ->ra_pages. 
Cc: stable@vger.kernel.org Fixes: 9491ae4aade6 ("mm: don't cap request size based on read-ahead setting") Reported-by: OGAWA Hirofumi Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- block/blk-core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/block/blk-core.c b/block/blk-core.c index ea33d6abdcfc..ce3710404544 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1036,6 +1036,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, q->backing_dev_info->ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; + q->backing_dev_info->io_pages = + (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; q->backing_dev_info->name = "block"; q->node = node_id; -- GitLab From a3a5cb67767464b4c36f9dfc2d4bab3eee9a2d08 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Fri, 21 Aug 2020 16:34:52 -0400 Subject: [PATCH 0703/1304] netlabel: fix problems with mapping removal [ Upstream commit d3b990b7f327e2afa98006e7666fb8ada8ed8683 ] This patch fixes two main problems seen when removing NetLabel mappings: memory leaks and potentially extra audit noise. The memory leaks are caused by not properly free'ing the mapping's address selector struct when free'ing the entire entry as well as not properly cleaning up a temporary mapping entry when adding new address selectors to an existing entry. This patch fixes both these problems such that kmemleak reports no NetLabel associated leaks after running the SELinux test suite. The potentially extra audit noise was caused by the auditing code in netlbl_domhsh_remove_entry() being called regardless of the entry's validity. If another thread had already marked the entry as invalid, but not removed/free'd it from the list of mappings, then it was possible that an additional mapping removal audit record would be generated. This patch fixes this by returning early from the removal function when the entry was previously marked invalid. 
This change also had the side benefit of improving the code by decreasing the indentation level of large chunk of code by one (accounting for most of the diffstat). Fixes: 63c416887437 ("netlabel: Add network address selectors to the NetLabel/LSM domain mapping") Reported-by: Stephen Smalley Signed-off-by: Paul Moore Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/netlabel/netlabel_domainhash.c | 59 +++++++++++++++--------------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 41d0e95d171e..b1a1718495f3 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -99,6 +99,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) kfree(netlbl_domhsh_addr6_entry(iter6)); } #endif /* IPv6 */ + kfree(ptr->def.addrsel); } kfree(ptr->domain); kfree(ptr); @@ -550,6 +551,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, goto add_return; } #endif /* IPv6 */ + /* cleanup the new entry since we've moved everything over */ + netlbl_domhsh_free_entry(&entry->rcu); } else ret_val = -EINVAL; @@ -593,6 +596,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, { int ret_val = 0; struct audit_buffer *audit_buf; + struct netlbl_af4list *iter4; + struct netlbl_domaddr4_map *map4; +#if IS_ENABLED(CONFIG_IPV6) + struct netlbl_af6list *iter6; + struct netlbl_domaddr6_map *map6; +#endif /* IPv6 */ if (entry == NULL) return -ENOENT; @@ -610,6 +619,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, ret_val = -ENOENT; spin_unlock(&netlbl_domhsh_lock); + if (ret_val) + return ret_val; + audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, @@ -619,40 +631,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry, audit_log_end(audit_buf); } - if (ret_val == 0) { - struct netlbl_af4list *iter4; - struct netlbl_domaddr4_map 
*map4; -#if IS_ENABLED(CONFIG_IPV6) - struct netlbl_af6list *iter6; - struct netlbl_domaddr6_map *map6; -#endif /* IPv6 */ - - switch (entry->def.type) { - case NETLBL_NLTYPE_ADDRSELECT: - netlbl_af4list_foreach_rcu(iter4, - &entry->def.addrsel->list4) { - map4 = netlbl_domhsh_addr4_entry(iter4); - cipso_v4_doi_putdef(map4->def.cipso); - } + switch (entry->def.type) { + case NETLBL_NLTYPE_ADDRSELECT: + netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) { + map4 = netlbl_domhsh_addr4_entry(iter4); + cipso_v4_doi_putdef(map4->def.cipso); + } #if IS_ENABLED(CONFIG_IPV6) - netlbl_af6list_foreach_rcu(iter6, - &entry->def.addrsel->list6) { - map6 = netlbl_domhsh_addr6_entry(iter6); - calipso_doi_putdef(map6->def.calipso); - } + netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) { + map6 = netlbl_domhsh_addr6_entry(iter6); + calipso_doi_putdef(map6->def.calipso); + } #endif /* IPv6 */ - break; - case NETLBL_NLTYPE_CIPSOV4: - cipso_v4_doi_putdef(entry->def.cipso); - break; + break; + case NETLBL_NLTYPE_CIPSOV4: + cipso_v4_doi_putdef(entry->def.cipso); + break; #if IS_ENABLED(CONFIG_IPV6) - case NETLBL_NLTYPE_CALIPSO: - calipso_doi_putdef(entry->def.calipso); - break; + case NETLBL_NLTYPE_CALIPSO: + calipso_doi_putdef(entry->def.calipso); + break; #endif /* IPv6 */ - } - call_rcu(&entry->rcu, netlbl_domhsh_free_entry); } + call_rcu(&entry->rcu, netlbl_domhsh_free_entry); return ret_val; } -- GitLab From d2ece46d1004319a051d00cb6a38a1532bc90c73 Mon Sep 17 00:00:00 2001 From: Kamil Lorenc Date: Tue, 1 Sep 2020 10:57:38 +0200 Subject: [PATCH 0704/1304] net: usb: dm9601: Add USB ID of Keenetic Plus DSL [ Upstream commit a609d0259183a841621f252e067f40f8cc25d6f6 ] Keenetic Plus DSL is a xDSL modem that uses dm9620 as its USB interface. Signed-off-by: Kamil Lorenc Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/dm9601.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index b91f92e4e5f2..915ac75b55fc 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -625,6 +625,10 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */ .driver_info = (unsigned long)&dm9601_info, }, + { + USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */ + .driver_info = (unsigned long)&dm9601_info, + }, {}, // END }; -- GitLab From 84cfc87866b7e01b591999cc4cdd176d78a3a69c Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 21 Aug 2020 14:59:38 +0800 Subject: [PATCH 0705/1304] sctp: not disable bh in the whole sctp_get_port_local() [ Upstream commit 3106ecb43a05dc3e009779764b9da245a5d082de ] With disabling bh in the whole sctp_get_port_local(), when snum == 0 and too many ports have been used, the do-while loop will take the cpu for a long time and cause cpu stuck: [ ] watchdog: BUG: soft lockup - CPU#11 stuck for 22s! [ ] RIP: 0010:native_queued_spin_lock_slowpath+0x4de/0x940 [ ] Call Trace: [ ] _raw_spin_lock+0xc1/0xd0 [ ] sctp_get_port_local+0x527/0x650 [sctp] [ ] sctp_do_bind+0x208/0x5e0 [sctp] [ ] sctp_autobind+0x165/0x1e0 [sctp] [ ] sctp_connect_new_asoc+0x355/0x480 [sctp] [ ] __sctp_connect+0x360/0xb10 [sctp] There's no need to disable bh in the whole function of sctp_get_port_local. So fix this cpu stuck by removing local_bh_disable() called at the beginning, and using spin_lock_bh() instead. The same thing was actually done for inet_csk_get_port() in Commit ea8add2b1903 ("tcp/dccp: better use of ephemeral ports in bind()"). Thanks to Marcelo for pointing the buggy code out. v1->v2: - use cond_resched() to yield cpu to other tasks if needed, as Eric noticed. 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: Ying Xu Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index df4a7d7c5ec0..4a2873f70b37 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -7643,8 +7643,6 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) pr_debug("%s: begins, snum:%d\n", __func__, snum); - local_bh_disable(); - if (snum == 0) { /* Search for an available port. */ int low, high, remaining, index; @@ -7663,20 +7661,21 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) continue; index = sctp_phashfn(sock_net(sk), rover); head = &sctp_port_hashtable[index]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) if ((pp->port == rover) && net_eq(sock_net(sk), pp->net)) goto next; break; next: - spin_unlock(&head->lock); + spin_unlock_bh(&head->lock); + cond_resched(); } while (--remaining > 0); /* Exhausted local port range during search? */ ret = 1; if (remaining <= 0) - goto fail; + return ret; /* OK, here is the one we will use. HEAD (the port * hash table list entry) is non-NULL and we hold it's @@ -7691,7 +7690,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) * port iterator, pp being NULL. 
*/ head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)]; - spin_lock(&head->lock); + spin_lock_bh(&head->lock); sctp_for_each_hentry(pp, &head->chain) { if ((pp->port == snum) && net_eq(pp->net, sock_net(sk))) goto pp_found; @@ -7773,10 +7772,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) ret = 0; fail_unlock: - spin_unlock(&head->lock); - -fail: - local_bh_enable(); + spin_unlock_bh(&head->lock); return ret; } -- GitLab From 553d1bb7f4d1d2b86b7527c10be31b2f5f78bd8c Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 2 Sep 2020 22:44:16 +0900 Subject: [PATCH 0706/1304] tipc: fix shutdown() of connectionless socket [ Upstream commit 2a63866c8b51a3f72cea388dfac259d0e14c4ba6 ] syzbot is reporting hung task at nbd_ioctl() [1], for there are two problems regarding TIPC's connectionless socket's shutdown() operation. ---------- #include #include #include #include #include int main(int argc, char *argv[]) { const int fd = open("/dev/nbd0", 3); alarm(5); ioctl(fd, NBD_SET_SOCK, socket(PF_TIPC, SOCK_DGRAM, 0)); ioctl(fd, NBD_DO_IT, 0); /* To be interrupted by SIGALRM. */ return 0; } ---------- One problem is that wait_for_completion() from flush_workqueue() from nbd_start_device_ioctl() from nbd_ioctl() cannot be completed when nbd_start_device_ioctl() received a signal at wait_event_interruptible(), for tipc_shutdown() from kernel_sock_shutdown(SHUT_RDWR) from nbd_mark_nsock_dead() from sock_shutdown() from nbd_start_device_ioctl() is failing to wake up a WQ thread sleeping at wait_woken() from tipc_wait_for_rcvmsg() from sock_recvmsg() from sock_xmit() from nbd_read_stat() from recv_work() scheduled by nbd_start_device() from nbd_start_device_ioctl(). Fix this problem by always invoking sk->sk_state_change() (like inet_shutdown() does) when tipc_shutdown() is called. 
The other problem is that tipc_wait_for_rcvmsg() cannot return when tipc_shutdown() is called, for tipc_shutdown() sets sk->sk_shutdown to SEND_SHUTDOWN (despite "how" is SHUT_RDWR) while tipc_wait_for_rcvmsg() needs sk->sk_shutdown set to RCV_SHUTDOWN or SHUTDOWN_MASK. Fix this problem by setting sk->sk_shutdown to SHUTDOWN_MASK (like inet_shutdown() does) when the socket is connectionless. [1] https://syzkaller.appspot.com/bug?id=3fe51d307c1f0a845485cf1798aa059d12bf18b2 Reported-by: syzbot Signed-off-by: Tetsuo Handa Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/socket.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index f0184a5e83aa..d0cf7169f08c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2565,18 +2565,21 @@ static int tipc_shutdown(struct socket *sock, int how) lock_sock(sk); __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); - sk->sk_shutdown = SEND_SHUTDOWN; + if (tipc_sk_type_connectionless(sk)) + sk->sk_shutdown = SHUTDOWN_MASK; + else + sk->sk_shutdown = SEND_SHUTDOWN; if (sk->sk_state == TIPC_DISCONNECTING) { /* Discard any unreceived messages */ __skb_queue_purge(&sk->sk_receive_queue); - /* Wake up anyone sleeping in poll */ - sk->sk_state_change(sk); res = 0; } else { res = -ENOTCONN; } + /* Wake up anyone sleeping in poll. */ + sk->sk_state_change(sk); release_sock(sk); return res; -- GitLab From 9f313bcb3b3d021f9b2f04f6d8464f8e9e309452 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Aug 2020 12:40:06 -0700 Subject: [PATCH 0707/1304] net: disable netpoll on fresh napis [ Upstream commit 96e97bc07e90f175a8980a22827faf702ca4cb30 ] napi_disable() makes sure to set the NAPI_STATE_NPSVC bit to prevent netpoll from accessing rings before init is complete. However, the same is not done for fresh napi instances in netif_napi_add(), even though we expect NAPI instances to be added as disabled. 
This causes crashes during driver reconfiguration (enabling XDP, changing the channel count) - if there is any printk() after netif_napi_add() but before napi_enable(). To ensure memory ordering is correct we need to use RCU accessors. Reported-by: Rob Sherwood Fixes: 2d8bff12699a ("netpoll: Close race condition between poll_one_napi and napi_disable") Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/dev.c | 3 ++- net/core/netpoll.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 42ba150fa18d..c77d12a35f92 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6196,12 +6196,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, pr_err_once("netif_napi_add() called with weight %d on device %s\n", weight, dev->name); napi->weight = weight; - list_add(&napi->dev_list, &dev->napi_list); napi->dev = dev; #ifdef CONFIG_NETPOLL napi->poll_owner = -1; #endif set_bit(NAPI_STATE_SCHED, &napi->state); + set_bit(NAPI_STATE_NPSVC, &napi->state); + list_add_rcu(&napi->dev_list, &dev->napi_list); napi_hash_add(napi); } EXPORT_SYMBOL(netif_napi_add); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index a581cf101cd9..023ce0fbb496 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -161,7 +161,7 @@ static void poll_napi(struct net_device *dev) struct napi_struct *napi; int cpu = smp_processor_id(); - list_for_each_entry(napi, &dev->napi_list, dev_list) { + list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) { if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) { poll_one_napi(napi); smp_store_release(&napi->poll_owner, -1); -- GitLab From 044be307e550b4532960eadabfb6942de96751f0 Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Thu, 6 Aug 2020 19:05:42 -0700 Subject: [PATCH 0708/1304] net/mlx5e: Don't support phys switch id if not in switchdev mode Support for phys switch id ndo added for representors and if we do not have representors 
there is no need to support it. Since each port return different switch id supporting this block support for creating bond over PFs and attaching to bridge in legacy mode. This bug doesn't exist upstream as the code got refactored and the netdev api is totally different. Fixes: cb67b832921c ("net/mlx5e: Introduce SRIOV VF representors") Signed-off-by: Roi Dayan Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 701624a63d2f..1ab40d622ae1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -198,7 +198,7 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - if (esw->mode == SRIOV_NONE) + if (esw->mode != SRIOV_OFFLOADS) return -EOPNOTSUPP; switch (attr->id) { -- GitLab From a87f96283793d58b042618c689630db264715274 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 12 Sep 2020 13:40:23 +0200 Subject: [PATCH 0709/1304] Linux 4.19.145 Tested-by: Jon Hunter Tested-by: Shuah Khan Tested-by: Guenter Roeck Tested-by: Linux Kernel Functional Testing Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ba9d0b4476e1..6bf851efcabe 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 144 +SUBLEVEL = 145 EXTRAVERSION = NAME = "People's Front" -- GitLab From ea1951959b9011fe22056a2129b72333befa3de2 Mon Sep 17 00:00:00 2001 From: Giuliano Procida Date: Mon, 14 Sep 2020 11:21:40 +0100 Subject: [PATCH 0710/1304] ANDROID: ABI: refresh with latest libabigail 94f5d4ae This is a one-off change that 
updates the type ids of anonymous structs and unions to a more stable id. Bug: 163532421 Change-Id: Ie23e7891f05b5521b1da8e0af9089e0fa1afe617 Signed-off-by: Giuliano Procida --- android/abi_gki_aarch64.xml | 4300 +++++++++-------------------------- 1 file changed, 1116 insertions(+), 3184 deletions(-) diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml index 59e3d91556bc..faeca9f0ee19 100644 --- a/android/abi_gki_aarch64.xml +++ b/android/abi_gki_aarch64.xml @@ -2897,7 +2897,7 @@ - + @@ -2926,15 +2926,15 @@ - - + + - + @@ -2979,7 +2979,7 @@ - + @@ -3150,7 +3150,7 @@ - + @@ -3435,10 +3435,10 @@ - + - + @@ -3451,12 +3451,12 @@ - + - + - + @@ -3469,10 +3469,10 @@ - + - + @@ -3614,7 +3614,7 @@ - + @@ -3728,7 +3728,7 @@ - + @@ -3737,7 +3737,7 @@ - + @@ -3913,8 +3913,8 @@ - - + + @@ -3954,7 +3954,7 @@ - + @@ -3995,13 +3995,13 @@ - + - + @@ -4159,8 +4159,8 @@ - - + + @@ -4729,7 +4729,7 @@ - + @@ -5184,7 +5184,7 @@ - + @@ -5244,21 +5244,21 @@ - + - + - + - + - + - + @@ -5278,7 +5278,7 @@ - + @@ -5286,7 +5286,7 @@ - + @@ -5298,7 +5298,7 @@ - + @@ -5326,7 +5326,7 @@ - + @@ -5416,8 +5416,8 @@ - - + + @@ -5474,36 +5474,36 @@ - + - + - + - + - + - + - + - + - + - + @@ -5511,7 +5511,7 @@ - + @@ -5535,7 +5535,7 @@ - + @@ -5546,7 +5546,7 @@ - + @@ -5564,26 +5564,26 @@ - + - + - + - + - + - + @@ -5594,7 +5594,7 @@ - + @@ -5602,7 +5602,7 @@ - + @@ -5610,7 +5610,7 @@ - + @@ -5647,8 +5647,8 @@ - - + + @@ -5670,10 +5670,10 @@ - + - + @@ -5682,11 +5682,11 @@ - + - + - + @@ -5695,12 +5695,12 @@ - + - + @@ -5716,7 +5716,7 @@ - + @@ -5729,7 +5729,7 @@ - + @@ -5740,7 +5740,7 @@ - + @@ -5748,7 +5748,7 @@ - + @@ -5761,13 +5761,13 @@ - + - + @@ -5781,15 +5781,15 @@ - + - + - + @@ -5800,7 +5800,7 @@ - + @@ -5817,7 +5817,7 @@ - + @@ -5839,9 +5839,9 @@ - + - + @@ -5851,10 +5851,10 @@ - + - + @@ -5883,7 +5883,7 @@ - + @@ -5893,7 +5893,7 @@ - + @@ -5958,7 +5958,7 @@ - + @@ -6006,8 +6006,8 @@ - - + + @@ -6060,14 +6060,14 @@ - - + + - + @@ -6076,7 +6076,7 @@ - + @@ -6139,7 +6139,7 
@@ - + @@ -6367,10 +6367,10 @@ - + - + @@ -6898,7 +6898,7 @@ - + @@ -6912,7 +6912,7 @@ - + @@ -6926,7 +6926,7 @@ - + @@ -6940,7 +6940,7 @@ - + @@ -6951,7 +6951,7 @@ - + @@ -6959,26 +6959,26 @@ - + - + - + - + - + - + @@ -6989,7 +6989,7 @@ - + @@ -6997,10 +6997,10 @@ - + - + @@ -7145,34 +7145,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -7268,164 +7240,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -7858,7 +7672,7 @@ - + @@ -7871,7 +7685,7 @@ - + @@ -8076,27 +7890,12 @@ - - - - - - - - - - - - - - - - + - + - + @@ -8113,7 +7912,7 @@ - + @@ -8127,23 +7926,7 @@ - - - - - - - - - - - - - - - - - + @@ -8151,7 +7934,7 @@ - + @@ -8160,14 +7943,14 @@ - - - - + + + + - - + + @@ -8544,8 +8327,8 @@ - - + + @@ -8556,21 +8339,21 @@ - - + + - - - + + + - - + + - - + + @@ -8676,7 +8459,7 @@ - + @@ -8700,7 +8483,7 @@ - + @@ -9700,7 +9483,7 @@ - + @@ -9985,7 +9768,7 @@ - + @@ -10065,7 +9848,7 @@ - + @@ -10372,19 +10155,19 @@ - + - + - + - + @@ -10519,7 +10302,7 @@ - + @@ -10537,16 +10320,16 @@ - + - + - + @@ -10597,7 +10380,7 @@ - + @@ -10605,10 +10388,10 @@ - + - + @@ -10642,7 +10425,7 @@ - + @@ -10654,10 +10437,10 @@ - + - + @@ -10670,7 +10453,7 @@ - + @@ -10682,18 +10465,18 @@ - + - + - + - + @@ -10772,10 +10555,10 @@ - + - + @@ -10798,8 +10581,8 @@ - - + + @@ -10807,8 +10590,8 @@ - - + + @@ -10816,8 +10599,8 @@ - - + + @@ -10840,14 +10623,14 @@ - - + + - - + + @@ -10855,8 +10638,8 @@ - - + + @@ -10867,8 +10650,8 @@ - - + + @@ -11141,7 +10924,7 @@ - + @@ -11158,9 +10941,9 @@ - + - + @@ -11170,26 +10953,26 @@ - + - + - + - + - + - + @@ -11200,7 +10983,7 @@ - + @@ -11208,12 +10991,12 @@ - + - + @@ -11736,7 +11519,7 @@ - + @@ -11936,7 +11719,7 @@ - + @@ -12089,7 +11872,7 @@ - + @@ -12136,7 
+11919,7 @@ - + @@ -12195,8 +11978,8 @@ - - + + @@ -12424,8 +12207,8 @@ - - + + @@ -13007,7 +12790,7 @@ - + @@ -13057,7 +12840,7 @@ - + @@ -13207,8 +12990,8 @@ - - + + @@ -13332,7 +13115,7 @@ - + @@ -13486,7 +13269,7 @@ - + @@ -14436,23 +14219,6 @@ - - - - - - - - - - - - - - - - - @@ -15433,7 +15199,7 @@ - + @@ -15441,7 +15207,7 @@ - + @@ -15452,7 +15218,7 @@ - + @@ -15460,15 +15226,15 @@ - + - + - + @@ -15476,7 +15242,7 @@ - + @@ -15484,7 +15250,7 @@ - + @@ -15492,9 +15258,9 @@ - + - + @@ -15503,15 +15269,15 @@ - + - + - + @@ -15519,7 +15285,7 @@ - + @@ -15624,7 +15390,7 @@ - + @@ -15632,10 +15398,10 @@ - + - + @@ -16509,7 +16275,7 @@ - + @@ -16564,15 +16330,15 @@ - + - + - + @@ -16755,6 +16521,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -16841,12 +16627,12 @@ - + - + @@ -17494,7 +17280,7 @@ - + @@ -17677,7 +17463,7 @@ - + @@ -18270,12 +18056,12 @@ - + - + @@ -21175,8 +20961,8 @@ - - + + @@ -21217,8 +21003,8 @@ - - + + @@ -21235,8 +21021,8 @@ - - + + @@ -21285,8 +21071,8 @@ - - + + @@ -21321,8 +21107,8 @@ - - + + @@ -21338,8 +21124,8 @@ - - + + @@ -21367,8 +21153,8 @@ - - + + @@ -21384,8 +21170,8 @@ - - + + @@ -21852,13 +21638,13 @@ - + - + @@ -21940,7 +21726,6 @@ - @@ -21950,7 +21735,6 @@ - @@ -23079,7 +22863,7 @@ - + @@ -23838,7 +23622,7 @@ - + @@ -24645,7 +24429,7 @@ - + @@ -25405,7 +25189,7 @@ - + @@ -25440,7 +25224,7 @@ - + @@ -26950,7 +26734,7 @@ - + @@ -28182,58 +27966,7 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + @@ -28311,17 +28044,6 @@ - - - - - - - - - - - @@ -29342,14 +29064,6 @@ - - - - - - - - @@ -29596,10 +29310,10 @@ - + - + @@ -29668,10 +29382,10 @@ - + - + @@ -29713,18 +29427,18 @@ - + - + - + - + - + @@ -29744,7 +29458,7 @@ - + @@ -29934,7 +29648,7 @@ - + @@ -30910,6 +30624,17 @@ + + + + + + + + + + + @@ -30951,17 +30676,6 @@ - - - - - - - - - - - @@ -31599,7 +31313,7 @@ - + @@ -31690,7 +31404,7 @@ - + @@ -32070,7 +31784,7 @@ - + @@ -32078,7 +31792,7 @@ - + @@ -32088,17 
+31802,17 @@ - + - + - + @@ -32428,7 +32142,7 @@ - + @@ -32447,7 +32161,7 @@ - + @@ -33281,7 +32995,7 @@ - + @@ -33301,21 +33015,32 @@ - + + + + + + + + + + + + - + - + - + - + - + @@ -33326,7 +33051,7 @@ - + @@ -33334,7 +33059,7 @@ - + @@ -33348,7 +33073,7 @@ - + @@ -33632,9 +33357,9 @@ - + - + @@ -33643,18 +33368,18 @@ - + - + - + - + @@ -33677,7 +33402,7 @@ - + @@ -35567,18 +35292,18 @@ - + - + - + - + @@ -35684,7 +35409,7 @@ - + @@ -35821,7 +35546,7 @@ - + @@ -35876,7 +35601,7 @@ - + @@ -35929,10 +35654,10 @@ - + - + @@ -35944,7 +35669,7 @@ - + @@ -35969,7 +35694,7 @@ - + @@ -35988,7 +35713,7 @@ - + @@ -36083,9 +35808,9 @@ - + - + @@ -36288,7 +36013,7 @@ - + @@ -36874,10 +36599,10 @@ - + - + @@ -37385,10 +37110,10 @@ - + - + @@ -37439,28 +37164,6 @@ - - - - - - - - - - - - - - - - - - - - - - @@ -37512,7 +37215,7 @@ - + @@ -37525,7 +37228,7 @@ - + @@ -37533,13 +37236,13 @@ - + - + - + @@ -37547,7 +37250,7 @@ - + @@ -38037,10 +37740,10 @@ - + - + @@ -38095,7 +37798,7 @@ - + @@ -38104,7 +37807,7 @@ - + @@ -38138,7 +37841,7 @@ - + @@ -38161,7 +37864,7 @@ - + @@ -38262,7 +37965,7 @@ - + @@ -38306,7 +38009,7 @@ - + @@ -38328,7 +38031,7 @@ - + @@ -38337,7 +38040,7 @@ - + @@ -38396,10 +38099,10 @@ - + - + @@ -38577,7 +38280,7 @@ - + @@ -38592,7 +38295,7 @@ - + @@ -38608,10 +38311,10 @@ - + - + @@ -38642,13 +38345,13 @@ - + - + @@ -38791,15 +38494,15 @@ - + - + - + - + @@ -38812,26 +38515,26 @@ - + - + - + - + - + - + - + @@ -38839,7 +38542,7 @@ - + @@ -38849,10 +38552,10 @@ - + - + @@ -38940,13 +38643,13 @@ - + - + @@ -38999,13 +38702,13 @@ - + - + @@ -39930,9 +39633,9 @@ - + - + @@ -40235,10 +39938,10 @@ - + - + @@ -40250,7 +39953,7 @@ - + @@ -40323,7 +40026,7 @@ - + @@ -40331,7 +40034,7 @@ - + @@ -40339,7 +40042,7 @@ - + @@ -40568,7 +40271,7 @@ - + @@ -40657,7 +40360,7 @@ - + @@ -40675,7 +40378,7 @@ - + @@ -40708,7 +40411,7 @@ - + @@ -40726,7 +40429,7 @@ - + @@ -40940,7 +40643,7 @@ - + @@ -40953,7 +40656,7 @@ - + @@ -41651,15 +41354,7 @@ - - - - - - - - - + @@ 
-41667,15 +41362,15 @@ - + - + - + @@ -41683,127 +41378,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -41949,25 +41523,6 @@ - - - - - - - - - - - - - - - - - - - @@ -42403,7 +41958,7 @@ - + @@ -42421,7 +41976,7 @@ - + @@ -42448,7 +42003,7 @@ - + @@ -42663,13 +42218,13 @@ - + - + - + @@ -42693,7 +42248,7 @@ - + @@ -42711,13 +42266,13 @@ - + - + @@ -42726,7 +42281,7 @@ - + @@ -42735,11 +42290,11 @@ - + - + @@ -42748,7 +42303,7 @@ - + @@ -42902,7 +42457,7 @@ - + @@ -42940,23 +42495,12 @@ - - - - - - - - - - - - + @@ -42992,7 +42536,7 @@ - + @@ -43105,8 +42649,8 @@ - - + + @@ -43200,7 +42744,7 @@ - + @@ -43475,10 +43019,10 @@ - + - + @@ -43546,13 +43090,13 @@ - + - + - + @@ -43561,10 +43105,10 @@ - + - + @@ -43572,7 +43116,7 @@ - + @@ -43580,7 +43124,7 @@ - + @@ -43589,7 +43133,7 @@ - + @@ -43790,7 +43334,7 @@ - + @@ -43802,7 +43346,7 @@ - + @@ -43906,7 +43450,7 @@ - + @@ -43917,8 +43461,8 @@ - - + + @@ -43929,7 +43473,7 @@ - + @@ -44013,7 +43557,7 @@ - + @@ -44126,7 +43670,7 @@ - + @@ -44136,7 +43680,7 @@ - + @@ -44144,7 +43688,7 @@ - + @@ -44152,7 +43696,7 @@ - + @@ -44192,7 +43736,7 @@ - + @@ -44205,7 +43749,7 @@ - + @@ -44221,9 +43765,9 @@ - + - + @@ -44234,7 +43778,7 @@ - + @@ -44253,25 +43797,25 @@ - + - + - + - + - + - + - + @@ -44309,7 +43853,7 @@ - + @@ -44317,7 +43861,7 @@ - + @@ -44343,7 +43887,7 @@ - + @@ -44357,7 +43901,7 @@ - + @@ -44368,7 +43912,7 @@ - + @@ -44388,7 +43932,7 @@ - + @@ -44396,7 +43940,7 @@ - + @@ -44922,166 +44466,8 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + 
@@ -45089,13 +44475,13 @@ - + - + @@ -45106,18 +44492,18 @@ - + - + - + - + @@ -45137,7 +44523,7 @@ - + @@ -45864,12 +45250,12 @@ - + - + - + @@ -45879,13 +45265,13 @@ - + - + @@ -46022,7 +45408,7 @@ - + @@ -46353,13 +45739,13 @@ - + - + @@ -46381,23 +45767,6 @@ - - - - - - - - - - - - - - - - - @@ -47682,23 +47051,23 @@ - + - + - + - + - + @@ -47706,7 +47075,7 @@ - + @@ -47714,7 +47083,7 @@ - + @@ -47722,7 +47091,7 @@ - + @@ -47736,7 +47105,7 @@ - + @@ -47747,15 +47116,7 @@ - - - - - - - - - + @@ -47763,7 +47124,7 @@ - + @@ -47771,7 +47132,7 @@ - + @@ -47779,7 +47140,7 @@ - + @@ -47787,7 +47148,7 @@ - + @@ -47798,7 +47159,7 @@ - + @@ -47806,7 +47167,7 @@ - + @@ -47912,12 +47273,7 @@ - - - - - - + @@ -47991,7 +47347,7 @@ - + @@ -48090,7 +47446,7 @@ - + @@ -48314,7 +47670,7 @@ - + @@ -49387,21 +48743,21 @@ - + - + - + - + @@ -49418,7 +48774,7 @@ - + @@ -49433,7 +48789,7 @@ - + @@ -49499,7 +48855,7 @@ - + @@ -49588,7 +48944,7 @@ - + @@ -49599,7 +48955,7 @@ - + @@ -49863,7 +49219,7 @@ - + @@ -50671,6 +50027,7 @@ + @@ -51201,23 +50558,6 @@ - - - - - - - - - - - - - - - - - @@ -51257,6 +50597,7 @@ + @@ -52036,7 +51377,7 @@ - + @@ -52044,7 +51385,7 @@ - + @@ -52152,7 +51493,7 @@ - + @@ -52568,7 +51909,7 @@ - + @@ -53140,6 +52481,23 @@ + + + + + + + + + + + + + + + + + @@ -53487,7 +52845,7 @@ - + @@ -53510,19 +52868,19 @@ - + - + - + - + - + @@ -53530,7 +52888,7 @@ - + @@ -53541,7 +52899,7 @@ - + @@ -53549,7 +52907,7 @@ - + @@ -53560,7 +52918,7 @@ - + @@ -53568,7 +52926,7 @@ - + @@ -54486,9 +53844,9 @@ - + - + @@ -54497,7 +53855,7 @@ - + @@ -55475,7 +54833,7 @@ - + @@ -55496,7 +54854,7 @@ - + @@ -56105,12 +55463,7 @@ - - - - - - + @@ -56370,8 +55723,8 @@ - - + + @@ -57078,23 +56431,6 @@ - - - - - - - - - - - - - - - - - @@ -58212,164 +57548,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -58994,23 +58172,6 @@ - - - - - - - - - - - - - - - - - @@ -63649,7 +62810,7 @@ - + @@ -64210,7 +63371,7 @@ - + @@ -64655,7 +63816,7 @@ - + @@ -64797,10 +63958,10 @@ - + - + @@ -64813,7 +63974,7 @@ - + @@ -64839,15 +64000,15 @@ - + - + - + @@ -64856,7 +64017,7 @@ - + @@ -64964,7 +64125,7 @@ - + @@ -64994,8 +64155,8 @@ - - + + @@ -65037,7 +64198,7 @@ - + @@ -65045,24 +64206,7 @@ - - - - - - - - - - - - - - - - - - + @@ -65163,7 +64307,7 @@ - + @@ -65179,10 +64323,10 @@ - + - + @@ -65190,282 +64334,23 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + - + - + - + - + @@ -65479,7 +64364,7 @@ - + @@ -65499,7 +64384,7 @@ - + @@ -65672,17 +64557,6 @@ - - - - - - - - - - - @@ -65691,18 +64565,18 @@ - + - + - + - + @@ -65764,7 +64638,7 @@ - + @@ -65922,41 +64796,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -65977,7 +64816,7 @@ - + @@ -65985,23 +64824,6 @@ - - - - - - - - - - - - - - - - - @@ -66189,7 +65011,7 @@ - + @@ -66201,15 +65023,15 @@ - + - + - + - + @@ -66226,7 +65048,7 @@ - + @@ -66618,16 +65440,16 @@ - - + + - - + + @@ -66792,23 +65614,6 @@ - - - - - - - - - - - - - - - - - @@ -66966,8 +65771,6 @@ - - @@ -67079,13 +65882,13 @@ - + - + @@ -67101,39 +65904,39 @@ - + - + - + - + - + - + - + - + - + - + - + - + @@ -67141,7 +65944,7 @@ - + @@ -67170,7 +65973,7 @@ - + @@ -67197,7 +66000,7 @@ - + @@ -67205,7 +66008,7 @@ - + @@ -67219,7 +66022,7 @@ - + @@ -67230,7 +66033,7 @@ - + @@ 
-67238,7 +66041,7 @@ - + @@ -67246,12 +66049,12 @@ - + - + @@ -67303,23 +66106,6 @@ - - - - - - - - - - - - - - - - - @@ -67407,50 +66193,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -67477,14 +66219,6 @@ - - - - - - - - @@ -67806,15 +66540,19 @@ - + + + + + - + - + @@ -67840,7 +66578,7 @@ - + @@ -67888,15 +66626,15 @@ - + - + - + @@ -67916,7 +66654,7 @@ - + @@ -67924,6 +66662,53 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -67969,7 +66754,7 @@ - + @@ -68100,7 +66885,7 @@ - + @@ -68108,7 +66893,7 @@ - + @@ -68523,37 +67308,15 @@ - - - - + - - - - - - - - - - - - - - - - - - + - - - + - + @@ -68573,6 +67336,7 @@ + @@ -68792,17 +67556,6 @@ - - - - - - - - - - - @@ -68880,99 +67633,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -68982,25 +67642,6 @@ - - - - - - - - - - - - - - - - - - - @@ -69563,10 +68204,10 @@ - + - + @@ -69594,7 +68235,7 @@ - + @@ -69632,7 +68273,7 @@ - + @@ -69653,7 +68294,7 @@ - + @@ -69677,7 +68318,7 @@ - + @@ -69989,7 +68630,7 @@ - + @@ -70053,7 +68694,7 @@ - + @@ -70401,7 +69042,7 @@ - + @@ -70684,7 +69325,7 @@ - + @@ -70793,7 +69434,7 @@ - + @@ -70802,7 +69443,7 @@ - + @@ -70821,21 +69462,21 @@ - + - + - + - + @@ -70881,7 +69522,7 @@ - + @@ -70953,7 +69594,7 @@ - + @@ -70983,7 +69624,7 @@ - + @@ -71422,7 +70063,7 @@ - + @@ -72167,7 +70808,7 @@ - + @@ -72325,14 +70966,6 @@ - - - - - - - - @@ -72475,7 +71108,7 @@ - + @@ -72492,7 +71125,7 @@ - + @@ -72504,7 +71137,7 @@ - + @@ -72531,16 +71164,24 @@ - + - + + + + + + + + + @@ -72554,7 +71195,7 @@ - + @@ -72562,7 +71203,7 @@ - + @@ -72718,15 +71359,15 @@ - + - + - + @@ -72740,7 +71381,7 @@ - + @@ -72777,7 +71418,7 @@ - + @@ -72801,15 +71442,15 @@ - + - + - + @@ -72883,7 +71524,7 @@ - + @@ -72944,7 +71585,7 @@ - + @@ -72965,12 +71606,12 @@ - + - + @@ 
-72987,7 +71628,7 @@ - + @@ -73046,26 +71687,26 @@ - - + + - - + + - - + + - + @@ -73374,7 +72015,7 @@ - + @@ -74156,13 +72797,13 @@ - + - + - + @@ -74273,8 +72914,8 @@ - - + + @@ -74282,21 +72923,21 @@ - + - + - + - + @@ -74341,7 +72982,7 @@ - + @@ -75100,13 +73741,13 @@ - + - + - + @@ -75151,7 +73792,7 @@ - + @@ -75232,7 +73873,7 @@ - + @@ -75324,10 +73965,10 @@ - + - + @@ -75458,7 +74099,7 @@ - + @@ -75466,7 +74107,7 @@ - + @@ -75550,7 +74191,7 @@ - + @@ -76053,25 +74694,6 @@ - - - - - - - - - - - - - - - - - - - @@ -77004,7 +75626,7 @@ - + @@ -77829,14 +76451,14 @@ - - + + - - + + @@ -77852,7 +76474,7 @@ - + @@ -77948,16 +76570,16 @@ - + - + - + @@ -77984,7 +76606,7 @@ - + @@ -77992,7 +76614,7 @@ - + @@ -78000,7 +76622,7 @@ - + @@ -78014,7 +76636,7 @@ - + @@ -78030,7 +76652,7 @@ - + @@ -78069,23 +76691,23 @@ - + - + - + - + @@ -78095,7 +76717,7 @@ - + @@ -78105,7 +76727,7 @@ - + @@ -78124,10 +76746,10 @@ - + - + @@ -78317,7 +76939,7 @@ - + @@ -78329,7 +76951,7 @@ - + @@ -78360,7 +76982,7 @@ - + @@ -78372,7 +76994,7 @@ - + @@ -78423,10 +77045,10 @@ - + - + @@ -78455,7 +77077,7 @@ - + @@ -78463,7 +77085,7 @@ - + @@ -78631,13 +77253,13 @@ - + - + @@ -78648,8 +77270,8 @@ - - + + @@ -79063,8 +77685,8 @@ - - + + @@ -79368,7 +77990,7 @@ - + @@ -79445,7 +78067,7 @@ - + @@ -79467,7 +78089,7 @@ - + @@ -79475,7 +78097,7 @@ - + @@ -79486,18 +78108,18 @@ - + - + - + - + @@ -79593,7 +78215,7 @@ - + @@ -79747,7 +78369,7 @@ - + @@ -80084,7 +78706,7 @@ - + @@ -81716,7 +80338,7 @@ - + @@ -81724,7 +80346,7 @@ - + @@ -81732,7 +80354,7 @@ - + @@ -81740,7 +80362,7 @@ - + @@ -81748,7 +80370,7 @@ - + @@ -81756,7 +80378,7 @@ - + @@ -81767,7 +80389,7 @@ - + @@ -81775,7 +80397,7 @@ - + @@ -81783,7 +80405,7 @@ - + @@ -81791,23 +80413,23 @@ - + - + - + - + - + @@ -81815,7 +80437,7 @@ - + @@ -81823,7 +80445,7 @@ - + @@ -81831,7 +80453,7 @@ - + @@ -81839,7 +80461,7 @@ - + @@ -81847,10 +80469,10 @@ - + - + @@ -81864,15 +80486,15 @@ - + - + - + - + @@ -81880,15 +80502,15 @@ - + - + - + @@ 
-81896,7 +80518,7 @@ - + @@ -82257,38 +80879,7 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + @@ -82296,7 +80887,7 @@ - + @@ -82333,8 +80924,6 @@ - - @@ -82382,6 +80971,17 @@ + + + + + + + + + + + @@ -82408,6 +81008,7 @@ + @@ -82528,68 +81129,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -82657,7 +81196,6 @@ - @@ -82706,7 +81244,7 @@ - + @@ -82718,7 +81256,7 @@ - + @@ -83027,7 +81565,7 @@ - + @@ -83069,7 +81607,7 @@ - + @@ -83533,7 +82071,7 @@ - + @@ -83841,26 +82379,12 @@ - - - - - - - - - - - - - - - + - + @@ -83920,43 +82444,8 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + @@ -84182,70 +82671,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -84256,12 +82681,12 @@ - + - + - + @@ -84275,15 +82700,15 @@ - + - + - + @@ -84305,7 +82730,7 @@ - + @@ -84351,17 +82776,6 @@ - - - - - - - - - - - @@ -84394,230 +82808,7 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + @@ -84625,23 +82816,6 @@ - - - - - - - - - - - - - - - - - @@ -84654,25 +82828,6 @@ - - - - - - - - - - - - - - - - - - - @@ -84857,7 +83012,7 @@ - + @@ -85060,15 +83215,7 @@ - - - - - - - - - + @@ -85079,7 +83226,7 @@ - + @@ -85087,7 +83234,7 @@ - + @@ -85095,7 +83242,7 @@ - + @@ -85132,14 +83279,14 @@ - - - + + + - - - + + + @@ -85218,50 +83365,50 @@ - + - - - - + + + + - - - - - - + + + + + + - - - + + + - - + + - - - + + + - - + + - - - + + + - - - - + + + + @@ -85983,15 +84130,15 @@ - 
+ - + - + @@ -86474,10 +84621,10 @@ - + - + @@ -86485,17 +84632,6 @@ - - - - - - - - - - - @@ -86739,7 +84875,7 @@ - + @@ -86798,7 +84934,7 @@ - + @@ -86806,17 +84942,6 @@ - - - - - - - - - - - @@ -87205,7 +85330,7 @@ - + @@ -87219,7 +85344,7 @@ - + @@ -87233,20 +85358,9 @@ - + - - - - - - - - - - - - + @@ -87268,14 +85382,6 @@ - - - - - - - - @@ -87327,7 +85433,7 @@ - + @@ -87496,15 +85602,15 @@ - + - + - + @@ -87512,15 +85618,15 @@ - + - + - + @@ -87528,7 +85634,7 @@ - + @@ -87536,7 +85642,7 @@ - + @@ -87544,7 +85650,7 @@ - + @@ -87555,7 +85661,7 @@ - + @@ -87566,6 +85672,22 @@ + + + + + + + + + + + + + + + + @@ -87600,22 +85722,6 @@ - - - - - - - - - - - - - - - - @@ -87649,7 +85755,7 @@ - + @@ -87661,7 +85767,7 @@ - + @@ -88086,18 +86192,7 @@ - - - - - - - - - - - - + @@ -88105,7 +86200,7 @@ - + @@ -88194,7 +86289,7 @@ - + @@ -88208,17 +86303,6 @@ - - - - - - - - - - - @@ -88253,7 +86337,7 @@ - + @@ -88293,23 +86377,6 @@ - - - - - - - - - - - - - - - - - @@ -88376,10 +86443,10 @@ - + - + @@ -88394,7 +86461,7 @@ - + @@ -88492,7 +86559,7 @@ - + @@ -88705,14 +86772,6 @@ - - - - - - - - @@ -88928,7 +86987,7 @@ - + @@ -90891,7 +88950,7 @@ - + @@ -91767,7 +89826,7 @@ - + @@ -91779,7 +89838,7 @@ - + @@ -93073,9 +91132,9 @@ - + - + @@ -93084,7 +91143,7 @@ - + @@ -93092,23 +91151,23 @@ - + - + - + - + - + @@ -93116,7 +91175,7 @@ - + @@ -93124,7 +91183,7 @@ - + @@ -93132,7 +91191,7 @@ - + @@ -93140,7 +91199,7 @@ - + @@ -93728,7 +91787,7 @@ - + @@ -93736,7 +91795,7 @@ - + @@ -93752,9 +91811,9 @@ - - - + + + @@ -93823,45 +91882,45 @@ - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -93875,12 +91934,12 @@ - + - + @@ -93891,7 +91950,7 @@ - + @@ -93905,7 +91964,7 @@ - + @@ -93919,12 +91978,7 @@ - - - - - - + @@ -93938,87 +91992,73 @@ - + - - - - - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -94032,7 +92072,7 @@ - + @@ -94040,7 +92080,7 @@ - + @@ -94065,7 +92105,7 @@ - + @@ -94096,7 +92136,7 @@ - + @@ 
-94113,7 +92153,7 @@ - + @@ -94130,7 +92170,7 @@ - + @@ -94141,23 +92181,7 @@ - - - - - - - - - - - - - - - - - + @@ -94165,7 +92189,7 @@ - + @@ -94173,7 +92197,7 @@ - + @@ -94181,7 +92205,7 @@ - + @@ -94195,7 +92219,7 @@ - + @@ -94203,7 +92227,7 @@ - + @@ -94214,7 +92238,7 @@ - + @@ -94239,7 +92263,7 @@ - + @@ -94466,7 +92490,7 @@ - + @@ -94501,31 +92525,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - @@ -95386,31 +93385,31 @@ - + - + - + - + - + - + - + @@ -95421,7 +93420,7 @@ - + @@ -95432,7 +93431,7 @@ - + @@ -95449,7 +93448,7 @@ - + @@ -95466,7 +93465,7 @@ - + @@ -95475,24 +93474,24 @@ - + - + - + - + - + - + @@ -95500,7 +93499,7 @@ - + @@ -95508,7 +93507,7 @@ - + @@ -95516,7 +93515,7 @@ - + @@ -95539,7 +93538,7 @@ - + @@ -95617,20 +93616,12 @@ - + - - - - - - - - @@ -96362,58 +94353,7 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + @@ -96421,15 +94361,7 @@ - - - - - - - - - + @@ -96524,7 +94456,7 @@ - + @@ -96701,7 +94633,7 @@ - + @@ -97126,7 +95058,7 @@ - + @@ -97268,7 +95200,7 @@ - + @@ -97383,10 +95315,10 @@ - + - + @@ -97468,10 +95400,10 @@ - + - + @@ -97490,10 +95422,10 @@ - + - + @@ -98348,7 +96280,7 @@ - + @@ -99269,7 +97201,7 @@ - + @@ -99298,6 +97230,46 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -99565,7 +97537,7 @@ - + @@ -99683,46 +97655,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -99985,7 +97917,7 @@ - + @@ -100085,6 +98017,6 @@ -- GitLab From 9c2115d25357d8a7dac4c8f3ab95e387041cc5bd Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Sat, 8 Aug 2020 21:56:10 -0500 Subject: [PATCH 0711/1304] ARM: dts: logicpd-torpedo-baseboard: Fix broken audio [ Upstream commit d7dfee67688ac7f2dfd4c3bc70c053ee990c40b5 ] Older versions of U-Boot would pinmux the whole board, but as the bootloader got updated, it started to only pinmux the pins it needed, and expected Linux to configure what it needed. 
Unfortunately this caused an issue with the audio, because the mcbsp2 pins were configured in the device tree, they were never referenced by the driver. When U-Boot stopped muxing the audio pins, the audio died. This patch adds the references to the associate the pin controller with the mcbsp2 driver which makes audio operate again. Fixes: 739f85bba5ab ("ARM: dts: Move most of logicpd-torpedo-37xx-devkit to logicpd-torpedo-baseboard") Signed-off-by: Adam Ford Signed-off-by: Tony Lindgren Signed-off-by: Sasha Levin --- arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi index 86c5644f558c..032e8dde1381 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi @@ -84,6 +84,8 @@ }; &mcbsp2 { + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; status = "okay"; }; -- GitLab From a6c1919abbceea9561f1069f3b20c9dd5c5733cf Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Fri, 14 Aug 2020 07:53:38 -0500 Subject: [PATCH 0712/1304] ARM: dts: logicpd-som-lv-baseboard: Fix broken audio [ Upstream commit 4d26e9a028e3d88223e06fa133c3d55af7ddbceb ] Older versions of U-Boot would pinmux the whole board, but as the bootloader got updated, it started to only pinmux the pins it needed, and expected Linux to configure what it needed. Unfortunately this caused an issue with the audio, because the mcbsp2 pins were configured in the device tree but never referenced by the driver. When U-Boot stopped muxing the audio pins, the audio died. This patch adds the references to the associate the pin controller with the mcbsp2 driver which makes audio operate again. 
Fixes: 5cb8b0fa55a9 ("ARM: dts: Move most of logicpd-som-lv-37xx-devkit.dts to logicpd-som-lv-baseboard.dtsi") Signed-off-by: Adam Ford Signed-off-by: Tony Lindgren Signed-off-by: Sasha Levin --- arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi index 3e39b9a1f35d..0093548d50ff 100644 --- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi @@ -55,6 +55,8 @@ &mcbsp2 { status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&mcbsp2_pins>; }; &charger { -- GitLab From 58fd141362309f7e54fe66d79e746149c1403515 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Fri, 31 Jul 2020 10:26:40 -0500 Subject: [PATCH 0713/1304] ARM: dts: socfpga: fix register entry for timer3 on Arria10 [ Upstream commit 0ff5a4812be4ebd4782bbb555d369636eea164f7 ] Fixes the register address for the timer3 entry on Arria10. 
Fixes: 475dc86d08de4 ("arm: dts: socfpga: Add a base DTSI for Altera's Arria10 SOC") Signed-off-by: Dinh Nguyen Signed-off-by: Sasha Levin --- arch/arm/boot/dts/socfpga_arria10.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index ba5657574d9b..4b1c8bec2de3 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -791,7 +791,7 @@ timer3: timer3@ffd00100 { compatible = "snps,dw-apb-timer"; interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>; - reg = <0xffd01000 0x100>; + reg = <0xffd00100 0x100>; clocks = <&l4_sys_free_clk>; clock-names = "timer"; }; -- GitLab From 4f3fc360b8c4f9fe1f3964e329745e73fc9f6e41 Mon Sep 17 00:00:00 2001 From: Matthias Schiffer Date: Tue, 28 Jul 2020 12:50:06 +0200 Subject: [PATCH 0714/1304] ARM: dts: ls1021a: fix QuadSPI-memory reg range [ Upstream commit 81dbbb417da4d1ac407dca5b434d39d5b6b91ef3 ] According to the Reference Manual, the correct size is 512 MiB. 
Without this fix, probing the QSPI fails: fsl-quadspi 1550000.spi: ioremap failed for resource [mem 0x40000000-0x7fffffff] fsl-quadspi 1550000.spi: Freescale QuadSPI probe failed fsl-quadspi: probe of 1550000.spi failed with error -12 Fixes: 85f8ee78ab72 ("ARM: dts: ls1021a: Add support for QSPI with ls1021a SoC") Signed-off-by: Matthias Schiffer Signed-off-by: Shawn Guo Signed-off-by: Sasha Levin --- arch/arm/boot/dts/ls1021a.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index d18c04326444..b66b2bd1aa85 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -168,7 +168,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <0x0 0x1550000 0x0 0x10000>, - <0x0 0x40000000 0x0 0x40000000>; + <0x0 0x40000000 0x0 0x20000000>; reg-names = "QuadSPI", "QuadSPI-memory"; interrupts = ; clock-names = "qspi_en", "qspi"; -- GitLab From e549e44694e9a9c07ac57d915d3efa417467fde8 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Wed, 19 Aug 2020 15:56:32 +0800 Subject: [PATCH 0715/1304] RDMA/rxe: Fix memleak in rxe_mem_init_user [ Upstream commit e3ddd6067ee62f6e76ebcf61ff08b2c729ae412b ] When page_address() fails, umem should be freed just like when rxe_mem_alloc() fails. 
Fixes: 8700e3e7c485 ("Soft RoCE driver") Link: https://lore.kernel.org/r/20200819075632.22285-1-dinghao.liu@zju.edu.cn Signed-off-by: Dinghao Liu Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/sw/rxe/rxe_mr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index dff605fdf60f..2cca89ca08cd 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -203,6 +203,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start, vaddr = page_address(sg_page(sg)); if (!vaddr) { pr_warn("null vaddr\n"); + ib_umem_release(umem); err = -ENOMEM; goto err1; } -- GitLab From 4b97eeb917dca3202f84bcbb1442d7f737dea822 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Sun, 5 Jul 2020 13:43:10 +0300 Subject: [PATCH 0716/1304] RDMA/rxe: Drop pointless checks in rxe_init_ports [ Upstream commit 6112ef62826e91afbae5446d5d47b38e25f47e3f ] Both pkey_tbl_len and gid_tbl_len are set in rxe_init_port_param() - so no need to check if they aren't set. 
Fixes: 8700e3e7c485 ("Soft RoCE driver") Link: https://lore.kernel.org/r/20200705104313.283034-2-kamalheib1@gmail.com Signed-off-by: Kamal Heib Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/sw/rxe/rxe.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 10999fa69281..94dedabe648c 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -163,9 +163,6 @@ static int rxe_init_ports(struct rxe_dev *rxe) rxe_init_port_param(port); - if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len) - return -EINVAL; - port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len, sizeof(*port->pkey_tbl), GFP_KERNEL); -- GitLab From 870c11d2e6e176021d0b64c10fc07cc47d2f3ef6 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Mon, 24 Aug 2020 11:14:32 -0700 Subject: [PATCH 0717/1304] RDMA/bnxt_re: Do not report transparent vlan from QP1 [ Upstream commit 2d0e60ee322d512fa6bc62d23a6760b39a380847 ] QP1 Rx CQE reports transparent VLAN ID in the completion and this is used while reporting the completion for received MAD packet. Check if the vlan id is configured before reporting it in the work completion. 
Fixes: 84511455ac5b ("RDMA/bnxt_re: report vlan_id and sl in qp1 recv completion") Link: https://lore.kernel.org/r/1598292876-26529-3-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index c9af2d139f5c..957da3ffe593 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3033,6 +3033,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, wc->wc_flags |= IB_WC_GRH; } +static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, + u16 vlan_id) +{ + /* + * Check if the vlan is configured in the host. If not configured, it + * can be a transparent VLAN. So dont report the vlan id. + */ + if (!__vlan_find_dev_deep_rcu(rdev->netdev, + htons(ETH_P_8021Q), vlan_id)) + return false; + return true; +} + static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe, u16 *vid, u8 *sl) { @@ -3101,9 +3114,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { - wc->vlan_id = vlan_id; - wc->sl = sl; - wc->wc_flags |= IB_WC_WITH_VLAN; + if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { + wc->vlan_id = vlan_id; + wc->sl = sl; + wc->wc_flags |= IB_WC_WITH_VLAN; + } } wc->port_num = 1; wc->vendor_err = orig_cqe->status; -- GitLab From 097c6cbe765a302f59745f391ef1405b881ca980 Mon Sep 17 00:00:00 2001 From: Ondrej Jirman Date: Fri, 28 Aug 2020 14:50:32 +0200 Subject: [PATCH 0718/1304] drm/sun4i: Fix dsi dcs long write function [ Upstream commit fd90e3808fd2c207560270c39b86b71af2231aa1 ] It's writing too much data. 
regmap_bulk_write expects number of register sized chunks to write, not a byte sized length of the bounce buffer. Bounce buffer needs to be padded too, so that regmap_bulk_write will not read past the end of the buffer. Fixes: 133add5b5ad4 ("drm/sun4i: Add Allwinner A31 MIPI-DSI controller support") Signed-off-by: Ondrej Jirman Signed-off-by: Maxime Ripard Reviewed-by: Jernej Skrabec Link: https://patchwork.freedesktop.org/patch/msgid/20200828125032.937148-1-megous@megous.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 79eb11cd185d..9a5584efd5e7 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -761,7 +761,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0), sun6i_dsi_dcs_build_pkt_hdr(dsi, msg)); - bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL); + bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL); if (!bounce) return -ENOMEM; @@ -772,7 +772,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi, memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc)); len += sizeof(crc); - regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len); + regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4)); regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1); kfree(bounce); -- GitLab From 0d211632108087b35007e3198b109dff4549264b Mon Sep 17 00:00:00 2001 From: Luo Jiaxing Date: Wed, 26 Aug 2020 15:24:26 +0800 Subject: [PATCH 0719/1304] scsi: libsas: Set data_dir as DMA_NONE if libata marks qc as NODATA [ Upstream commit 53de092f47ff40e8d4d78d590d95819d391bf2e0 ] It was discovered that sdparm will fail when attempting to disable write cache on a SATA disk connected via libsas. 
In the ATA command set the write cache state is controlled through the SET FEATURES operation. This is roughly corresponds to MODE SELECT in SCSI and the latter command is what is used in the SCSI-ATA translation layer. A subtle difference is that a MODE SELECT carries data whereas SET FEATURES is defined as a non-data command in ATA. Set the DMA data direction to DMA_NONE if the requested ATA command is identified as non-data. [mkp: commit desc] Fixes: fa1c1e8f1ece ("[SCSI] Add SATA support to libsas") Link: https://lore.kernel.org/r/1598426666-54544-1-git-send-email-luojiaxing@huawei.com Reviewed-by: John Garry Reviewed-by: Jason Yan Signed-off-by: Luo Jiaxing Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/libsas/sas_ata.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 64a958a99f6a..d82698b7dfe6 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -223,7 +223,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->num_scatter = si; } - task->data_dir = qc->dma_dir; + if (qc->tf.protocol == ATA_PROT_NODATA) + task->data_dir = DMA_NONE; + else + task->data_dir = qc->dma_dir; task->scatter = qc->sg; task->ata_task.retry_count = 1; task->task_state_flags = SAS_TASK_STATE_PENDING; -- GitLab From 0f9bbab73b5237cecafa9bc7041e5c3f6a15978e Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 2 Sep 2020 15:43:04 +0300 Subject: [PATCH 0720/1304] RDMA/core: Fix reported speed and width [ Upstream commit 28b0865714b315e318ac45c4fc9156f3d4649646 ] When the returned speed from __ethtool_get_link_ksettings() is SPEED_UNKNOWN this will lead to reporting a wrong speed and width for providers that uses ib_get_eth_speed(), fix that by defaulting the netdev_speed to SPEED_1000 in case the returned value from __ethtool_get_link_ksettings() is SPEED_UNKNOWN. 
Fixes: d41861942fc5 ("IB/core: Add generic function to extract IB speed from netdev") Link: https://lore.kernel.org/r/20200902124304.170912-1-kamalheib1@gmail.com Signed-off-by: Kamal Heib Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/core/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index e8432876cc86..e1ecd4682c09 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1711,7 +1711,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width) dev_put(netdev); - if (!rc) { + if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) { netdev_speed = lksettings.base.speed; } else { netdev_speed = SPEED_1000; -- GitLab From 7463e4ccb732164ebb137994084ee7e9018293bb Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Thu, 27 Aug 2020 07:58:41 -0700 Subject: [PATCH 0721/1304] mmc: sdhci-msm: Add retries when all tuning phases are found valid [ Upstream commit 9d5dcefb7b114d610aeb2371f6a6f119af316e43 ] As the comments in this patch say, if we tune and find all phases are valid it's _almost_ as bad as no phases being found valid. Probably all phases are not really reliable but we didn't detect where the unreliable place is. That means we'll essentially be guessing and hoping we get a good phase. This is not just a problem in theory. It was causing real problems on a real board. On that board, most often phase 10 is found as the only invalid phase, though sometimes 10 and 11 are invalid and sometimes just 11. Some percentage of the time, however, all phases are found to be valid. When this happens, the current logic will decide to use phase 11. Since phase 11 is sometimes found to be invalid, this is a bad choice. Sure enough, when phase 11 is picked we often get mmc errors later in boot. 
I have seen cases where all phases were found to be valid 3 times in a row, so increase the retry count to 10 just to be extra sure. Fixes: 415b5a75da43 ("mmc: sdhci-msm: Add platform_execute_tuning implementation") Signed-off-by: Douglas Anderson Reviewed-by: Veerabhadrarao Badiganti Acked-by: Adrian Hunter Link: https://lore.kernel.org/r/20200827075809.1.If179abf5ecb67c963494db79c3bc4247d987419b@changeid Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin --- drivers/mmc/host/sdhci-msm.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 643fd1a1b88b..4970cd40813b 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1060,7 +1060,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable) static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); - int tuning_seq_cnt = 3; + int tuning_seq_cnt = 10; u8 phase, tuned_phases[16], tuned_phase_cnt = 0; int rc; struct mmc_ios ios = host->mmc->ios; @@ -1124,6 +1124,22 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) } while (++phase < ARRAY_SIZE(tuned_phases)); if (tuned_phase_cnt) { + if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) { + /* + * All phases valid is _almost_ as bad as no phases + * valid. Probably all phases are not really reliable + * but we didn't detect where the unreliable place is. + * That means we'll essentially be guessing and hoping + * we get a good phase. Better to try a few times. 
+ */ + dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n", + mmc_hostname(mmc)); + if (--tuning_seq_cnt) { + tuned_phase_cnt = 0; + goto retry; + } + } + rc = msm_find_most_appropriate_phase(host, tuned_phases, tuned_phase_cnt); if (rc < 0) -- GitLab From 6d5b69b0f15985c9629cb1a67196b460d051d747 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 26 Aug 2020 16:43:41 -0700 Subject: [PATCH 0722/1304] ARM: dts: bcm: HR2: Fixed QSPI compatible string [ Upstream commit d663186293a818af97c648624bee6c7a59e8218b ] The string was incorrectly defined before from least to most specific, swap the compatible strings accordingly. Fixes: b9099ec754b5 ("ARM: dts: Add Broadcom Hurricane 2 DTS include file") Signed-off-by: Florian Fainelli Signed-off-by: Sasha Levin --- arch/arm/boot/dts/bcm-hr2.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi index e35398cc60a0..dd71ab08136b 100644 --- a/arch/arm/boot/dts/bcm-hr2.dtsi +++ b/arch/arm/boot/dts/bcm-hr2.dtsi @@ -217,7 +217,7 @@ }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, -- GitLab From ad40dab62da3f69a583737f82e8820ecacb13510 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 26 Aug 2020 16:44:25 -0700 Subject: [PATCH 0723/1304] ARM: dts: NSP: Fixed QSPI compatible string [ Upstream commit d1ecc40a954fd0f5e3789b91fa80f15e82284e39 ] The string was incorrectly defined before from least to most specific, swap the compatible strings accordingly. 
Fixes: 329f98c1974e ("ARM: dts: NSP: Add QSPI nodes to NSPI and bcm958625k DTSes") Signed-off-by: Florian Fainelli Signed-off-by: Sasha Levin --- arch/arm/boot/dts/bcm-nsp.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index 273a31604579..b395cb195db2 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -274,7 +274,7 @@ }; qspi: spi@27200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x027200 0x184>, <0x027000 0x124>, <0x11c408 0x004>, -- GitLab From e3aa4b5bed2b4b18f83d3caec1c9b69462144951 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 26 Aug 2020 16:45:29 -0700 Subject: [PATCH 0724/1304] ARM: dts: BCM5301X: Fixed QSPI compatible string [ Upstream commit b793dab8d811e103665d6bddaaea1c25db3776eb ] The string was incorrectly defined before from least to most specific, swap the compatible strings accordingly. 
Fixes: 1c8f40650723 ("ARM: dts: BCM5301X: convert to iProc QSPI") Signed-off-by: Florian Fainelli Signed-off-by: Sasha Levin --- arch/arm/boot/dts/bcm5301x.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index a678fb7c9e3b..c91716d5980c 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -445,7 +445,7 @@ }; spi@18029200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"; + compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x18029200 0x184>, <0x18029000 0x124>, <0x1811b408 0x004>, -- GitLab From d2dd6d5a77c5dbee03a5ffe75811f6e906599df4 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 26 Aug 2020 16:49:19 -0700 Subject: [PATCH 0725/1304] arm64: dts: ns2: Fixed QSPI compatible string [ Upstream commit 686e0a0c8c61e0e3f55321d0181fece3efd92777 ] The string was incorrectly defined before from least to most specific, swap the compatible strings accordingly. 
Fixes: ff73917d38a6 ("ARM64: dts: Add QSPI Device Tree node for NS2") Signed-off-by: Florian Fainelli Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index ea854f689fda..6bfb7bbd264a 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -745,7 +745,7 @@ }; qspi: spi@66470200 { - compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi"; + compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi"; reg = <0x66470200 0x184>, <0x66470000 0x124>, <0x67017408 0x004>, -- GitLab From 281c6d20503ec344f37c668f23b908fc28847007 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 9 Jul 2020 19:52:32 -0700 Subject: [PATCH 0726/1304] ARC: HSDK: wireup perf irq [ Upstream commit fe81d927b78c4f0557836661d32e41ebc957b024 ] Newer version of HSDK aka HSDK-4xD (with dual issue HS48x4 CPU) wired up the perf interrupt, so enable that in DT. This is OK for old HSDK where this irq is ignored because pct irq is not wired up in hardware. 
Signed-off-by: Vineet Gupta Signed-off-by: Sasha Levin --- arch/arc/boot/dts/hsdk.dts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index d131c54acd3e..ab01b75bfa67 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -83,6 +83,8 @@ arcpct: pct { compatible = "snps,archs-pct"; + interrupt-parent = <&cpu_intc>; + interrupts = <20>; }; /* TIMER0 with interrupt for clockevent */ -- GitLab From e26a8e684a6e8e6eeb566c17b5956b5fca19e120 Mon Sep 17 00:00:00 2001 From: Hanjun Guo Date: Wed, 22 Jul 2020 17:54:21 +0800 Subject: [PATCH 0727/1304] dmaengine: acpi: Put the CSRT table after using it [ Upstream commit 7eb48dd094de5fe0e216b550e73aa85257903973 ] The acpi_get_table() should be coupled with acpi_put_table() if the mapped table is not used at runtime to release the table mapping, put the CSRT table buf after using it. Signed-off-by: Hanjun Guo Link: https://lore.kernel.org/r/1595411661-15936-1-git-send-email-guohanjun@huawei.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/acpi-dma.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 4a748c3435d7..8d99c84361cb 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -131,11 +131,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma) if (ret < 0) { dev_warn(&adev->dev, "error in parsing resource group\n"); - return; + break; } grp = (struct acpi_csrt_group *)((void *)grp + grp->length); } + + acpi_put_table((struct acpi_table_header *)csrt); } /** -- GitLab From 2ea66fb974d5bee654fb42c304bbea4eaf656845 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 18 Aug 2020 16:15:58 +0200 Subject: [PATCH 0728/1304] netfilter: conntrack: allow sctp hearbeat after connection re-use [ Upstream commit cc5453a5b7e90c39f713091a7ebc53c1f87d1700 ] If an sctp connection gets re-used, heartbeats are flagged as 
invalid because their vtag doesn't match. Handle this in a similar way as TCP conntrack when it suspects that the endpoints and conntrack are out-of-sync. When a HEARTBEAT request fails its vtag validation, flag this in the conntrack state and accept the packet. When a HEARTBEAT_ACK is received with an invalid vtag in the reverse direction after we allowed such a HEARTBEAT through, assume we are out-of-sync and re-set the vtag info. v2: remove left-over snippet from an older incarnation that moved new_state/old_state assignments, thats not needed so keep that as-is. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin --- include/linux/netfilter/nf_conntrack_sctp.h | 2 ++ net/netfilter/nf_conntrack_proto_sctp.c | 39 ++++++++++++++++++--- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 9a33f171aa82..625f491b95de 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -9,6 +9,8 @@ struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; + u8 last_dir; + u8 flags; }; #endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 7d7e30ea0ecf..a937d4f75613 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -65,6 +65,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, }; +#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 + #define sNO SCTP_CONNTRACK_NONE #define sCL SCTP_CONNTRACK_CLOSED #define sCW SCTP_CONNTRACK_COOKIE_WAIT @@ -288,6 +290,7 @@ static int sctp_packet(struct nf_conn *ct, u_int32_t offset, count; unsigned int *timeouts; unsigned long map[256 / sizeof(unsigned long)] = { 0 }; + bool ignore = false; sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); if 
(sh == NULL) @@ -332,15 +335,39 @@ static int sctp_packet(struct nf_conn *ct, /* Sec 8.5.1 (D) */ if (sh->vtag != ct->proto.sctp.vtag[dir]) goto out_unlock; - } else if (sch->type == SCTP_CID_HEARTBEAT || - sch->type == SCTP_CID_HEARTBEAT_ACK) { + } else if (sch->type == SCTP_CID_HEARTBEAT) { + if (ct->proto.sctp.vtag[dir] == 0) { + pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir); + ct->proto.sctp.vtag[dir] = sh->vtag; + } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.last_dir = dir; + ignore = true; + continue; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + } + } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) { if (ct->proto.sctp.vtag[dir] == 0) { pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir); ct->proto.sctp.vtag[dir] = sh->vtag; } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { - pr_debug("Verification tag check failed\n"); - goto out_unlock; + if (test_bit(SCTP_CID_DATA, map) || ignore) + goto out_unlock; + + if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 || + ct->proto.sctp.last_dir == dir) + goto out_unlock; + + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; + ct->proto.sctp.vtag[dir] = sh->vtag; + ct->proto.sctp.vtag[!dir] = 0; + } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) { + ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED; } } @@ -375,6 +402,10 @@ static int sctp_packet(struct nf_conn *ct, } spin_unlock_bh(&ct->lock); + /* allow but do not refresh timeout */ + if (ignore) + return NF_ACCEPT; + timeouts = nf_ct_timeout_lookup(ct); if (!timeouts) timeouts = sctp_pernet(nf_ct_net(ct))->timeouts; -- GitLab From 1a76e0c124a869365ac7b2becbbe60a77ea63951 Mon Sep 17 00:00:00 2001 From: Xie He Date: Fri, 21 Aug 2020 14:26:59 -0700 Subject: [PATCH 
0729/1304] drivers/net/wan/lapbether: Added needed_tailroom [ Upstream commit 1ee39c1448c4e0d480c5b390e2db1987561fb5c2 ] The underlying Ethernet device may request necessary tailroom to be allocated by setting needed_tailroom. This driver should also set needed_tailroom to request the tailroom needed by the underlying Ethernet device to be allocated. Cc: Willem de Bruijn Cc: Martin Schiller Signed-off-by: Xie He Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/wan/lapbether.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index c94dfa70f2a3..6b2553e893ac 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -343,6 +343,7 @@ static int lapbeth_new_device(struct net_device *dev) */ ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + dev->needed_headroom; + ndev->needed_tailroom = dev->needed_tailroom; lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; -- GitLab From fb7540c25bd01aa7b99a66a8eec7c0ac4dfb00e4 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Sun, 23 Aug 2020 15:23:43 +0800 Subject: [PATCH 0730/1304] NFC: st95hf: Fix memleak in st95hf_in_send_cmd [ Upstream commit f97c04c316d8fea16dca449fdfbe101fbdfee6a2 ] When down_killable() fails, skb_resp should be freed just like when st95hf_spi_send() fails. Signed-off-by: Dinghao Liu Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/nfc/st95hf/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c index 01acb6e53365..c4b6e29c0719 100644 --- a/drivers/nfc/st95hf/core.c +++ b/drivers/nfc/st95hf/core.c @@ -981,7 +981,7 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev, rc = down_killable(&stcontext->exchange_lock); if (rc) { WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n"); - return rc; + goto free_skb_resp; } rc = st95hf_spi_send(&stcontext->spicontext, skb->data, -- GitLab From 4ee1769b497fba9dcdc6af249ba73a34decbbbff Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Sun, 23 Aug 2020 19:29:35 +0800 Subject: [PATCH 0731/1304] firestream: Fix memleak in fs_open [ Upstream commit 15ac5cdafb9202424206dc5bd376437a358963f9 ] When make_rate() fails, vcc should be freed just like other error paths in fs_open(). Signed-off-by: Dinghao Liu Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/atm/firestream.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 112b1001c269..ef395b238816 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1013,6 +1013,7 @@ static int fs_open(struct atm_vcc *atm_vcc) error = make_rate (pcr, r, &tmc0, NULL); if (error) { kfree(tc); + kfree(vcc); return error; } } -- GitLab From c2d03591ac5f666ebc6ba9887efaa94162262bcb Mon Sep 17 00:00:00 2001 From: Mohan Kumar Date: Tue, 25 Aug 2020 10:54:14 +0530 Subject: [PATCH 0732/1304] ALSA: hda: Fix 2 channel swapping for Tegra [ Upstream commit 216116eae43963c662eb84729507bad95214ca6b ] The Tegra HDA codec HW implementation has an issue related to not swapping the 2 channel Audio Sample Packet(ASP) channel mapping. Whatever the FL and FR mapping specified the left channel always comes out of left speaker and right channel on right speaker. 
So add condition to disallow the swapping of FL,FR during the playback. Signed-off-by: Mohan Kumar Acked-by: Sameer Pujar Link: https://lore.kernel.org/r/20200825052415.20626-2-mkumard@nvidia.com Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/pci/hda/patch_hdmi.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index b8e5f2b19ff8..708efb9b4387 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3431,6 +3431,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec) static int patch_tegra_hdmi(struct hda_codec *codec) { + struct hdmi_spec *spec; int err; err = patch_generic_hdmi(codec); @@ -3438,6 +3439,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec) return err; codec->patch_ops.build_pcms = tegra_hdmi_build_pcms; + spec = codec->spec; + spec->chmap.ops.chmap_cea_alloc_validate_get_type = + nvhdmi_chmap_cea_alloc_validate_get_type; + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate; return 0; } -- GitLab From b562614b70dfbe6a32ce625d0280c5d313652581 Mon Sep 17 00:00:00 2001 From: Xie He Date: Tue, 25 Aug 2020 20:03:53 -0700 Subject: [PATCH 0733/1304] drivers/net/wan/lapbether: Set network_header before transmitting [ Upstream commit 91244d108441013b7367b3b4dcc6869998676473 ] Set the skb's network_header before it is passed to the underlying Ethernet device for transmission. This patch fixes the following issue: When we use this driver with AF_PACKET sockets, there would be error messages of: protocol 0805 is buggy, dev (Ethernet interface name) printed in the system "dmesg" log. This is because skbs passed down to the Ethernet device for transmission don't have their network_header properly set, and the dev_queue_xmit_nit function in net/core/dev.c complains about this. 
Reason of setting the network_header to this place (at the end of the Ethernet header, and at the beginning of the Ethernet payload): Because when this driver receives an skb from the Ethernet device, the network_header is also set at this place. Cc: Martin Schiller Signed-off-by: Xie He Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/wan/lapbether.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 6b2553e893ac..15177a54b17d 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -213,6 +213,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) skb->dev = dev = lapbeth->ethdev; + skb_reset_network_header(skb); + dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); dev_queue_xmit(skb); -- GitLab From b701016288dc8cfe55d24b1b9e075adbab25ae78 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Wed, 26 Aug 2020 14:12:18 -0700 Subject: [PATCH 0734/1304] xfs: initialize the shortform attr header padding entry [ Upstream commit 125eac243806e021f33a1fdea3687eccbb9f7636 ] Don't leak kernel memory contents into the shortform attr fork. Signed-off-by: Darrick J. 
Wong Reviewed-by: Eric Sandeen Reviewed-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Sasha Levin --- fs/xfs/libxfs/xfs_attr_leaf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 087a5715cf20..bd37f4a292c3 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -551,8 +551,8 @@ xfs_attr_shortform_create(xfs_da_args_t *args) ASSERT(ifp->if_flags & XFS_IFINLINE); } xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); - hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; - hdr->count = 0; + hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data; + memset(hdr, 0, sizeof(*hdr)); hdr->totsize = cpu_to_be16(sizeof(*hdr)); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } -- GitLab From 10d4f8d200de55d15ef5cf659295de0f2a217728 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Mon, 24 Aug 2020 12:10:33 -0700 Subject: [PATCH 0735/1304] irqchip/eznps: Fix build error for !ARC700 builds [ Upstream commit 89d29997f103d08264b0685796b420d911658b96 ] eznps driver is supposed to be platform independent however it ends up including stuff from inside arch/arc headers leading to rand config build errors. The quick hack to fix this (proper fix is too much chrun for non active user-base) is to add following to nps platform agnostic header. 
- copy AUX_IENABLE from arch/arc header - move CTOP_AUX_IACK from arch/arc/plat-eznps/*/** Reported-by: kernel test robot Reported-by: Sebastian Andrzej Siewior Link: https://lkml.kernel.org/r/20200824095831.5lpkmkafelnvlpi2@linutronix.de Signed-off-by: Vineet Gupta Signed-off-by: Sasha Levin --- arch/arc/plat-eznps/include/plat/ctop.h | 1 - include/soc/nps/common.h | 6 ++++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h index 4f6a1673b3a6..ddfca2c3357a 100644 --- a/arch/arc/plat-eznps/include/plat/ctop.h +++ b/arch/arc/plat-eznps/include/plat/ctop.h @@ -43,7 +43,6 @@ #define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C) #define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030) #define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080) -#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088) #define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C) #define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300) diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h index 9b1d43d671a3..8c18dc6d3fde 100644 --- a/include/soc/nps/common.h +++ b/include/soc/nps/common.h @@ -45,6 +45,12 @@ #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60 #define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422 +#ifndef AUX_IENABLE +#define AUX_IENABLE 0x40c +#endif + +#define CTOP_AUX_IACK (0xFFFFF800 + 0x088) + #ifndef __ASSEMBLY__ /* In order to increase compilation test coverage */ -- GitLab From ca1bf7451c2f41e259821d0644cc5523c8613071 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Fri, 14 Aug 2020 11:46:51 -0700 Subject: [PATCH 0736/1304] nvme-fabrics: don't check state NVME_CTRL_NEW for request acceptance [ Upstream commit d7144f5c4cf4de95fdc3422943cf51c06aeaf7a7 ] NVME_CTRL_NEW should never see any I/O, because in order to start initialization it has to transition to NVME_CTRL_CONNECTING and from there it will never return to this state. 
Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg Signed-off-by: Sasha Levin --- drivers/nvme/host/fabrics.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index bcd09d3a44da..05dd46f98441 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -577,7 +577,6 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq, * which is require to set the queue live in the appropinquate states. */ switch (ctrl->state) { - case NVME_CTRL_NEW: case NVME_CTRL_CONNECTING: if (req->cmd->common.opcode == nvme_fabrics_command && req->cmd->fabrics.fctype == nvme_fabrics_type_connect) -- GitLab From d9a0668ae1a4432fe5d380ac6658caba48204afa Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 5 Aug 2020 18:13:58 -0700 Subject: [PATCH 0737/1304] nvme-rdma: serialize controller teardown sequences [ Upstream commit 5110f40241d08334375eb0495f174b1d2c07657e ] In the timeout handler we may need to complete a request because the request that timed out may be an I/O that is a part of a serial sequence of controller teardown or initialization. In order to complete the request, we need to fence any other context that may compete with us and complete the request that is timing out. In this case, we could have a potential double completion in case a hard-irq or a different competing context triggered error recovery and is running inflight request cancellation concurrently with the timeout handler. Protect using a ctrl teardown_lock to serialize contexts that may complete a cancelled request due to error recovery or a reset. 
Reviewed-by: Christoph Hellwig Reviewed-by: James Smart Signed-off-by: Sagi Grimberg Signed-off-by: Sasha Levin --- drivers/nvme/host/rdma.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index f393a6193252..7e2cdb17c26d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -118,6 +118,7 @@ struct nvme_rdma_ctrl { struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; + struct mutex teardown_lock; bool use_inline_data; }; @@ -880,6 +881,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { + mutex_lock(&ctrl->teardown_lock); blk_mq_quiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); if (ctrl->ctrl.admin_tagset) @@ -887,11 +889,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, nvme_cancel_request, &ctrl->ctrl); blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_destroy_admin_queue(ctrl, remove); + mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { + mutex_lock(&ctrl->teardown_lock); if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); @@ -902,6 +906,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, nvme_start_queues(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, remove); } + mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) @@ -1955,6 +1960,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, return ERR_PTR(-ENOMEM); ctrl->ctrl.opts = opts; INIT_LIST_HEAD(&ctrl->list); + mutex_init(&ctrl->teardown_lock); if (opts->mask & NVMF_OPT_TRSVCID) port = opts->trsvcid; -- GitLab From 0574a3b907afbc0befe92c64ca4b39f9219d7f97 Mon Sep 17 00:00:00 2001 From: Nirenjan Krishnan Date: Sun, 30 Aug 2020 17:48:59 -0700 Subject: [PATCH 
0738/1304] HID: quirks: Set INCREMENT_USAGE_ON_DUPLICATE for all Saitek X52 devices [ Upstream commit 77df710ba633dfb6c65c65cf99ea9e084a1c9933 ] The Saitek X52 family of joysticks has a pair of axes that were originally (by the Windows driver) used as mouse pointer controls. The corresponding usage page is the Game Controls page, which is not recognized by the generic HID driver, and therefore, both axes get mapped to ABS_MISC. The quirk makes the second axis get mapped to ABS_MISC+1, and therefore made available separately. One Saitek X52 device is already fixed. This patch fixes the other two known devices with VID/PID 06a3:0255 and 06a3:0762. Signed-off-by: Nirenjan Krishnan Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-ids.h | 2 ++ drivers/hid/hid-quirks.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 2c100b73d3fc..e18d796d985f 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -985,6 +985,8 @@ #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 #define USB_DEVICE_ID_SAITEK_X52 0x075c +#define USB_DEVICE_ID_SAITEK_X52_2 0x0255 +#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762 #define USB_VENDOR_ID_SAMSUNG 0x0419 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 62f87f8bd972..2d8d20a7f457 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -147,6 +147,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { 
HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, -- GitLab From b1fb10d3d6eb62088468ec9e7261612dca1f90ff Mon Sep 17 00:00:00 2001 From: Xie He Date: Fri, 28 Aug 2020 00:07:52 -0700 Subject: [PATCH 0739/1304] drivers/net/wan/hdlc_cisco: Add hard_header_len [ Upstream commit 1a545ebe380bf4c1433e3c136e35a77764fda5ad ] This driver didn't set hard_header_len. This patch sets hard_header_len for it according to its header_ops->create function. This driver's header_ops->create function (cisco_hard_header) creates a header of (struct hdlc_header), so hard_header_len should be set to sizeof(struct hdlc_header). Cc: Martin Schiller Signed-off-by: Xie He Acked-by: Krzysztof Halasa Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/wan/hdlc_cisco.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 320039d329c7..c169a26e5359 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -374,6 +374,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); spin_lock_init(&state(hdlc)->lock); dev->header_ops = &cisco_header_ops; + dev->hard_header_len = sizeof(struct hdlc_header); dev->type = ARPHRD_CISCO; call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); netif_dormant_on(dev); -- GitLab From 4023bad0a528d383800a2262c899f52dcd613c7c Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Mon, 31 Aug 2020 17:06:43 +0800 Subject: [PATCH 0740/1304] HID: elan: Fix memleak in elan_input_configured [ Upstream commit b7429ea53d6c0936a0f10a5d64164f0aea440143 ] When input_mt_init_slots() fails, input should be freed to prevent memleak. When input_register_device() fails, we should call input_mt_destroy_slots() to free memory allocated by input_mt_init_slots(). 
Signed-off-by: Dinghao Liu Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-elan.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c index 07e26c3567eb..6346282e0ff0 100644 --- a/drivers/hid/hid-elan.c +++ b/drivers/hid/hid-elan.c @@ -192,6 +192,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER); if (ret) { hid_err(hdev, "Failed to init elan MT slots: %d\n", ret); + input_free_device(input); return ret; } @@ -202,6 +203,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi) if (ret) { hid_err(hdev, "Failed to register elan input device: %d\n", ret); + input_mt_destroy_slots(input); input_free_device(input); return ret; } -- GitLab From 099df531a4ca2a411522131090c65add2b35316d Mon Sep 17 00:00:00 2001 From: Evgeniy Didin Date: Tue, 7 Jul 2020 18:38:58 +0300 Subject: [PATCH 0741/1304] ARC: [plat-hsdk]: Switch ethernet phy-mode to rgmii-id [ Upstream commit 26907eb605fbc3ba9dbf888f21d9d8d04471271d ] HSDK board has Micrel KSZ9031, recent commit bcf3440c6dd ("net: phy: micrel: add phy-mode support for the KSZ9031 PHY") caused a breakdown of Ethernet. Using 'phy-mode = "rgmii"' is not correct because according to RGMII specification it is necessary to have delay on RX (PHY to MAC) which is not generated in case of "rgmii". Using "rgmii-id" adds necessary delay and solves the issue. Also adding name of PHY placed on HSDK board.
Signed-off-by: Evgeniy Didin Cc: Eugeniy Paltsev Cc: Alexey Brodkin Signed-off-by: Vineet Gupta Signed-off-by: Sasha Levin --- arch/arc/boot/dts/hsdk.dts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index ab01b75bfa67..f6b6e3c9ca8a 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -175,7 +175,7 @@ reg = <0x8000 0x2000>; interrupts = <10>; interrupt-names = "macirq"; - phy-mode = "rgmii"; + phy-mode = "rgmii-id"; snps,pbl = <32>; snps,multicast-filter-bins = <256>; clocks = <&gmacclk>; @@ -193,7 +193,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "snps,dwmac-mdio"; - phy0: ethernet-phy@0 { + phy0: ethernet-phy@0 { /* Micrel KSZ9031 */ reg = <0>; ti,rx-internal-delay = ; ti,tx-internal-delay = ; -- GitLab From 14a18a3f26b170b93dcd647c574fe0080604886e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 20 Aug 2020 17:40:02 +0200 Subject: [PATCH 0742/1304] cpufreq: intel_pstate: Refuse to turn off with HWP enabled [ Upstream commit 43298db3009f06fe5c69e1ca8b6cfc2565772fa1 ] After commit f6ebbcf08f37 ("cpufreq: intel_pstate: Implement passive mode with HWP enabled") it is possible to change the driver status to "off" via sysfs with HWP enabled, which effectively causes the driver to unregister itself, but HWP remains active and it forces the minimum performance, so even if another cpufreq driver is loaded, it will not be able to control the CPU frequency. For this reason, make the driver refuse to change the status to "off" with HWP enabled. Signed-off-by: Rafael J. 
Wysocki Acked-by: Srinivas Pandruvada Signed-off-by: Sasha Levin --- drivers/cpufreq/intel_pstate.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 99166000ffb7..8fa22aa2ac65 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2325,9 +2325,15 @@ static int intel_pstate_update_status(const char *buf, size_t size) { int ret; - if (size == 3 && !strncmp(buf, "off", size)) - return intel_pstate_driver ? - intel_pstate_unregister_driver() : -EINVAL; + if (size == 3 && !strncmp(buf, "off", size)) { + if (!intel_pstate_driver) + return -EINVAL; + + if (hwp_active) + return -EBUSY; + + return intel_pstate_unregister_driver(); + } if (size == 6 && !strncmp(buf, "active", size)) { if (intel_pstate_driver) { -- GitLab From 7be3d37a2fe690e3ff41d274a24640733ba22ec3 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 31 Aug 2020 20:02:50 -0700 Subject: [PATCH 0743/1304] cpufreq: intel_pstate: Fix intel_pstate_get_hwp_max() for turbo disabled [ Upstream commit eacc9c5a927e474c173a5d53dd7fb8e306511768 ] This fixes the behavior of the scaling_max_freq and scaling_min_freq sysfs files in systems which had turbo disabled by the BIOS. Caleb noticed that the HWP is programmed to operate in the wrong P-state range on his system when the CPUFREQ policy min/max frequency is set via sysfs. This seems to be because in his system intel_pstate_get_hwp_max() is returning the maximum turbo P-state even though turbo was disabled by the BIOS, which causes intel_pstate to scale kHz frequencies incorrectly e.g. setting the maximum turbo frequency whenever the maximum guaranteed frequency is requested via sysfs. Tested-by: Caleb Callaway Signed-off-by: Francisco Jerez Acked-by: Srinivas Pandruvada [ rjw: Minor subject edits ] Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Sasha Levin --- drivers/cpufreq/intel_pstate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 8fa22aa2ac65..864a7e8ebdfc 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -712,7 +712,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max, rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap); - if (global.no_turbo) + if (global.no_turbo || global.turbo_disabled) *current_max = HWP_GUARANTEED_PERF(cap); else *current_max = HWP_HIGHEST_PERF(cap); -- GitLab From 3e1600cc10dffe654e2699fe9ec4d546cb7c1a30 Mon Sep 17 00:00:00 2001 From: Rander Wang Date: Wed, 2 Sep 2020 18:42:18 +0300 Subject: [PATCH 0744/1304] ALSA: hda: fix a runtime pm issue in SOF when integrated GPU is disabled [ Upstream commit 13774d81f38538c5fa2924bdcdfa509155480fa6 ] In snd_hdac_device_init pm_runtime_set_active is called to increase child_count in parent device. But when it is failed to build connection with GPU for one case that integrated graphic gpu is disabled, snd_hdac_ext_bus_device_exit will be invoked to clean up a HD-audio extended codec base device. At this time the child_count of parent is not decreased, which makes parent device can't get suspended. This patch calls pm_runtime_set_suspended to decrease child_count in parent device in snd_hdac_device_exit to match with snd_hdac_device_init. pm_runtime_set_suspended can make sure that it will not decrease child_count if the device is already suspended. 
Signed-off-by: Rander Wang Reviewed-by: Ranjani Sridharan Reviewed-by: Pierre-Louis Bossart Reviewed-by: Bard Liao Reviewed-by: Guennadi Liakhovetski Signed-off-by: Kai Vehmanen Link: https://lore.kernel.org/r/20200902154218.1440441-1-kai.vehmanen@linux.intel.com Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/hda/hdac_device.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index dbf02a3a8d2f..58b53a4bc4d0 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -124,6 +124,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init); void snd_hdac_device_exit(struct hdac_device *codec) { pm_runtime_put_noidle(&codec->dev); + /* keep balance of runtime PM child_count in parent device */ + pm_runtime_set_suspended(&codec->dev); snd_hdac_bus_remove_device(codec->bus, codec); kfree(codec->vendor_name); kfree(codec->chip_name); -- GitLab From 349eb8ed7302ffd7c9c6bd850b401375eb0daa29 Mon Sep 17 00:00:00 2001 From: Sandeep Raghuraman Date: Thu, 27 Aug 2020 18:43:37 +0530 Subject: [PATCH 0745/1304] drm/amdgpu: Fix bug in reporting voltage for CIK [ Upstream commit d98299885c9ea140c1108545186593deba36c4ac ] On my R9 390, the voltage was reported as a constant 1000 mV. This was due to a bug in smu7_hwmgr.c, in the smu7_read_sensor() function, where some magic constants were used in a condition, to determine whether the voltage should be read from PLANE2_VID or PLANE1_VID. The VDDC mask was incorrectly used, instead of the VDDGFX mask. This patch changes the code to use the correct defined constants (and apply the correct bitshift), thus resulting in correct voltage reporting. 
Signed-off-by: Sandeep Raghuraman Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 219440bebd05..72c0a2ae2dd4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3566,7 +3566,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_POWER: return smu7_get_gpu_power(hwmgr, (uint32_t *)value); case AMDGPU_PP_SENSOR_VDDGFX: - if ((data->vr_config & 0xff) == 0x2) + if ((data->vr_config & VRCONF_VDDGFX_MASK) == + (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT)) val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); else -- GitLab From f12ce53f4d3648eff32dbd09a9e1006c7beec685 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 24 Aug 2020 12:54:15 +0200 Subject: [PATCH 0746/1304] iommu/amd: Do not use IOMMUv2 functionality when SME is active [ Upstream commit 2822e582501b65707089b097e773e6fd70774841 ] When memory encryption is active the device is likely not in a direct mapped domain. Forbid using IOMMUv2 functionality for now until finer grained checks for this have been implemented. Signed-off-by: Joerg Roedel Link: https://lore.kernel.org/r/20200824105415.21000-3-joro@8bytes.org Signed-off-by: Joerg Roedel Signed-off-by: Sasha Levin --- drivers/iommu/amd_iommu_v2.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 58da65df03f5..7a59a8ebac10 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -776,6 +776,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) might_sleep(); + /* + * When memory encryption is active the device is likely not in a + * direct-mapped domain. 
Forbid using IOMMUv2 functionality for now. + */ + if (mem_encrypt_active()) + return -ENODEV; + if (!amd_iommu_v2_supported()) return -ENODEV; -- GitLab From 4a31e14df3943ebbe1b9be30f6a0c4dab7bd9a8d Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Fri, 4 Sep 2020 18:58:08 +0300 Subject: [PATCH 0747/1304] gcov: Disable gcov build with GCC 10 [ Upstream commit cfc905f158eaa099d6258031614d11869e7ef71c ] GCOV built with GCC 10 doesn't initialize n_function variable. This produces different kernel panics as was seen by Colin in Ubuntu and me in FC 32. As a workaround, let's disable GCOV build for broken GCC 10 version. Link: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1891288 Link: https://lore.kernel.org/lkml/20200827133932.3338519-1-leon@kernel.org Link: https://lore.kernel.org/lkml/CAHk-=whbijeSdSvx-Xcr0DPMj0BiwhJ+uiNnDSVZcr_h_kg7UA@mail.gmail.com/ Cc: Colin Ian King Signed-off-by: Leon Romanovsky Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- kernel/gcov/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index 1e3823fa799b..bfb6579a19d0 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -3,6 +3,7 @@ menu "GCOV-based kernel profiling" config GCOV_KERNEL bool "Enable gcov-based kernel profiling" depends on DEBUG_FS + depends on !CC_IS_GCC || GCC_VERSION < 100000 select CONSTRUCTORS if !UML default n ---help--- -- GitLab From 4e8bf9d2ba15b4a9e479c8a446f1e0a284ba9d70 Mon Sep 17 00:00:00 2001 From: Angelo Compagnucci Date: Wed, 19 Aug 2020 09:55:25 +0200 Subject: [PATCH 0748/1304] iio: adc: mcp3422: fix locking scope commit 3f1093d83d7164e4705e4232ccf76da54adfda85 upstream. Locking should be held for the entire reading sequence involving setting the channel, waiting for the channel switch and reading from the channel. If not, reading from a channel can result mixing with the reading from another channel. 
Fixes: 07914c84ba30 ("iio: adc: Add driver for Microchip MCP3422/3/4 high resolution ADC") Signed-off-by: Angelo Compagnucci Link: https://lore.kernel.org/r/20200819075525.1395248-1-angelo.compagnucci@gmail.com Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/mcp3422.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index 819f26011500..fd1496c9c3fb 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -99,16 +99,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig) { int ret; - mutex_lock(&adc->lock); - ret = i2c_master_send(adc->i2c, &newconfig, 1); if (ret > 0) { adc->config = newconfig; ret = 0; } - mutex_unlock(&adc->lock); - return ret; } @@ -141,6 +137,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc, u8 config; u8 req_channel = channel->channel; + mutex_lock(&adc->lock); + if (req_channel != MCP3422_CHANNEL(adc->config)) { config = adc->config; config &= ~MCP3422_CHANNEL_MASK; @@ -153,7 +151,11 @@ static int mcp3422_read_channel(struct mcp3422 *adc, msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]); } - return mcp3422_read(adc, value, &config); + ret = mcp3422_read(adc, value, &config); + + mutex_unlock(&adc->lock); + + return ret; } static int mcp3422_read_raw(struct iio_dev *iio, -- GitLab From 4fea86da6a106bae203f8010ccae43844bf7bca6 Mon Sep 17 00:00:00 2001 From: Angelo Compagnucci Date: Tue, 1 Sep 2020 11:32:18 +0200 Subject: [PATCH 0749/1304] iio: adc: mcp3422: fix locking on error path [ Upstream commit a139ffa40f0c24b753838b8ef3dcf6ad10eb7854 ] Reading from the chip should be unlocked on error path else the lock could never being released. 
Fixes: 07914c84ba30 ("iio: adc: Add driver for Microchip MCP3422/3/4 high resolution ADC") Fixes: 3f1093d83d71 ("iio: adc: mcp3422: fix locking scope") Acked-by: Jonathan Cameron Signed-off-by: Angelo Compagnucci Link: https://lore.kernel.org/r/20200901093218.1500845-1-angelo.compagnucci@gmail.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/iio/adc/mcp3422.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c index fd1496c9c3fb..4ee4ca35c255 100644 --- a/drivers/iio/adc/mcp3422.c +++ b/drivers/iio/adc/mcp3422.c @@ -146,8 +146,10 @@ static int mcp3422_read_channel(struct mcp3422 *adc, config &= ~MCP3422_PGA_MASK; config |= MCP3422_PGA_VALUE(adc->pga[req_channel]); ret = mcp3422_update_config(adc, config); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&adc->lock); return ret; + } msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]); } -- GitLab From da2eb70c3ce0fc9ac129d3f67747727f371d6ad8 Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Mon, 3 Aug 2020 08:04:05 +0300 Subject: [PATCH 0750/1304] iio: adc: ti-ads1015: fix conversion when CONFIG_PM is not set commit e71e6dbe96ac80ac2aebe71a6a942e7bd60e7596 upstream. To stop conversion ads1015_set_power_state() function call unimplemented function __pm_runtime_suspend() from pm_runtime_put_autosuspend() if CONFIG_PM is not set. In case of CONFIG_PM is not set: __pm_runtime_suspend() returns -ENOSYS, so ads1015_read_raw() failed because ads1015_set_power_state() returns an error. If CONFIG_PM is disabled, there is no need to start/stop conversion. Fix it by adding return 0 function variant if CONFIG_PM is not set. 
Signed-off-by: Maxim Kochetkov Fixes: ecc24e72f437 ("iio: adc: Add TI ADS1015 ADC driver support") Tested-by: Maxim Kiselev Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ti-ads1015.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c index 6a114dcb4a3a..dc8d859e4b92 100644 --- a/drivers/iio/adc/ti-ads1015.c +++ b/drivers/iio/adc/ti-ads1015.c @@ -312,6 +312,7 @@ static const struct iio_chan_spec ads1115_channels[] = { IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP), }; +#ifdef CONFIG_PM static int ads1015_set_power_state(struct ads1015_data *data, bool on) { int ret; @@ -329,6 +330,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on) return ret < 0 ? ret : 0; } +#else /* !CONFIG_PM */ + +static int ads1015_set_power_state(struct ads1015_data *data, bool on) +{ + return 0; +} + +#endif /* !CONFIG_PM */ + static int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) { -- GitLab From a418187843866e39c18fccd8342c3c6ba0821784 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:48 +0100 Subject: [PATCH 0751/1304] iio:light:ltr501 Fix timestamp alignment issue. commit 2684d5003490df5398aeafe2592ba9d4a4653998 upstream. One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses an array of smaller elements on the stack. Here we use a structure on the stack. The driver already did an explicit memset so no data leak was possible. Forced alignment of ts is not strictly necessary but probably makes the code slightly less fragile. Note there has been some rework in this driver of the years, so no way this will apply cleanly all the way back. 
Fixes: 2690be905123 ("iio: Add Lite-On ltr501 ambient light / proximity sensor driver") Reported-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/light/ltr501.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 830a2d45aa4d..947f17588024 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1245,13 +1245,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct ltr501_data *data = iio_priv(indio_dev); - u16 buf[8]; + struct { + u16 channels[3]; + s64 ts __aligned(8); + } scan; __le16 als_buf[2]; u8 mask = 0; int j = 0; int ret, psdata; - memset(buf, 0, sizeof(buf)); + memset(&scan, 0, sizeof(scan)); /* figure out which data needs to be ready */ if (test_bit(0, indio_dev->active_scan_mask) || @@ -1270,9 +1273,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) if (ret < 0) return ret; if (test_bit(0, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[1]); + scan.channels[j++] = le16_to_cpu(als_buf[1]); if (test_bit(1, indio_dev->active_scan_mask)) - buf[j++] = le16_to_cpu(als_buf[0]); + scan.channels[j++] = le16_to_cpu(als_buf[0]); } if (mask & LTR501_STATUS_PS_RDY) { @@ -1280,10 +1283,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p) &psdata, 2); if (ret < 0) goto done; - buf[j++] = psdata & LTR501_PS_DATA_MASK; + scan.channels[j++] = psdata & LTR501_PS_DATA_MASK; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); done: -- GitLab From 0ddc21f7cb8cfd9fe9ba8681b6e7e77ed4672c3e Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:39 +0100 Subject: [PATCH 0752/1304] iio:accel:bmc150-accel: Fix timestamp alignment and prevent data 
leak. commit a6f86f724394de3629da63fe5e1b7a4ab3396efe upstream. One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses a 16 byte array of smaller elements on the stack. As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv() data with alignment ensured by use of an explicit c structure. This data is allocated with kzalloc so no data can leak appart from previous readings. Fixes tag is beyond some major refactoring so likely manual backporting would be needed to get that far back. Whilst the force alignment of the ts is not strictly necessary, it does make the code less fragile. Fixes: 3bbec9773389 ("iio: bmc150_accel: add support for hardware fifo") Reported-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron Acked-by: Srinivas Pandruvada Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/accel/bmc150-accel-core.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index cb8c98a44010..e029d4b0f7af 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -197,6 +197,14 @@ struct bmc150_accel_data { struct mutex mutex; u8 fifo_mode, watermark; s16 buffer[8]; + /* + * Ensure there is sufficient space and correct alignment for + * the timestamp if enabled + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; u8 bw_bits; u32 slope_dur; u32 slope_thres; @@ -915,15 +923,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev, * now. 
*/ for (i = 0; i < count; i++) { - u16 sample[8]; int j, bit; j = 0; for_each_set_bit(bit, indio_dev->active_scan_mask, indio_dev->masklength) - memcpy(&sample[j++], &buffer[i * 3 + bit], 2); + memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit], + sizeof(data->scan.channels[0])); - iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp); + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, + tstamp); tstamp += sample_period; } -- GitLab From 5acf8486056f28b21623765fb1412c0b5234c39d Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:57 +0100 Subject: [PATCH 0753/1304] iio:adc:ti-adc084s021 Fix alignment and data leak issues. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a661b571e3682705cb402a5cd1e970586a3ec00f upstream. One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses an array of smaller elements on the stack. As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv(). This data is allocated with kzalloc so no data can leak apart from previous readings. The force alignment of ts is not strictly necessary in this case but reduces the fragility of the code. 
Fixes: 3691e5a69449 ("iio: adc: add driver for the ti-adc084s021 chip") Reported-by: Lars-Peter Clausen Cc: Mårten Lindahl Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ti-adc084s021.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c index 25504640e126..ec490e7a5b73 100644 --- a/drivers/iio/adc/ti-adc084s021.c +++ b/drivers/iio/adc/ti-adc084s021.c @@ -28,6 +28,11 @@ struct adc084s021 { struct spi_transfer spi_trans; struct regulator *reg; struct mutex lock; + /* Buffer used to align data */ + struct { + __be16 channels[4]; + s64 ts __aligned(8); + } scan; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache line. @@ -143,14 +148,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc) struct iio_poll_func *pf = pollfunc; struct iio_dev *indio_dev = pf->indio_dev; struct adc084s021 *adc = iio_priv(indio_dev); - __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */ mutex_lock(&adc->lock); - if (adc084s021_adc_conversion(adc, &data) < 0) + if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0) dev_err(&adc->spi->dev, "Failed to read data\n"); - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); mutex_unlock(&adc->lock); iio_trigger_notify_done(indio_dev->trig); -- GitLab From e7a63f2877e13ed1fb67b17f5b9bad4dcab663b2 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:51:02 +0100 Subject: [PATCH 0754/1304] iio:adc:ina2xx Fix timestamp alignment issue. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit f8cd222feb82ecd82dcf610fcc15186f55f9c2b5 upstream. One of a class of bugs pointed out by Lars in a recent review. 
iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses a 32 byte array of smaller elements on the stack. As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv() data with alignment explicitly requested. This data is allocated with kzalloc so no data can leak apart from previous readings. The explicit alignment isn't technically needed here, but it reduced fragility and avoids cut and paste into drivers where it will be needed. If we want this in older stables will need manual backport due to driver reworks. Fixes: c43a102e67db ("iio: ina2xx: add support for TI INA2xx Power Monitors") Reported-by: Lars-Peter Clausen Cc: Stefan Brüns Cc: Marc Titinger Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ina2xx-adc.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index d1239624187d..1ab106b3d3a6 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -146,6 +146,11 @@ struct ina2xx_chip_info { int range_vbus; /* Bus voltage maximum in V */ int pga_gain_vshunt; /* Shunt voltage PGA gain */ bool allow_async_readout; + /* data buffer needs space for channel data and timestamp */ + struct { + u16 chan[4]; + u64 ts __aligned(8); + } scan; }; static const struct ina2xx_config ina2xx_config[] = { @@ -736,8 +741,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev) static int ina2xx_work_buffer(struct iio_dev *indio_dev) { struct ina2xx_chip_info *chip = iio_priv(indio_dev); - /* data buffer needs space for channel data and timestap */ - unsigned short data[4 + sizeof(s64)/sizeof(short)]; int bit, ret, i = 0; s64 time; @@ -756,10 +759,10 @@ static int 
ina2xx_work_buffer(struct iio_dev *indio_dev) if (ret < 0) return ret; - data[i++] = val; + chip->scan.chan[i++] = val; } - iio_push_to_buffers_with_timestamp(indio_dev, data, time); + iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time); return 0; }; -- GitLab From 234974cf057873c1c412d2c588b1ee660acedf2d Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:51:03 +0100 Subject: [PATCH 0755/1304] iio:adc:max1118 Fix alignment of timestamp and data leak issues commit db8f06d97ec284dc018e2e4890d2e5035fde8630 upstream. One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses an array of smaller elements on the stack. As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv() data. This data is allocated with kzalloc so no data can leak apart from previous readings. The explicit alignment of ts is necessary to ensure correct padding on architectures where s64 is only 4 bytes aligned such as x86_32. 
Fixes: a9e9c7153e96 ("iio: adc: add max1117/max1118/max1119 ADC driver") Reported-by: Lars-Peter Clausen Cc: Akinobu Mita Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/max1118.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c index 49db9e9ae625..b372b226ac20 100644 --- a/drivers/iio/adc/max1118.c +++ b/drivers/iio/adc/max1118.c @@ -38,6 +38,11 @@ struct max1118 { struct spi_device *spi; struct mutex lock; struct regulator *reg; + /* Ensure natural alignment of buffer elements */ + struct { + u8 channels[2]; + s64 ts __aligned(8); + } scan; u8 data ____cacheline_aligned; }; @@ -162,7 +167,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max1118 *adc = iio_priv(indio_dev); - u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */ int scan_index; int i = 0; @@ -180,10 +184,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p) goto out; } - data[i] = ret; + adc->scan.channels[i] = ret; i++; } - iio_push_to_buffers_with_timestamp(indio_dev, data, + iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); out: mutex_unlock(&adc->lock); -- GitLab From 62d46fa157d813044cf1e745334ddc79f09f2112 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:56 +0100 Subject: [PATCH 0756/1304] iio:adc:ti-adc081c Fix alignment and data leak issues commit 54f82df2ba86e2a8e9cbf4036d192366e3905c89 upstream. One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses an array of smaller elements on the stack. 
As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv(). This data is allocated with kzalloc so no data can leak apart from previous readings. The explicit alignment of ts is necessary to ensure correct padding on x86_32 where s64 is only aligned to 4 bytes. Fixes: 08e05d1fce5c ("ti-adc081c: Initial triggered buffer support") Reported-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ti-adc081c.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index 405e3779c0c5..ef95363ebac2 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -36,6 +36,12 @@ struct adc081c { /* 8, 10 or 12 */ int bits; + + /* Ensure natural alignment of buffer elements */ + struct { + u16 channel; + s64 ts __aligned(8); + } scan; }; #define REG_CONV_RES 0x00 @@ -131,14 +137,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct adc081c *data = iio_priv(indio_dev); - u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */ int ret; ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES); if (ret < 0) goto out; - buf[0] = ret; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + data->scan.channel = ret; + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); out: iio_trigger_notify_done(indio_dev->trig); -- GitLab From a24fd33527fd81a7601699ca8084f8717cb9e0fd Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:49 +0100 Subject: [PATCH 0757/1304] iio:magnetometer:ak8975 Fix alignment and data leak issues. commit 02ad21cefbac4d89ac443866f25b90449527737b upstream.
One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses an array of smaller elements on the stack. As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv() data. This data is allocated with kzalloc so no data can leak apart from previous readings. The explicit alignment of ts is not necessary in this case as by coincidence the padding will end up the same, however I consider it to make the code less fragile and have included it. Fixes: bc11ca4a0b84 ("iio:magnetometer:ak8975: triggered buffer support") Reported-by: Lars-Peter Clausen Cc: Gregor Boirie Cc: Linus Walleij Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/magnetometer/ak8975.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c index 42a827a66512..379aa7f4a804 100644 --- a/drivers/iio/magnetometer/ak8975.c +++ b/drivers/iio/magnetometer/ak8975.c @@ -381,6 +381,12 @@ struct ak8975_data { struct iio_mount_matrix orientation; struct regulator *vdd; struct regulator *vid; + + /* Ensure natural alignment of timestamp */ + struct { + s16 channels[3]; + s64 ts __aligned(8); + } scan; }; /* Enable attached power regulator if any. */ @@ -815,7 +821,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) const struct i2c_client *client = data->client; const struct ak_def *def = data->def; int ret; - s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */ __le16 fval[3]; mutex_lock(&data->lock); @@ -838,12 +843,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev) mutex_unlock(&data->lock); /* Clamp to valid range. 
*/ - buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); - buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); - buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); + data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range); + data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range); + data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range); - iio_push_to_buffers_with_timestamp(indio_dev, buff, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); + return; unlock: -- GitLab From 412480300a4146a35a30178057f7bfdec50f5521 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:45 +0100 Subject: [PATCH 0758/1304] iio:light:max44000 Fix timestamp alignment and prevent data leak. commit 523628852a5f5f34a15252b2634d0498d3cfb347 upstream. One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses a 16 byte array of smaller elements on the stack. As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv(). This data is allocated with kzalloc so no data can leak apart from previous readings. It is necessary to force the alignment of ts to avoid the padding on x86_32 being different from 64 bit platforms (it allows for 4 byte aligned 8 byte types).
Fixes: 06ad7ea10e2b ("max44000: Initial triggered buffer support") Reported-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/light/max44000.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c index bcdb0eb9e537..7d2b3d065726 100644 --- a/drivers/iio/light/max44000.c +++ b/drivers/iio/light/max44000.c @@ -78,6 +78,11 @@ struct max44000_data { struct mutex lock; struct regmap *regmap; + /* Ensure naturally aligned timestamp */ + struct { + u16 channels[2]; + s64 ts __aligned(8); + } scan; }; /* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */ @@ -491,7 +496,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct max44000_data *data = iio_priv(indio_dev); - u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */ int index = 0; unsigned int regval; int ret; @@ -501,17 +505,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p) ret = max44000_read_alsval(data); if (ret < 0) goto out_unlock; - buf[index++] = ret; + data->scan.channels[index++] = ret; } if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) { ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, ®val); if (ret < 0) goto out_unlock; - buf[index] = regval; + data->scan.channels[index] = regval; } mutex_unlock(&data->lock); - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; -- GitLab From 9f67ad0fa7b350efdd8b6958ce633a57ac0a12c6 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:43 +0100 Subject: [PATCH 0759/1304] iio:chemical:ccs811: Fix timestamp alignment and prevent data leak. 
commit eb1a148ef41d8ae8d9201efc3f1b145976290331 upstream. One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses an array of smaller elements on the stack. As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv() data with alignment explicitly requested. This data is allocated with kzalloc so no data can leak apart from previous readings. The explicit alignment of ts is necessary to ensure consistent padding for x86_32 in which the ts would otherwise be 4 byte aligned. Fixes: 283d26917ad6 ("iio: chemical: ccs811: Add triggered buffer support") Reported-by: Lars-Peter Clausen Cc: Narcisa Ana Maria Vasile Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/chemical/ccs811.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index b4a46eb45789..46d5d48b58b6 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -78,6 +78,11 @@ struct ccs811_data { struct ccs811_reading buffer; struct iio_trigger *drdy_trig; bool drdy_trig_on; + /* Ensures correct alignment of timestamp if present */ + struct { + s16 channels[2]; + s64 ts __aligned(8); + } scan; }; static const struct iio_chan_spec ccs811_channels[] = { @@ -309,17 +314,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct ccs811_data *data = iio_priv(indio_dev); struct i2c_client *client = data->client; - s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */ int ret; - ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4, - (u8 *)&buf); + ret =
i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, + sizeof(data->scan.channels), + (u8 *)data->scan.channels); if (ret != 4) { dev_err(&client->dev, "cannot read sensor data\n"); goto err; } - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, iio_get_time_ns(indio_dev)); err: -- GitLab From d395aab9406136530798a2d0f7e4a65cde325c90 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:37 +0100 Subject: [PATCH 0760/1304] iio: accel: kxsd9: Fix alignment of local buffer. commit 95ad67577de4ea08eb8e441394e698aa4addcc0b upstream. iio_push_to_buffers_with_timestamp assumes 8 byte alignment which is not guaranteed by an array of smaller elements. Note that whilst in this particular case the alignment forcing of the ts element is not strictly necessary it acts as good documentation. Doing this where not necessary should cut down on the number of cut and paste introduced errors elsewhere. Fixes: 0427a106a98a ("iio: accel: kxsd9: Add triggered buffer handling") Reported-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/accel/kxsd9.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c index 0c0df4fce420..f74cb2e082a6 100644 --- a/drivers/iio/accel/kxsd9.c +++ b/drivers/iio/accel/kxsd9.c @@ -212,14 +212,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) const struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct kxsd9_state *st = iio_priv(indio_dev); + /* + * Ensure correct positioning and alignment of timestamp. + * No need to zero initialize as all elements written. 
+ */ + struct { + __be16 chan[4]; + s64 ts __aligned(8); + } hw_values; int ret; - /* 4 * 16bit values AND timestamp */ - __be16 hw_values[8]; ret = regmap_bulk_read(st->map, KXSD9_REG_X, - &hw_values, - 8); + hw_values.chan, + sizeof(hw_values.chan)); if (ret) { dev_err(st->dev, "error reading data\n"); @@ -227,7 +233,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p) } iio_push_to_buffers_with_timestamp(indio_dev, - hw_values, + &hw_values, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); -- GitLab From 12a50595ce4fd946399a7e060bfa6688f3fa7ad7 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:40 +0100 Subject: [PATCH 0761/1304] iio:accel:mma7455: Fix timestamp alignment and prevent data leak. commit 7e5ac1f2206eda414f90c698fe1820dee873394d upstream. One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). This is not guaranteed in this driver which uses a 16 byte u8 array on the stack. As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv() data with alignment ensured by use of an explicit c structure. This data is allocated with kzalloc so no data can leak apart from previous readings. The forced alignment of ts is not strictly necessary in this particular case but does make the code less fragile.
Fixes: a84ef0d181d9 ("iio: accel: add Freescale MMA7455L/MMA7456L 3-axis accelerometer driver") Reported-by: Lars-Peter Clausen Signed-off-by: Jonathan Cameron Cc: Reviewed-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/iio/accel/mma7455_core.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c index da0ceaac46b5..a3b5d5780bc8 100644 --- a/drivers/iio/accel/mma7455_core.c +++ b/drivers/iio/accel/mma7455_core.c @@ -55,6 +55,14 @@ struct mma7455_data { struct regmap *regmap; + /* + * Used to reorganize data. Will ensure correct alignment of + * the timestamp if present + */ + struct { + __le16 channels[3]; + s64 ts __aligned(8); + } scan; }; static int mma7455_drdy(struct mma7455_data *mma7455) @@ -85,19 +93,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma7455_data *mma7455 = iio_priv(indio_dev); - u8 buf[16]; /* 3 x 16-bit channels + padding + ts */ int ret; ret = mma7455_drdy(mma7455); if (ret) goto done; - ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf, - sizeof(__le16) * 3); + ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, + mma7455->scan.channels, + sizeof(mma7455->scan.channels)); if (ret) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buf, + iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan, iio_get_time_ns(indio_dev)); done: -- GitLab From 3cb4aa00278b12ce98c08b59b9d65077ef9e2695 Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Wed, 22 Jul 2020 16:50:38 +0100 Subject: [PATCH 0762/1304] iio:accel:mma8452: Fix timestamp alignment and prevent data leak. commit 89226a296d816727405d3fea684ef69e7d388bd8 upstream. One of a class of bugs pointed out by Lars in a recent review. iio_push_to_buffers_with_timestamp assumes the buffer used is aligned to the size of the timestamp (8 bytes). 
This is not guaranteed in this driver which uses a 16 byte u8 array on the stack. As Lars also noted this anti pattern can involve a leak of data to userspace and that indeed can happen here. We close both issues by moving to a suitable structure in the iio_priv() data with alignment ensured by use of an explicit c structure. This data is allocated with kzalloc so no data can leak apart from previous readings. The additional forcing of the 8 byte alignment of the timestamp is not strictly necessary but makes the code less fragile by making this explicit. Fixes: c7eeea93ac60 ("iio: Add Freescale MMA8452Q 3-axis accelerometer driver") Reported-by: Lars-Peter Clausen Cc: Peter Meerwald Signed-off-by: Jonathan Cameron Reviewed-by: Andy Shevchenko Cc: Signed-off-by: Greg Kroah-Hartman --- drivers/iio/accel/mma8452.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index fcfec758fec6..15c254b4745c 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -107,6 +107,12 @@ struct mma8452_data { u8 data_cfg; const struct mma_chip_info *chip_info; int sleep_val; + + /* Ensure correct alignment of time stamp when present */ + struct { + __be16 channels[3]; + s64 ts __aligned(8); + } buffer; }; /** @@ -1088,14 +1094,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; struct mma8452_data *data = iio_priv(indio_dev); - u8 buffer[16]; /* 3 16-bit channels + padding + ts */ int ret; - ret = mma8452_read(data, (__be16 *)buffer); + ret = mma8452_read(data, data->buffer.channels); if (ret < 0) goto done; - iio_push_to_buffers_with_timestamp(indio_dev, buffer, + iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, iio_get_time_ns(indio_dev)); done: -- GitLab From 0a0b6ac0e4b2102c0c6fa673793b1409405bbbea Mon Sep 17 00:00:00 2001 From: Rustam Kovhaev Date: Tue, 4 Aug 2020 07:56:14
-0700 Subject: [PATCH 0763/1304] staging: wlan-ng: fix out of bounds read in prism2sta_probe_usb() commit fea22e159d51c766ba70473f473a0ec914cc7e92 upstream. let's use usb_find_common_endpoints() to discover endpoints, it does all necessary checks for type and xfer direction remove memset() in hfa384x_create(), because we now assign endpoints in prism2sta_probe_usb() and because create_wlan() uses kzalloc() to allocate hfa384x struct before calling hfa384x_create() Fixes: faaff9765664 ("staging: wlan-ng: properly check endpoint types") Reported-and-tested-by: syzbot+22794221ab96b0bab53a@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?extid=22794221ab96b0bab53a Signed-off-by: Rustam Kovhaev Cc: stable Link: https://lore.kernel.org/r/20200804145614.104320-1-rkovhaev@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/wlan-ng/hfa384x_usb.c | 5 ----- drivers/staging/wlan-ng/prism2usb.c | 19 ++++++------------- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index 65ad9773018e..7686805dfe0f 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -532,13 +532,8 @@ static void hfa384x_usb_defer(struct work_struct *data) */ void hfa384x_create(struct hfa384x *hw, struct usb_device *usb) { - memset(hw, 0, sizeof(*hw)); hw->usb = usb; - /* set up the endpoints */ - hw->endp_in = usb_rcvbulkpipe(usb, 1); - hw->endp_out = usb_sndbulkpipe(usb, 2); - /* Set up the waitq */ init_waitqueue_head(&hw->cmdq); diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c index 8d32b1603d10..9eee72aff723 100644 --- a/drivers/staging/wlan-ng/prism2usb.c +++ b/drivers/staging/wlan-ng/prism2usb.c @@ -61,23 +61,14 @@ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; - const struct usb_endpoint_descriptor *epd; - const struct 
usb_host_interface *iface_desc = interface->cur_altsetting; + struct usb_endpoint_descriptor *bulk_in, *bulk_out; + struct usb_host_interface *iface_desc = interface->cur_altsetting; struct wlandevice *wlandev = NULL; struct hfa384x *hw = NULL; int result = 0; - if (iface_desc->desc.bNumEndpoints != 2) { - result = -ENODEV; - goto failed; - } - - result = -EINVAL; - epd = &iface_desc->endpoint[1].desc; - if (!usb_endpoint_is_bulk_in(epd)) - goto failed; - epd = &iface_desc->endpoint[2].desc; - if (!usb_endpoint_is_bulk_out(epd)) + result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL); + if (result) goto failed; dev = interface_to_usbdev(interface); @@ -96,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface, } /* Initialize the hw data */ + hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress); + hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress); hfa384x_create(hw, dev); hw->wlandev = wlandev; -- GitLab From a6fdfac3fc2903647e7683efb4193295db1af666 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Wed, 26 Aug 2020 17:26:43 +0800 Subject: [PATCH 0764/1304] btrfs: require only sector size alignment for parent eb bytenr commit ea57788eb76dc81f6003245427356a1dcd0ac524 upstream. 
[BUG] A completely sane converted fs will cause kernel warning at balance time: [ 1557.188633] BTRFS info (device sda7): relocating block group 8162107392 flags data [ 1563.358078] BTRFS info (device sda7): found 11722 extents [ 1563.358277] BTRFS info (device sda7): leaf 7989321728 gen 95 total ptrs 213 free space 3458 owner 2 [ 1563.358280] item 0 key (7984947200 169 0) itemoff 16250 itemsize 33 [ 1563.358281] extent refs 1 gen 90 flags 2 [ 1563.358282] ref#0: tree block backref root 4 [ 1563.358285] item 1 key (7985602560 169 0) itemoff 16217 itemsize 33 [ 1563.358286] extent refs 1 gen 93 flags 258 [ 1563.358287] ref#0: shared block backref parent 7985602560 [ 1563.358288] (parent 7985602560 is NOT ALIGNED to nodesize 16384) [ 1563.358290] item 2 key (7985635328 169 0) itemoff 16184 itemsize 33 ... [ 1563.358995] BTRFS error (device sda7): eb 7989321728 invalid extent inline ref type 182 [ 1563.358996] ------------[ cut here ]------------ [ 1563.359005] WARNING: CPU: 14 PID: 2930 at 0xffffffff9f231766 Then with transaction abort, and obviously failed to balance the fs. [CAUSE] That mentioned inline ref type 182 is completely sane, it's BTRFS_SHARED_BLOCK_REF_KEY, it's some extra check making kernel to believe it's invalid. Commit 64ecdb647ddb ("Btrfs: add one more sanity check for shared ref type") introduced extra checks for backref type. One of the requirement is, parent bytenr must be aligned to node size, which is not correct. One example is like this: 0 1G 1G+4K 2G 2G+4K | |///////////////////|//| <- A chunk starts at 1G+4K | | <- A tree block get reserved at bytenr 1G+4K Then we have a valid tree block at bytenr 1G+4K, but not aligned to nodesize (16K). Such chunk is not ideal, but current kernel can handle it pretty well. We may warn about such tree block in the future, but should not reject them. [FIX] Change the alignment requirement from node size alignment to sector size alignment. 
Also, to make our lives a little easier, also output @iref when btrfs_get_extent_inline_ref_type() failed, so we can locate the item easier. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=205475 Fixes: 64ecdb647ddb ("Btrfs: add one more sanity check for shared ref type") CC: stable@vger.kernel.org # 4.14+ Reviewed-by: Josef Bacik Signed-off-by: Qu Wenruo [ update comments and messages ] Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/extent-tree.c | 19 +++++++++---------- fs/btrfs/print-tree.c | 12 +++++++----- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ec3aa76d19b7..319a89d4d073 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1057,12 +1057,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_BLOCK_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. */ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else if (is_data == BTRFS_REF_TYPE_DATA) { @@ -1071,12 +1070,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, if (type == BTRFS_SHARED_DATA_REF_KEY) { ASSERT(eb->fs_info); /* - * Every shared one has parent tree - * block, which must be aligned to - * nodesize. + * Every shared one has parent tree block, + * which must be aligned to sector size. 
*/ if (offset && - IS_ALIGNED(offset, eb->fs_info->nodesize)) + IS_ALIGNED(offset, eb->fs_info->sectorsize)) return type; } } else { @@ -1086,8 +1084,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, } btrfs_print_leaf((struct extent_buffer *)eb); - btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d", - eb->start, type); + btrfs_err(eb->fs_info, + "eb %llu iref 0x%lx invalid extent inline ref type %d", + eb->start, (unsigned long)iref, type); WARN_ON(1); return BTRFS_REF_TYPE_INVALID; diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index df49931ffe92..4b217e9a581c 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * offset is supposed to be a tree block which * must be aligned to nodesize. */ - if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + if (!IS_ALIGNED(offset, eb->fs_info->sectorsize)) + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; case BTRFS_EXTENT_DATA_REF_KEY: dref = (struct btrfs_extent_data_ref *)(&iref->offset); @@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type) * must be aligned to nodesize. 
*/ if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) - pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n", - offset, (unsigned long long)eb->fs_info->nodesize); + pr_info( + "\t\t\t(parent %llu not aligned to sectorsize %u)\n", + offset, eb->fs_info->sectorsize); break; default: pr_cont("(extent %llu has INVALID ref type %d)\n", -- GitLab From b33e13e4b9d31da13705185025993504d20c0042 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 31 Aug 2020 10:52:42 -0400 Subject: [PATCH 0765/1304] btrfs: fix lockdep splat in add_missing_dev commit fccc0007b8dc952c6bc0805cdf842eb8ea06a639 upstream. Nikolay reported a lockdep splat in generic/476 that I could reproduce with btrfs/187. ====================================================== WARNING: possible circular locking dependency detected 5.9.0-rc2+ #1 Tainted: G W ------------------------------------------------------ kswapd0/100 is trying to acquire lock: ffff9e8ef38b6268 (&delayed_node->mutex){+.+.}-{3:3}, at: __btrfs_release_delayed_node.part.0+0x3f/0x330 but task is already holding lock: ffffffffa9d74700 (fs_reclaim){+.+.}-{0:0}, at: __fs_reclaim_acquire+0x5/0x30 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #2 (fs_reclaim){+.+.}-{0:0}: fs_reclaim_acquire+0x65/0x80 slab_pre_alloc_hook.constprop.0+0x20/0x200 kmem_cache_alloc_trace+0x3a/0x1a0 btrfs_alloc_device+0x43/0x210 add_missing_dev+0x20/0x90 read_one_chunk+0x301/0x430 btrfs_read_sys_array+0x17b/0x1b0 open_ctree+0xa62/0x1896 btrfs_mount_root.cold+0x12/0xea legacy_get_tree+0x30/0x50 vfs_get_tree+0x28/0xc0 vfs_kern_mount.part.0+0x71/0xb0 btrfs_mount+0x10d/0x379 legacy_get_tree+0x30/0x50 vfs_get_tree+0x28/0xc0 path_mount+0x434/0xc00 __x64_sys_mount+0xe3/0x120 do_syscall_64+0x33/0x40 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #1 (&fs_info->chunk_mutex){+.+.}-{3:3}: __mutex_lock+0x7e/0x7e0 btrfs_chunk_alloc+0x125/0x3a0 find_free_extent+0xdf6/0x1210 btrfs_reserve_extent+0xb3/0x1b0 btrfs_alloc_tree_block+0xb0/0x310 alloc_tree_block_no_bg_flush+0x4a/0x60 __btrfs_cow_block+0x11a/0x530 btrfs_cow_block+0x104/0x220 btrfs_search_slot+0x52e/0x9d0 btrfs_lookup_inode+0x2a/0x8f __btrfs_update_delayed_inode+0x80/0x240 btrfs_commit_inode_delayed_inode+0x119/0x120 btrfs_evict_inode+0x357/0x500 evict+0xcf/0x1f0 vfs_rmdir.part.0+0x149/0x160 do_rmdir+0x136/0x1a0 do_syscall_64+0x33/0x40 entry_SYSCALL_64_after_hwframe+0x44/0xa9 -> #0 (&delayed_node->mutex){+.+.}-{3:3}: __lock_acquire+0x1184/0x1fa0 lock_acquire+0xa4/0x3d0 __mutex_lock+0x7e/0x7e0 __btrfs_release_delayed_node.part.0+0x3f/0x330 btrfs_evict_inode+0x24c/0x500 evict+0xcf/0x1f0 dispose_list+0x48/0x70 prune_icache_sb+0x44/0x50 super_cache_scan+0x161/0x1e0 do_shrink_slab+0x178/0x3c0 shrink_slab+0x17c/0x290 shrink_node+0x2b2/0x6d0 balance_pgdat+0x30a/0x670 kswapd+0x213/0x4c0 kthread+0x138/0x160 ret_from_fork+0x1f/0x30 other info that might help us debug this: Chain exists of: &delayed_node->mutex --> &fs_info->chunk_mutex --> fs_reclaim Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(fs_reclaim); lock(&fs_info->chunk_mutex); lock(fs_reclaim); lock(&delayed_node->mutex); *** DEADLOCK *** 3 locks held by kswapd0/100: 
#0: ffffffffa9d74700 (fs_reclaim){+.+.}-{0:0}, at: __fs_reclaim_acquire+0x5/0x30 #1: ffffffffa9d65c50 (shrinker_rwsem){++++}-{3:3}, at: shrink_slab+0x115/0x290 #2: ffff9e8e9da260e0 (&type->s_umount_key#48){++++}-{3:3}, at: super_cache_scan+0x38/0x1e0 stack backtrace: CPU: 1 PID: 100 Comm: kswapd0 Tainted: G W 5.9.0-rc2+ #1 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.13.0-2.fc32 04/01/2014 Call Trace: dump_stack+0x92/0xc8 check_noncircular+0x12d/0x150 __lock_acquire+0x1184/0x1fa0 lock_acquire+0xa4/0x3d0 ? __btrfs_release_delayed_node.part.0+0x3f/0x330 __mutex_lock+0x7e/0x7e0 ? __btrfs_release_delayed_node.part.0+0x3f/0x330 ? __btrfs_release_delayed_node.part.0+0x3f/0x330 ? lock_acquire+0xa4/0x3d0 ? btrfs_evict_inode+0x11e/0x500 ? find_held_lock+0x2b/0x80 __btrfs_release_delayed_node.part.0+0x3f/0x330 btrfs_evict_inode+0x24c/0x500 evict+0xcf/0x1f0 dispose_list+0x48/0x70 prune_icache_sb+0x44/0x50 super_cache_scan+0x161/0x1e0 do_shrink_slab+0x178/0x3c0 shrink_slab+0x17c/0x290 shrink_node+0x2b2/0x6d0 balance_pgdat+0x30a/0x670 kswapd+0x213/0x4c0 ? _raw_spin_unlock_irqrestore+0x46/0x60 ? add_wait_queue_exclusive+0x70/0x70 ? balance_pgdat+0x670/0x670 kthread+0x138/0x160 ? kthread_create_worker_on_cpu+0x40/0x40 ret_from_fork+0x1f/0x30 This is because we are holding the chunk_mutex when we call btrfs_alloc_device, which does a GFP_KERNEL allocation. We don't want to switch that to a GFP_NOFS lock because this is the only place where it matters. So instead use memalloc_nofs_save() around the allocation in order to avoid the lockdep splat. 
Reported-by: Nikolay Borisov CC: stable@vger.kernel.org # 4.4+ Reviewed-by: Anand Jain Signed-off-by: Josef Bacik Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/volumes.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 498ec4b10e61..815b655b8f10 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4,6 +4,7 @@ */ #include +#include #include #include #include @@ -6292,8 +6293,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices, u64 devid, u8 *dev_uuid) { struct btrfs_device *device; + unsigned int nofs_flag; + /* + * We call this under the chunk_mutex, so we want to use NOFS for this + * allocation, however we don't want to change btrfs_alloc_device() to + * always do NOFS because we use it in a lot of other GFP_KERNEL safe + * places. + */ + nofs_flag = memalloc_nofs_save(); device = btrfs_alloc_device(NULL, &devid, dev_uuid); + memalloc_nofs_restore(nofs_flag); if (IS_ERR(device)) return device; -- GitLab From 91567128a9cbe55a4c133b900d73e6f56fef8f59 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 14 Sep 2020 09:01:04 +0100 Subject: [PATCH 0766/1304] btrfs: fix wrong address when faulting in pages in the search ioctl commit 1c78544eaa4660096aeb6a57ec82b42cdb3bfe5a upstream. When faulting in the pages for the user supplied buffer for the search ioctl, we are passing only the base address of the buffer to the function fault_in_pages_writeable(). This means that after the first iteration of the while loop that searches for leaves, when we have a non-zero offset, stored in 'sk_offset', we try to fault in a wrong page range. So fix this by adding the offset in 'sk_offset' to the base address of the user supplied buffer when calling fault_in_pages_writeable(). 
Several users have reported that the applications compsize and bees have started to operate incorrectly since commit a48b73eca4ceb9 ("btrfs: fix potential deadlock in the search ioctl") was added to stable trees, and these applications make heavy use of the search ioctls. This fixes their issues. Link: https://lore.kernel.org/linux-btrfs/632b888d-a3c3-b085-cdf5-f9bb61017d92@lechevalier.se/ Link: https://github.com/kilobyte/compsize/issues/34 Fixes: a48b73eca4ceb9 ("btrfs: fix potential deadlock in the search ioctl") CC: stable@vger.kernel.org # 4.4+ Tested-by: A L Reviewed-by: Josef Bacik Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/ioctl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 85990755edd9..01a90fa03c24 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2189,7 +2189,8 @@ static noinline int search_ioctl(struct inode *inode, key.offset = sk->min_offset; while (1) { - ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset); + ret = fault_in_pages_writeable(ubuf + sk_offset, + *buf_size - sk_offset); if (ret) break; -- GitLab From e0717ed3379d3c04b7307c804a24ca3e6769c39f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Wed, 12 Aug 2020 03:31:36 +0200 Subject: [PATCH 0767/1304] regulator: push allocation in set_consumer_device_supply() out of lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5c06540165d443c6455123eb48e7f1a9b618ab34 upstream. Pull regulator_list_mutex into set_consumer_device_supply() and keep allocations outside of it. Fourth of the fs_reclaim deadlock case. 
Fixes: 45389c47526d ("regulator: core: Add early supply resolution for regulators") Signed-off-by: Michał Mirosław Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/f0380bdb3d60aeefa9693c4e234d2dcda7e56747.1597195321.git.mirq-linux@rere.qmqm.pl Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/regulator/core.c | 46 +++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 4bab758d14b1..37e6270749ee 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1257,7 +1257,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, const char *consumer_dev_name, const char *supply) { - struct regulator_map *node; + struct regulator_map *node, *new_node; int has_dev; if (supply == NULL) @@ -1268,6 +1268,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, else has_dev = 0; + new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); + if (new_node == NULL) + return -ENOMEM; + + new_node->regulator = rdev; + new_node->supply = supply; + + if (has_dev) { + new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); + if (new_node->dev_name == NULL) { + kfree(new_node); + return -ENOMEM; + } + } + + mutex_lock(®ulator_list_mutex); list_for_each_entry(node, ®ulator_map_list, list) { if (node->dev_name && consumer_dev_name) { if (strcmp(node->dev_name, consumer_dev_name) != 0) @@ -1285,26 +1301,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev, node->regulator->desc->name, supply, dev_name(&rdev->dev), rdev_get_name(rdev)); - return -EBUSY; + goto fail; } - node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL); - if (node == NULL) - return -ENOMEM; - - node->regulator = rdev; - node->supply = supply; - - if (has_dev) { - node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL); - if (node->dev_name == NULL) { - kfree(node); - return -ENOMEM; - } - } + 
list_add(&new_node->list, ®ulator_map_list); + mutex_unlock(®ulator_list_mutex); - list_add(&node->list, ®ulator_map_list); return 0; + +fail: + mutex_unlock(®ulator_list_mutex); + kfree(new_node->dev_name); + kfree(new_node); + return -EBUSY; } static void unset_regulator_supplies(struct regulator_dev *rdev) @@ -4375,19 +4384,16 @@ regulator_register(const struct regulator_desc *regulator_desc, /* add consumers devices */ if (init_data) { - mutex_lock(®ulator_list_mutex); for (i = 0; i < init_data->num_consumer_supplies; i++) { ret = set_consumer_device_supply(rdev, init_data->consumer_supplies[i].dev_name, init_data->consumer_supplies[i].supply); if (ret < 0) { - mutex_unlock(®ulator_list_mutex); dev_err(dev, "Failed to set supply %s\n", init_data->consumer_supplies[i].supply); goto unset_supplies; } } - mutex_unlock(®ulator_list_mutex); } if (!rdev->desc->ops->get_voltage && -- GitLab From 549a2cac6bc278b7f238a59eeb644205a10a86ca Mon Sep 17 00:00:00 2001 From: Varun Prakash Date: Tue, 25 Aug 2020 18:05:10 +0530 Subject: [PATCH 0768/1304] scsi: target: iscsi: Fix data digest calculation commit 5528d03183fe5243416c706f64b1faa518b05130 upstream. Current code does not consider 'page_off' in data digest calculation. To fix this, add a local variable 'first_sg' and set first_sg.offset to sg->offset + page_off. Link: https://lore.kernel.org/r/1598358910-3052-1-git-send-email-varun@chelsio.com Fixes: e48354ce078c ("iscsi-target: Add iSCSI fabric support for target v4.1") Cc: Reviewed-by: Mike Christie Signed-off-by: Varun Prakash Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 1633e2666268..2602b57936d4 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1381,14 +1381,27 @@ static u32 iscsit_do_crypto_hash_sg( sg = cmd->first_data_sg; page_off = cmd->first_data_sg_off; + if (data_length && page_off) { + struct scatterlist first_sg; + u32 len = min_t(u32, data_length, sg->length - page_off); + + sg_init_table(&first_sg, 1); + sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off); + + ahash_request_set_crypt(hash, &first_sg, NULL, len); + crypto_ahash_update(hash); + + data_length -= len; + sg = sg_next(sg); + } + while (data_length) { - u32 cur_len = min_t(u32, data_length, (sg->length - page_off)); + u32 cur_len = min_t(u32, data_length, sg->length); ahash_request_set_crypt(hash, sg, NULL, cur_len); crypto_ahash_update(hash); data_length -= cur_len; - page_off = 0; /* iscsit_map_iovec has already checked for invalid sg pointers */ sg = sg_next(sg); } -- GitLab From 4f78e55daaa2986bc533d1da3e8e7ac9cc3048f5 Mon Sep 17 00:00:00 2001 From: Hou Pu Date: Wed, 29 Jul 2020 09:03:43 -0400 Subject: [PATCH 0769/1304] scsi: target: iscsi: Fix hang in iscsit_access_np() when getting tpg->np_login_sem commit ed43ffea78dcc97db3f561da834f1a49c8961e33 upstream. The iSCSI target login thread might get stuck with the following stack: cat /proc/`pidof iscsi_np`/stack [<0>] down_interruptible+0x42/0x50 [<0>] iscsit_access_np+0xe3/0x167 [<0>] iscsi_target_locate_portal+0x695/0x8ac [<0>] __iscsi_target_login_thread+0x855/0xb82 [<0>] iscsi_target_login_thread+0x2f/0x5a [<0>] kthread+0xfa/0x130 [<0>] ret_from_fork+0x1f/0x30 This can be reproduced via the following steps: 1. Initiator A tries to log in to iqn1-tpg1 on port 3260. 
After finishing PDU exchange in the login thread and before the negotiation is finished the the network link goes down. At this point A has not finished login and tpg->np_login_sem is held. 2. Initiator B tries to log in to iqn2-tpg1 on port 3260. After finishing PDU exchange in the login thread the target expects to process remaining login PDUs in workqueue context. 3. Initiator A' tries to log in to iqn1-tpg1 on port 3260 from a new socket. A' will wait for tpg->np_login_sem with np->np_login_timer loaded to wait for at most 15 seconds. The lock is held by A so A' eventually times out. 4. Before A' got timeout initiator B gets negotiation failed and calls iscsi_target_login_drop()->iscsi_target_login_sess_out(). The np->np_login_timer is canceled and initiator A' will hang forever. Because A' is now in the login thread, no new login requests can be serviced. Fix this by moving iscsi_stop_login_thread_timer() out of iscsi_target_login_sess_out(). Also remove iscsi_np parameter from iscsi_target_login_sess_out(). Link: https://lore.kernel.org/r/20200729130343.24976-1-houpu@bytedance.com Cc: stable@vger.kernel.org Reviewed-by: Mike Christie Signed-off-by: Hou Pu Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target_login.c | 6 +++--- drivers/target/iscsi/iscsi_target_login.h | 3 +-- drivers/target/iscsi/iscsi_target_nego.c | 3 +-- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index f25049ba4a85..db93bd0a9b88 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1183,7 +1183,7 @@ void iscsit_free_conn(struct iscsi_conn *conn) } void iscsi_target_login_sess_out(struct iscsi_conn *conn, - struct iscsi_np *np, bool zero_tsih, bool new_sess) + bool zero_tsih, bool new_sess) { if (!new_sess) goto old_sess_out; @@ -1201,7 +1201,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn, conn->sess = NULL; old_sess_out: - iscsi_stop_login_thread_timer(np); /* * If login negotiation fails check if the Time2Retain timer * needs to be restarted. @@ -1441,8 +1440,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) new_sess_out: new_sess = true; old_sess_out: + iscsi_stop_login_thread_timer(np); tpg_np = conn->tpg_np; - iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess); + iscsi_target_login_sess_out(conn, zero_tsih, new_sess); new_sess = false; if (tpg) { diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 3b8e3639ff5d..fc95e6150253 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); extern void iscsit_free_conn(struct iscsi_conn *); extern int iscsit_start_kthreads(struct iscsi_conn *); extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); -extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, - bool, bool); +extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, 
bool); extern int iscsi_target_login_thread(void *); extern void iscsi_handle_login_thread_timeout(struct timer_list *t); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 8a5e8d17a942..5db8842a8026 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -554,12 +554,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) { - struct iscsi_np *np = login->np; bool zero_tsih = login->zero_tsih; iscsi_remove_failed_auth_entry(conn); iscsi_target_nego_release(conn); - iscsi_target_login_sess_out(conn, np, zero_tsih, true); + iscsi_target_login_sess_out(conn, zero_tsih, true); } struct conn_timeout { -- GitLab From fdd498257278893c923f62aad5ffbf98462292c0 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 20 Aug 2020 22:31:44 +0200 Subject: [PATCH 0770/1304] drm/tve200: Stabilize enable/disable commit f71800228dc74711c3df43854ce7089562a3bc2d upstream. The TVE200 will occasionally print a bunch of lost interrupts and similar dmesg messages, sometimes during boot and sometimes after disabling and coming back to enablement. This is probably because the hardware is left in an unknown state by the boot loader that displays a logo. This can be fixed by bringing the controller into a known state by resetting the controller while enabling it. We retry reset 5 times like the vendor driver does. We also put the controller into reset before de-clocking it and clear all interrupts before enabling the vblank IRQ. This makes the video enable/disable/enable cycle rock solid on the D-Link DIR-685. Tested extensively. 
Signed-off-by: Linus Walleij Acked-by: Daniel Vetter Cc: stable@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20200820203144.271081-1-linus.walleij@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/tve200/tve200_display.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index e8723a2412a6..c0b113ba329c 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -132,9 +133,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe, struct drm_connector *connector = priv->connector; u32 format = fb->format->format; u32 ctrl1 = 0; + int retries; clk_prepare_enable(priv->clk); + /* Reset the TVE200 and wait for it to come back online */ + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); + for (retries = 0; retries < 5; retries++) { + usleep_range(30000, 50000); + if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) + continue; + else + break; + } + if (retries == 5 && + readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) { + dev_err(drm->dev, "can't get hardware out of reset\n"); + return; + } + /* Function 1 */ ctrl1 |= TVE200_CTRL_CSMODE; /* Interlace mode for CCIR656: parameterize? 
*/ @@ -231,8 +248,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe) drm_crtc_vblank_off(crtc); - /* Disable and Power Down */ + /* Disable put into reset and Power Down */ writel(0, priv->regs + TVE200_CTRL); + writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4); clk_disable_unprepare(priv->clk); } @@ -280,6 +298,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe) struct drm_device *drm = crtc->dev; struct tve200_drm_dev_private *priv = drm->dev_private; + /* Clear any IRQs and enable */ + writel(0xFF, priv->regs + TVE200_INT_CLR); writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN); return 0; } -- GitLab From af5c8bc8ba85a43ccca73a86698777f7af9a7957 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Thu, 3 Sep 2020 20:03:12 -0600 Subject: [PATCH 0771/1304] drm/msm: Disable preemption on all 5xx targets commit 7b3f3948c8b7053d771acc9f79810cc410f5e2e0 upstream. Temporarily disable preemption on a5xx targets pending some improvements to protect the RPTR shadow from being corrupted. 
Cc: stable@vger.kernel.org Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index ba6f3c14495c..1fc9a7fa37b4 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1518,7 +1518,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) check_speed_bin(&pdev->dev); - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); + /* Restricting nr_rings to 1 to temporarily disable preemption */ + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); -- GitLab From 0070f9906d7190d4c69e338403db4abfec81fe7f Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 3 Sep 2020 13:24:11 +0200 Subject: [PATCH 0772/1304] rbd: require global CAP_SYS_ADMIN for mapping and unmapping commit f44d04e696feaf13d192d942c4f14ad2e117065a upstream. It turns out that currently we rely only on sysfs attribute permissions: $ ll /sys/bus/rbd/{add*,remove*} --w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add --w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/add_single_major --w------- 1 root root 4096 Sep 3 20:37 /sys/bus/rbd/remove --w------- 1 root root 4096 Sep 3 20:38 /sys/bus/rbd/remove_single_major This means that images can be mapped and unmapped (i.e. block devices can be created and deleted) by a UID 0 process even after it drops all privileges or by any process with CAP_DAC_OVERRIDE in its user namespace as long as UID 0 is mapped into that user namespace. Be consistent with other virtual block devices (loop, nbd, dm, md, etc) and require CAP_SYS_ADMIN in the initial user namespace for mapping and unmapping, and also for dumping the configuration string and refreshing the image header. 
Cc: stable@vger.kernel.org Signed-off-by: Ilya Dryomov Reviewed-by: Jeff Layton Signed-off-by: Greg Kroah-Hartman --- drivers/block/rbd.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 110129097169..9f1265ce2e36 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4124,6 +4124,9 @@ static ssize_t rbd_config_info_show(struct device *dev, { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return sprintf(buf, "%s\n", rbd_dev->config_info); } @@ -4235,6 +4238,9 @@ static ssize_t rbd_image_refresh(struct device *dev, struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = rbd_dev_refresh(rbd_dev); if (ret) return ret; @@ -5846,6 +5852,9 @@ static ssize_t do_rbd_add(struct bus_type *bus, struct rbd_client *rbdc; int rc; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (!try_module_get(THIS_MODULE)) return -ENODEV; @@ -5995,6 +6004,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus, bool force = false; int ret; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + dev_id = -1; opt_buf[0] = '\0'; sscanf(buf, "%d %5s", &dev_id, opt_buf); -- GitLab From 33d21bd7d73902dbc4f954fa3df60ed950453edd Mon Sep 17 00:00:00 2001 From: Yi Zhang Date: Thu, 20 Aug 2020 23:36:46 +0800 Subject: [PATCH 0773/1304] RDMA/rxe: Fix the parent sysfs read when the interface has 15 chars commit 60b1af64eb35074a4f2d41cc1e503a7671e68963 upstream. 'parent' sysfs reads will yield '\0' bytes when the interface name has 15 chars, and there will no "\n" output. 
To reproduce, create one interface with 15 chars: [root@test ~]# ip a s enp0s29u1u7u3c2 2: enp0s29u1u7u3c2: mtu 1500 qdisc fq_codel state UNKNOWN group default qlen 1000 link/ether 02:21:28:57:47:17 brd ff:ff:ff:ff:ff:ff inet6 fe80::ac41:338f:5bcd:c222/64 scope link noprefixroute valid_lft forever preferred_lft forever [root@test ~]# modprobe rdma_rxe [root@test ~]# echo enp0s29u1u7u3c2 > /sys/module/rdma_rxe/parameters/add [root@test ~]# cat /sys/class/infiniband/rxe0/parent enp0s29u1u7u3c2[root@test ~]# [root@test ~]# f="/sys/class/infiniband/rxe0/parent" [root@test ~]# echo "$(<"$f")" -bash: warning: command substitution: ignored null byte in input enp0s29u1u7u3c2 Use scnprintf and PAGE_SIZE to fill the sysfs output buffer. Cc: stable@vger.kernel.org Fixes: 8700e3e7c485 ("Soft RoCE driver") Link: https://lore.kernel.org/r/20200820153646.31316-1-yi.zhang@redhat.com Suggested-by: Jason Gunthorpe Signed-off-by: Yi Zhang Reviewed-by: Bart Van Assche Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/sw/rxe/rxe_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 3a94eb5edcf9..f7f9caaec7d6 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -1146,7 +1146,7 @@ static ssize_t parent_show(struct device *device, struct rxe_dev *rxe = container_of(device, struct rxe_dev, ib_dev.dev); - return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1)); + return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1)); } static DEVICE_ATTR_RO(parent); -- GitLab From f5fa64c8daf7b97280865c73903edc0a3eea819e Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 9 Sep 2020 14:53:50 -0700 Subject: [PATCH 0774/1304] vgacon: remove software scrollback support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 973c096f6a85e5b5f2a295126ba6928d9a6afd45 
upstream. Yunhai Zhang recently fixed a VGA software scrollback bug in commit ebfdfeeae8c0 ("vgacon: Fix for missing check in scrollback handling"), but that then made people look more closely at some of this code, and there were more problems on the vgacon side, but also the fbcon software scrollback. We don't really have anybody who maintains this code - probably because nobody actually _uses_ it any more. Sure, people still use both VGA and the framebuffer consoles, but they are no longer the main user interfaces to the kernel, and haven't been for decades, so these kinds of extra features end up bitrotting and not really being used. So rather than try to maintain a likely unused set of code, I'll just aggressively remove it, and see if anybody even notices. Maybe there are people who haven't jumped on the whole GUI badnwagon yet, and think it's just a fad. And maybe those people use the scrollback code. If that turns out to be the case, we can resurrect this again, once we've found the sucker^Wmaintainer for it who actually uses it. 
Reported-by: NopNop Nop Tested-by: Willy Tarreau Cc: 张云海 Acked-by: Andy Lutomirski Acked-by: Willy Tarreau Reviewed-by: Greg Kroah-Hartman Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/configs/pasemi_defconfig | 1 - arch/powerpc/configs/ppc6xx_defconfig | 1 - arch/x86/configs/i386_defconfig | 1 - arch/x86/configs/x86_64_defconfig | 1 - drivers/video/console/Kconfig | 46 ------ drivers/video/console/vgacon.c | 221 +------------------------- 6 files changed, 1 insertion(+), 270 deletions(-) diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index 4504380c7a92..60839eeada8b 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -110,7 +110,6 @@ CONFIG_FB_NVIDIA=y CONFIG_FB_NVIDIA_I2C=y CONFIG_FB_RADEON=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y CONFIG_SOUND=y CONFIG_SND=y diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 7032d4244ec5..e30af76f4753 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -779,7 +779,6 @@ CONFIG_FB_TRIDENT=m CONFIG_FB_SM501=m CONFIG_FB_IBM_GXT4500=y CONFIG_LCD_PLATFORM=m -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index ce75be940567..5a23a4ccd755 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -216,7 +216,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 45b0f4d84d83..dc0881292904 100644 --- a/arch/x86/configs/x86_64_defconfig +++ 
b/arch/x86/configs/x86_64_defconfig @@ -212,7 +212,6 @@ CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y CONFIG_FB_EFI=y # CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_VGACON_SOFT_SCROLLBACK=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 787792c3d08d..40d5fea8513c 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -21,52 +21,6 @@ config VGA_CONSOLE Say Y. -config VGACON_SOFT_SCROLLBACK - bool "Enable Scrollback Buffer in System RAM" - depends on VGA_CONSOLE - default n - help - The scrollback buffer of the standard VGA console is located in - the VGA RAM. The size of this RAM is fixed and is quite small. - If you require a larger scrollback buffer, this can be placed in - System RAM which is dynamically allocated during initialization. - Placing the scrollback buffer in System RAM will slightly slow - down the console. - - If you want this feature, say 'Y' here and enter the amount of - RAM to allocate for this buffer. If unsure, say 'N'. - -config VGACON_SOFT_SCROLLBACK_SIZE - int "Scrollback Buffer Size (in KB)" - depends on VGACON_SOFT_SCROLLBACK - range 1 1024 - default "64" - help - Enter the amount of System RAM to allocate for scrollback - buffers of VGA consoles. Each 64KB will give you approximately - 16 80x25 screenfuls of scrollback buffer. - -config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT - bool "Persistent Scrollback History for each console by default" - depends on VGACON_SOFT_SCROLLBACK - default n - help - Say Y here if the scrollback history should persist by default when - switching between consoles. Otherwise, the scrollback history will be - flushed each time the console is switched. This feature can also be - enabled using the boot command line parameter - 'vgacon.scrollback_persistent=1'. - - This feature might break your tool of choice to flush the scrollback - buffer, e.g. 
clear(1) will work fine but Debian's clear_console(1) - will be broken, which might cause security issues. - You can use the escape sequence \e[3J instead if this feature is - activated. - - Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each - created tty device. - So if you use a RAM-constrained system, say N here. - config MDA_CONSOLE depends on !M68K && !PARISC && ISA tristate "MDA text console (dual-headed)" diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index e079b910feb2..55507df335bd 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -165,214 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c) write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); } -#ifdef CONFIG_VGACON_SOFT_SCROLLBACK -/* software scrollback */ -struct vgacon_scrollback_info { - void *data; - int tail; - int size; - int rows; - int cnt; - int cur; - int save; - int restore; -}; - -static struct vgacon_scrollback_info *vgacon_scrollback_cur; -static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES]; -static bool scrollback_persistent = \ - IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT); -module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000); -MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles"); - -static void vgacon_scrollback_reset(int vc_num, size_t reset_size) -{ - struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num]; - - if (scrollback->data && reset_size > 0) - memset(scrollback->data, 0, reset_size); - - scrollback->cnt = 0; - scrollback->tail = 0; - scrollback->cur = 0; -} - -static void vgacon_scrollback_init(int vc_num) -{ - int pitch = vga_video_num_columns * 2; - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - int rows = size / pitch; - void *data; - - data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, - GFP_NOWAIT); - - 
vgacon_scrollbacks[vc_num].data = data; - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - - vgacon_scrollback_cur->rows = rows - 1; - vgacon_scrollback_cur->size = rows * pitch; - - vgacon_scrollback_reset(vc_num, size); -} - -static void vgacon_scrollback_switch(int vc_num) -{ - if (!scrollback_persistent) - vc_num = 0; - - if (!vgacon_scrollbacks[vc_num].data) { - vgacon_scrollback_init(vc_num); - } else { - if (scrollback_persistent) { - vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; - } else { - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(vc_num, size); - } - } -} - -static void vgacon_scrollback_startup(void) -{ - vgacon_scrollback_cur = &vgacon_scrollbacks[0]; - vgacon_scrollback_init(0); -} - -static void vgacon_scrollback_update(struct vc_data *c, int t, int count) -{ - void *p; - - if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size || - c->vc_num != fg_console) - return; - - p = (void *) (c->vc_origin + t * c->vc_size_row); - - while (count--) { - if ((vgacon_scrollback_cur->tail + c->vc_size_row) > - vgacon_scrollback_cur->size) - vgacon_scrollback_cur->tail = 0; - - scr_memcpyw(vgacon_scrollback_cur->data + - vgacon_scrollback_cur->tail, - p, c->vc_size_row); - - vgacon_scrollback_cur->cnt++; - p += c->vc_size_row; - vgacon_scrollback_cur->tail += c->vc_size_row; - - if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size) - vgacon_scrollback_cur->tail = 0; - - if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows) - vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows; - - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_restore_screen(struct vc_data *c) -{ - c->vc_origin = c->vc_visible_origin; - vgacon_scrollback_cur->save = 0; - - if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { - scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, - c->vc_screenbuf_size > vga_vram_size ? 
- vga_vram_size : c->vc_screenbuf_size); - vgacon_scrollback_cur->restore = 1; - vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; - } -} - -static void vgacon_scrolldelta(struct vc_data *c, int lines) -{ - int start, end, count, soff; - - if (!lines) { - vgacon_restore_screen(c); - return; - } - - if (!vgacon_scrollback_cur->data) - return; - - if (!vgacon_scrollback_cur->save) { - vgacon_cursor(c, CM_ERASE); - vgacon_save_screen(c); - c->vc_origin = (unsigned long)c->vc_screenbuf; - vgacon_scrollback_cur->save = 1; - } - - vgacon_scrollback_cur->restore = 0; - start = vgacon_scrollback_cur->cur + lines; - end = start + abs(lines); - - if (start < 0) - start = 0; - - if (start > vgacon_scrollback_cur->cnt) - start = vgacon_scrollback_cur->cnt; - - if (end < 0) - end = 0; - - if (end > vgacon_scrollback_cur->cnt) - end = vgacon_scrollback_cur->cnt; - - vgacon_scrollback_cur->cur = start; - count = end - start; - soff = vgacon_scrollback_cur->tail - - ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row); - soff -= count * c->vc_size_row; - - if (soff < 0) - soff += vgacon_scrollback_cur->size; - - count = vgacon_scrollback_cur->cnt - start; - - if (count > c->vc_rows) - count = c->vc_rows; - - if (count) { - int copysize; - - int diff = c->vc_rows - count; - void *d = (void *) c->vc_visible_origin; - void *s = (void *) c->vc_screenbuf; - - count *= c->vc_size_row; - /* how much memory to end of buffer left? 
*/ - copysize = min(count, vgacon_scrollback_cur->size - soff); - scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize); - d += copysize; - count -= copysize; - - if (count) { - scr_memcpyw(d, vgacon_scrollback_cur->data, count); - d += count; - } - - if (diff) - scr_memcpyw(d, s, diff * c->vc_size_row); - } else - vgacon_cursor(c, CM_MOVE); -} - -static void vgacon_flush_scrollback(struct vc_data *c) -{ - size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; - - vgacon_scrollback_reset(c->vc_num, size); -} -#else -#define vgacon_scrollback_startup(...) do { } while (0) -#define vgacon_scrollback_init(...) do { } while (0) -#define vgacon_scrollback_update(...) do { } while (0) -#define vgacon_scrollback_switch(...) do { } while (0) - static void vgacon_restore_screen(struct vc_data *c) { if (c->vc_origin != c->vc_visible_origin) @@ -386,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) vga_set_mem_top(c); } -static void vgacon_flush_scrollback(struct vc_data *c) -{ -} -#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ - static const char *vgacon_startup(void) { const char *display_desc = NULL; @@ -573,10 +360,7 @@ static const char *vgacon_startup(void) vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; - if (!vga_init_done) { - vgacon_scrollback_startup(); - vga_init_done = true; - } + vga_init_done = true; return display_desc; } @@ -867,7 +651,6 @@ static int vgacon_switch(struct vc_data *c) vgacon_doresize(c, c->vc_cols, c->vc_rows); } - vgacon_scrollback_switch(c->vc_num); return 0; /* Redrawing not needed */ } @@ -1384,7 +1167,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, oldo = c->vc_origin; delta = lines * c->vc_size_row; if (dir == SM_UP) { - vgacon_scrollback_update(c, t, lines); if (c->vc_scr_end + delta >= vga_vram_end) { scr_memcpyw((u16 *) vga_vram_base, (u16 *) (oldo + delta), @@ -1448,7 +1230,6 @@ const struct consw vga_con = { .con_save_screen = 
vgacon_save_screen, .con_build_attr = vgacon_build_attr, .con_invert_region = vgacon_invert_region, - .con_flush_scrollback = vgacon_flush_scrollback, }; EXPORT_SYMBOL(vga_con); -- GitLab From 770adb5d2b8ebe94a92e4c9510f4f2517f4204eb Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 7 Sep 2020 11:45:27 -0700 Subject: [PATCH 0775/1304] fbcon: remove soft scrollback code commit 50145474f6ef4a9c19205b173da6264a644c7489 upstream. This (and the VGA soft scrollback) turns out to have various nasty small special cases that nobody really is willing to fight. The soft scrollback code was really useful a few decades ago when you typically used the console interactively as the main way to interact with the machine, but that just isn't the case any more. So it's not worth dragging along. Tested-by: Yuan Ming Tested-by: Willy Tarreau Acked-by: Bartlomiej Zolnierkiewicz Acked-by: Daniel Vetter Reviewed-by: Greg Kroah-Hartman Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/core/fbcon.c | 334 +------------------------------ 1 file changed, 4 insertions(+), 330 deletions(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index f75557b39a61..66542e9192f4 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -102,12 +102,6 @@ static int logo_lines; /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO enums. 
*/ static int logo_shown = FBCON_LOGO_CANSHOW; -/* Software scrollback */ -static int fbcon_softback_size = 32768; -static unsigned long softback_buf, softback_curr; -static unsigned long softback_in; -static unsigned long softback_top, softback_end; -static int softback_lines; /* console mappings */ static int first_fb_vc; static int last_fb_vc = MAX_NR_CONSOLES - 1; @@ -148,8 +142,6 @@ static int margin_color; static const struct consw fb_con; -#define CM_SOFTBACK (8) - #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row) static int fbcon_set_origin(struct vc_data *); @@ -355,18 +347,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info, return color; } -static void fbcon_update_softback(struct vc_data *vc) -{ - int l = fbcon_softback_size / vc->vc_size_row; - - if (l > 5) - softback_end = softback_buf + l * vc->vc_size_row; - else - /* Smaller scrollback makes no sense, and 0 would screw - the operation totally */ - softback_top = 0; -} - static void fb_flashcursor(struct work_struct *work) { struct fb_info *info = container_of(work, struct fb_info, queue); @@ -396,7 +376,7 @@ static void fb_flashcursor(struct work_struct *work) c = scr_readw((u16 *) vc->vc_pos); mode = (!ops->cursor_flash || ops->cursor_state.enable) ? 
CM_ERASE : CM_DRAW; - ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1), + ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); console_unlock(); } @@ -453,13 +433,7 @@ static int __init fb_console_setup(char *this_opt) } if (!strncmp(options, "scrollback:", 11)) { - options += 11; - if (*options) { - fbcon_softback_size = simple_strtoul(options, &options, 0); - if (*options == 'k' || *options == 'K') { - fbcon_softback_size *= 1024; - } - } + pr_warn("Ignoring scrollback size option\n"); continue; } @@ -988,31 +962,6 @@ static const char *fbcon_startup(void) set_blitting_type(vc, info); - if (info->fix.type != FB_TYPE_TEXT) { - if (fbcon_softback_size) { - if (!softback_buf) { - softback_buf = - (unsigned long) - kmalloc(fbcon_softback_size, - GFP_KERNEL); - if (!softback_buf) { - fbcon_softback_size = 0; - softback_top = 0; - } - } - } else { - if (softback_buf) { - kfree((void *) softback_buf); - softback_buf = 0; - softback_top = 0; - } - } - if (softback_buf) - softback_in = softback_top = softback_curr = - softback_buf; - softback_lines = 0; - } - /* Setup default font */ if (!p->fontdata && !vc->vc_font.data) { if (!fontname[0] || !(font = find_font(fontname))) @@ -1181,9 +1130,6 @@ static void fbcon_init(struct vc_data *vc, int init) if (logo) fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows); - if (vc == svc && softback_buf) - fbcon_update_softback(vc); - if (ops->rotate_font && ops->rotate_font(info, vc)) { ops->rotate = FB_ROTATE_UR; set_blitting_type(vc, info); @@ -1346,7 +1292,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode) { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_ops *ops = info->fbcon_par; - int y; int c = scr_readw((u16 *) vc->vc_pos); ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); @@ -1360,16 +1305,8 @@ static void fbcon_cursor(struct vc_data *vc, int mode) fbcon_add_cursor_timer(info); ops->cursor_flash = (mode == 
CM_ERASE) ? 0 : 1; - if (mode & CM_SOFTBACK) { - mode &= ~CM_SOFTBACK; - y = softback_lines; - } else { - if (softback_lines) - fbcon_set_origin(vc); - y = 0; - } - ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1), + ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); } @@ -1440,8 +1377,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, if (con_is_visible(vc)) { update_screen(vc); - if (softback_buf) - fbcon_update_softback(vc); } } @@ -1579,99 +1514,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) scrollback_current = 0; } -static void fbcon_redraw_softback(struct vc_data *vc, struct display *p, - long delta) -{ - int count = vc->vc_rows; - unsigned short *d, *s; - unsigned long n; - int line = 0; - - d = (u16 *) softback_curr; - if (d == (u16 *) softback_in) - d = (u16 *) vc->vc_origin; - n = softback_curr + delta * vc->vc_size_row; - softback_lines -= delta; - if (delta < 0) { - if (softback_curr < softback_top && n < softback_buf) { - n += softback_end - softback_buf; - if (n < softback_top) { - softback_lines -= - (softback_top - n) / vc->vc_size_row; - n = softback_top; - } - } else if (softback_curr >= softback_top - && n < softback_top) { - softback_lines -= - (softback_top - n) / vc->vc_size_row; - n = softback_top; - } - } else { - if (softback_curr > softback_in && n >= softback_end) { - n += softback_buf - softback_end; - if (n > softback_in) { - n = softback_in; - softback_lines = 0; - } - } else if (softback_curr <= softback_in && n > softback_in) { - n = softback_in; - softback_lines = 0; - } - } - if (n == softback_curr) - return; - softback_curr = n; - s = (u16 *) softback_curr; - if (s == (u16 *) softback_in) - s = (u16 *) vc->vc_origin; - while (count--) { - unsigned short *start; - unsigned short *le; - unsigned short c; - int x = 0; - unsigned short attr = 1; - - start = s; - le = advance_row(s, 1); - do { - c = scr_readw(s); - if (attr 
!= (c & 0xff00)) { - attr = c & 0xff00; - if (s > start) { - fbcon_putcs(vc, start, s - start, - line, x); - x += s - start; - start = s; - } - } - if (c == scr_readw(d)) { - if (s > start) { - fbcon_putcs(vc, start, s - start, - line, x); - x += s - start + 1; - start = s + 1; - } else { - x++; - start++; - } - } - s++; - d++; - } while (s < le); - if (s > start) - fbcon_putcs(vc, start, s - start, line, x); - line++; - if (d == (u16 *) softback_end) - d = (u16 *) softback_buf; - if (d == (u16 *) softback_in) - d = (u16 *) vc->vc_origin; - if (s == (u16 *) softback_end) - s = (u16 *) softback_buf; - if (s == (u16 *) softback_in) - s = (u16 *) vc->vc_origin; - } -} - static void fbcon_redraw_move(struct vc_data *vc, struct display *p, int line, int count, int dy) { @@ -1811,31 +1653,6 @@ static void fbcon_redraw(struct vc_data *vc, struct display *p, } } -static inline void fbcon_softback_note(struct vc_data *vc, int t, - int count) -{ - unsigned short *p; - - if (vc->vc_num != fg_console) - return; - p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row); - - while (count) { - scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row); - count--; - p = advance_row(p, 1); - softback_in += vc->vc_size_row; - if (softback_in == softback_end) - softback_in = softback_buf; - if (softback_in == softback_top) { - softback_top += vc->vc_size_row; - if (softback_top == softback_end) - softback_top = softback_buf; - } - } - softback_curr = softback_in; -} - static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, enum con_scroll dir, unsigned int count) { @@ -1858,8 +1675,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - if (softback_top) - fbcon_softback_note(vc, t, count); if (logo_shown >= 0) goto redraw_up; switch (p->scrollmode) { @@ -2230,14 +2045,6 @@ static int fbcon_switch(struct vc_data *vc) info = 
registered_fb[con2fb_map[vc->vc_num]]; ops = info->fbcon_par; - if (softback_top) { - if (softback_lines) - fbcon_set_origin(vc); - softback_top = softback_curr = softback_in = softback_buf; - softback_lines = 0; - fbcon_update_softback(vc); - } - if (logo_shown >= 0) { struct vc_data *conp2 = vc_cons[logo_shown].d; @@ -2571,9 +2378,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int cnt; char *old_data = NULL; - if (con_is_visible(vc) && softback_lines) - fbcon_set_origin(vc); - resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); if (p->userfont) old_data = vc->vc_font.data; @@ -2599,8 +2403,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, cols /= w; rows /= h; vc_resize(vc, cols, rows); - if (con_is_visible(vc) && softback_buf) - fbcon_update_softback(vc); } else if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) { fbcon_clear_margins(vc, 0); @@ -2759,19 +2561,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table) static u16 *fbcon_screen_pos(struct vc_data *vc, int offset) { - unsigned long p; - int line; - - if (vc->vc_num != fg_console || !softback_lines) - return (u16 *) (vc->vc_origin + offset); - line = offset / vc->vc_size_row; - if (line >= softback_lines) - return (u16 *) (vc->vc_origin + offset - - softback_lines * vc->vc_size_row); - p = softback_curr + offset; - if (p >= softback_end) - p += softback_buf - softback_end; - return (u16 *) p; + return (u16 *) (vc->vc_origin + offset); } static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, @@ -2785,22 +2575,7 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos, x = offset % vc->vc_cols; y = offset / vc->vc_cols; - if (vc->vc_num == fg_console) - y += softback_lines; ret = pos + (vc->vc_cols - x) * 2; - } else if (vc->vc_num == fg_console && softback_lines) { - unsigned long offset = pos - softback_curr; - - if (pos < softback_curr) - offset += softback_end - softback_buf; - offset /= 2; - 
x = offset % vc->vc_cols; - y = offset / vc->vc_cols; - ret = pos + (vc->vc_cols - x) * 2; - if (ret == softback_end) - ret = softback_buf; - if (ret == softback_in) - ret = vc->vc_origin; } else { /* Should not happen */ x = y = 0; @@ -2828,106 +2603,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt) a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4); scr_writew(a, p++); - if (p == (u16 *) softback_end) - p = (u16 *) softback_buf; - if (p == (u16 *) softback_in) - p = (u16 *) vc->vc_origin; } } -static void fbcon_scrolldelta(struct vc_data *vc, int lines) -{ - struct fb_info *info = registered_fb[con2fb_map[fg_console]]; - struct fbcon_ops *ops = info->fbcon_par; - struct display *disp = &fb_display[fg_console]; - int offset, limit, scrollback_old; - - if (softback_top) { - if (vc->vc_num != fg_console) - return; - if (vc->vc_mode != KD_TEXT || !lines) - return; - if (logo_shown >= 0) { - struct vc_data *conp2 = vc_cons[logo_shown].d; - - if (conp2->vc_top == logo_lines - && conp2->vc_bottom == conp2->vc_rows) - conp2->vc_top = 0; - if (logo_shown == vc->vc_num) { - unsigned long p, q; - int i; - - p = softback_in; - q = vc->vc_origin + - logo_lines * vc->vc_size_row; - for (i = 0; i < logo_lines; i++) { - if (p == softback_top) - break; - if (p == softback_buf) - p = softback_end; - p -= vc->vc_size_row; - q -= vc->vc_size_row; - scr_memcpyw((u16 *) q, (u16 *) p, - vc->vc_size_row); - } - softback_in = softback_curr = p; - update_region(vc, vc->vc_origin, - logo_lines * vc->vc_cols); - } - logo_shown = FBCON_LOGO_CANSHOW; - } - fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK); - fbcon_redraw_softback(vc, disp, lines); - fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK); - return; - } - - if (!scrollback_phys_max) - return; - - scrollback_old = scrollback_current; - scrollback_current -= lines; - if (scrollback_current < 0) - scrollback_current = 0; - else if (scrollback_current > scrollback_max) - scrollback_current = scrollback_max; - if 
(scrollback_current == scrollback_old) - return; - - if (fbcon_is_inactive(vc, info)) - return; - - fbcon_cursor(vc, CM_ERASE); - - offset = disp->yscroll - scrollback_current; - limit = disp->vrows; - switch (disp->scrollmode) { - case SCROLL_WRAP_MOVE: - info->var.vmode |= FB_VMODE_YWRAP; - break; - case SCROLL_PAN_MOVE: - case SCROLL_PAN_REDRAW: - limit -= vc->vc_rows; - info->var.vmode &= ~FB_VMODE_YWRAP; - break; - } - if (offset < 0) - offset += limit; - else if (offset >= limit) - offset -= limit; - - ops->var.xoffset = 0; - ops->var.yoffset = offset * vc->vc_font.height; - ops->update_start(info); - - if (!scrollback_current) - fbcon_cursor(vc, CM_DRAW); -} - static int fbcon_set_origin(struct vc_data *vc) { - if (softback_lines) - fbcon_scrolldelta(vc, softback_lines); return 0; } @@ -2991,8 +2671,6 @@ static void fbcon_modechanged(struct fb_info *info) fbcon_set_palette(vc, color_table); update_screen(vc); - if (softback_buf) - fbcon_update_softback(vc); } } @@ -3434,7 +3112,6 @@ static const struct consw fb_con = { .con_font_default = fbcon_set_def_font, .con_font_copy = fbcon_copy_font, .con_set_palette = fbcon_set_palette, - .con_scrolldelta = fbcon_scrolldelta, .con_set_origin = fbcon_set_origin, .con_invert_region = fbcon_invert_region, .con_screen_pos = fbcon_screen_pos, @@ -3691,9 +3368,6 @@ static void fbcon_exit(void) } #endif - kfree((void *)softback_buf); - softback_buf = 0UL; - for_each_registered_fb(i) { int pending = 0; -- GitLab From e8b41a9558c81f9aabeb47f2bdee37b98e4f8de6 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 8 Sep 2020 10:56:27 -0700 Subject: [PATCH 0776/1304] fbcon: remove now unusued 'softback_lines' cursor() argument commit 06a0df4d1b8b13b551668e47b11fd7629033b7df upstream. Since the softscroll code got removed, this argument is always zero and makes no sense any more. 
Tested-by: Yuan Ming Tested-by: Willy Tarreau Reviewed-by: Greg Kroah-Hartman Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/core/bitblit.c | 11 +---------- drivers/video/fbdev/core/fbcon.c | 4 ++-- drivers/video/fbdev/core/fbcon.h | 2 +- drivers/video/fbdev/core/fbcon_ccw.c | 11 +---------- drivers/video/fbdev/core/fbcon_cw.c | 11 +---------- drivers/video/fbdev/core/fbcon_ud.c | 11 +---------- drivers/video/fbdev/core/tileblit.c | 2 +- 7 files changed, 8 insertions(+), 44 deletions(-) diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index 35ebeeccde4d..436365efae73 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, } static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 66542e9192f4..29226b6cb632 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -376,7 +376,7 @@ static void fb_flashcursor(struct work_struct *work) c = scr_readw((u16 *) vc->vc_pos); mode = (!ops->cursor_flash || ops->cursor_state.enable) ? 
CM_ERASE : CM_DRAW; - ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1), + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); console_unlock(); } @@ -1306,7 +1306,7 @@ static void fbcon_cursor(struct vc_data *vc, int mode) ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1; - ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1), + ops->cursor(vc, info, mode, get_color(vc, info, c, 1), get_color(vc, info, c, 0)); } diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 21912a3ba32f..aeea63abbe98 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -62,7 +62,7 @@ struct fbcon_ops { void (*clear_margins)(struct vc_data *vc, struct fb_info *info, int color, int bottom_only); void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg); + int fg, int bg); int (*update_start)(struct fb_info *info); int (*rotate_font)(struct fb_info *info, struct vc_data *vc); struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */ diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 78f3a5621478..71ad6967a70e 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, } static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * 
vc->vc_font.width)); diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index fd098ff17574..31fe5dd651d4 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, } static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width)); diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index e165a3fad29a..b2dd1370e39b 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, } static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; @@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, cursor.set = 0; - if (softback_lines) { - if (y + softback_lines >= vc->vc_rows) { - mode = CM_ERASE; - ops->cursor_flash = 0; - return; - } else - y += softback_lines; - } - c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 93390312957f..eb664dbf96f6 100644 --- 
a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -80,7 +80,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, } static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, - int softback_lines, int fg, int bg) + int fg, int bg) { struct fb_tilecursor cursor; int use_sw = (vc->vc_cursor_type & 0x10); -- GitLab From d597a38246c612e2a72c337feefad11d03d967d2 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Wed, 19 Aug 2020 16:55:27 +0800 Subject: [PATCH 0777/1304] KVM: VMX: Don't freeze guest when event delivery causes an APIC-access exit commit 99b82a1437cb31340dbb2c437a2923b9814a7b15 upstream. According to SDM 27.2.4, Event delivery causes an APIC-access VM exit. Don't report internal error and freeze guest when event delivery causes an APIC-access exit, it is handleable and the event will be re-injected during the next vmentry. Signed-off-by: Wanpeng Li Message-Id: <1597827327-25055-2-git-send-email-wanpengli@tencent.com> Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2f823f35dee5..d6bcbce6c15c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10128,6 +10128,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) (exit_reason != EXIT_REASON_EXCEPTION_NMI && exit_reason != EXIT_REASON_EPT_VIOLATION && exit_reason != EXIT_REASON_PML_FULL && + exit_reason != EXIT_REASON_APIC_ACCESS && exit_reason != EXIT_REASON_TASK_SWITCH)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; -- GitLab From fcfdbfb37a86f6e5d77d27412cfc3a89154ced68 Mon Sep 17 00:00:00 2001 From: Chris Healy Date: Fri, 21 Aug 2020 14:21:02 -0700 Subject: [PATCH 0778/1304] ARM: dts: vfxxx: Add syscon compatible with OCOTP commit 2a6838d54128952ace6f0ca166dd8706abe46649 upstream. 
Add syscon compatibility with Vybrid OCOTP node. This is required to access the UID. Fixes: fa8d20c8dbb77 ("ARM: dts: vfxxx: Add node corresponding to OCOTP") Cc: stable@vger.kernel.org Reviewed-by: Fabio Estevam Reviewed-by: Stefan Agner Signed-off-by: Chris Healy Signed-off-by: Shawn Guo Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/vfxxx.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi index d392794d9c13..de81e8b4afde 100644 --- a/arch/arm/boot/dts/vfxxx.dtsi +++ b/arch/arm/boot/dts/vfxxx.dtsi @@ -532,7 +532,7 @@ }; ocotp: ocotp@400a5000 { - compatible = "fsl,vf610-ocotp"; + compatible = "fsl,vf610-ocotp", "syscon"; reg = <0x400a5000 0x1000>; clocks = <&clks VF610_CLK_OCOTP>; }; -- GitLab From 2b537106bce0d55e6351ad63753e18391d4770f4 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 31 Aug 2020 19:37:00 +0900 Subject: [PATCH 0779/1304] video: fbdev: fix OOB read in vga_8planes_imageblit() commit bd018a6a75cebb511bb55a0e7690024be975fe93 upstream. syzbot is reporting OOB read at vga_8planes_imageblit() [1], for "cdat[y] >> 4" can become a negative value due to "const char *cdat". 
[1] https://syzkaller.appspot.com/bug?id=0d7a0da1557dcd1989e00cb3692b26d4173b4132 Reported-by: syzbot Signed-off-by: Tetsuo Handa Cc: stable Link: https://lore.kernel.org/r/90b55ec3-d5b0-3307-9f7c-7ff5c5fd6ad3@i-love.sakura.ne.jp Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/vga16fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 2c6a576ed84c..4b83109202b1 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -1121,7 +1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i char oldop = setop(0); char oldsr = setsr(0); char oldmask = selectmask(); - const char *cdat = image->data; + const unsigned char *cdat = image->data; u32 dx = image->dx; char __iomem *where; int y; -- GitLab From 72cd9c802a69fafe0537dda450ffb452673188cf Mon Sep 17 00:00:00 2001 From: Vaibhav Agarwal Date: Fri, 14 Aug 2020 18:03:15 +0530 Subject: [PATCH 0780/1304] staging: greybus: audio: fix uninitialized value issue commit 1dffeb8b8b4c261c45416d53c75ea51e6ece1770 upstream. The current implementation for gbcodec_mixer_dapm_ctl_put() uses uninitialized gbvalue for comparison with updated value. This was found using static analysis with coverity. Uninitialized scalar variable (UNINIT) 11. uninit_use: Using uninitialized value gbvalue.value.integer_value[0]. 460 if (gbvalue.value.integer_value[0] != val) { This patch fixes the issue with fetching the gbvalue before using it for comparision. 
Fixes: 6339d2322c47 ("greybus: audio: Add topology parser for GB codec") Reported-by: Colin Ian King Signed-off-by: Vaibhav Agarwal Cc: stable Link: https://lore.kernel.org/r/bc4f29eb502ccf93cd2ffd98db0e319fa7d0f247.1597408126.git.vaibhav.sr@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/greybus/audio_topology.c | 29 ++++++++++++------------ 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c index b71078339e86..860247d71818 100644 --- a/drivers/staging/greybus/audio_topology.c +++ b/drivers/staging/greybus/audio_topology.c @@ -460,6 +460,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, val = ucontrol->value.integer.value[0] & mask; connect = !!val; + ret = gb_pm_runtime_get_sync(bundle); + if (ret) + return ret; + + ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id, + GB_AUDIO_INVALID_INDEX, &gbvalue); + if (ret) + goto exit; + /* update ucontrol */ if (gbvalue.value.integer_value[0] != val) { for (wi = 0; wi < wlist->num_widgets; wi++) { @@ -473,25 +482,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol, gbvalue.value.integer_value[0] = cpu_to_le32(ucontrol->value.integer.value[0]); - ret = gb_pm_runtime_get_sync(bundle); - if (ret) - return ret; - ret = gb_audio_gb_set_control(module->mgmt_connection, data->ctl_id, GB_AUDIO_INVALID_INDEX, &gbvalue); - - gb_pm_runtime_put_autosuspend(bundle); - - if (ret) { - dev_err_ratelimited(codec->dev, - "%d:Error in %s for %s\n", ret, - __func__, kcontrol->id.name); - return ret; - } } - return 0; +exit: + gb_pm_runtime_put_autosuspend(bundle); + if (ret) + dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret, + __func__, kcontrol->id.name); + return ret; } #define SOC_DAPM_MIXER_GB(xname, kcount, data) \ -- GitLab From 5214c5029672a087a01cef47c4aea797441f2bea Mon Sep 17 00:00:00 2001 From: Sivaprakash Murugesan Date: Wed, 29 Jul 2020 
21:00:03 +0530 Subject: [PATCH 0781/1304] phy: qcom-qmp: Use correct values for ipq8074 PCIe Gen2 PHY init commit afd55e6d1bd35b4b36847869011447a83a81c8e0 upstream. There were some problem in ipq8074 Gen2 PCIe phy init sequence. 1. Few register values were wrongly updated in the phy init sequence. 2. The register QSERDES_RX_SIGDET_CNTRL is a RX tuning parameter register which is added in serdes table causing the wrong register was getting updated. 3. Clocks and resets were not added in the phy init. Fix these to make Gen2 PCIe port on ipq8074 devices to work. Fixes: eef243d04b2b6 ("phy: qcom-qmp: Add support for IPQ8074") Cc: stable@vger.kernel.org Co-developed-by: Selvam Sathappan Periakaruppan Signed-off-by: Selvam Sathappan Periakaruppan Signed-off-by: Sivaprakash Murugesan Link: https://lore.kernel.org/r/1596036607-11877-4-git-send-email-sivaprak@codeaurora.org Signed-off-by: Vinod Koul Signed-off-by: Greg Kroah-Hartman --- drivers/phy/qualcomm/phy-qcom-qmp.c | 16 +++++++++------- drivers/phy/qualcomm/phy-qcom-qmp.h | 2 ++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index cf515928fed0..68107611c70a 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -311,8 +311,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf), QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1), QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff), + QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f), QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6), QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf), QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0), @@ -338,7 +338,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { 
QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0), QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80), QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1), - QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1), @@ -347,7 +346,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f), QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19), QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19), - QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7), }; static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { @@ -355,6 +353,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6), QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2), QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36), + QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a), }; static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { @@ -365,7 +365,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb), QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b), QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4), - QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4), }; static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = { @@ -818,6 +817,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { .mask_pcs_ready = PHYSTATUS, }; +static const char * const ipq8074_pciephy_clk_l[] = { + "aux", "cfg_ahb", +}; /* list of resets */ static const char * const ipq8074_pciephy_reset_l[] = { "phy", "common", @@ -835,8 +837,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { .rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl), .pcs_tbl = ipq8074_pcie_pcs_tbl, .pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl), - .clk_list 
= NULL, - .num_clks = 0, + .clk_list = ipq8074_pciephy_clk_l, + .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l), .reset_list = ipq8074_pciephy_reset_l, .num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l), .vreg_list = NULL, diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index 5d78d43ba9fc..6b3aaf521e58 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -77,6 +77,8 @@ #define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc /* Only for QMP V2 PHY - TX registers */ +#define QSERDES_TX_EMP_POST1_LVL 0x018 +#define QSERDES_TX_SLEW_CNTL 0x040 #define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054 #define QSERDES_TX_DEBUG_BUS_SEL 0x064 #define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068 -- GitLab From 3a75f7e384c96f811f5818387171e6264e1ad691 Mon Sep 17 00:00:00 2001 From: Zeng Tao Date: Fri, 4 Sep 2020 14:37:44 +0800 Subject: [PATCH 0782/1304] usb: core: fix slab-out-of-bounds Read in read_descriptors commit a18cd6c9b6bc73dc17e8b7e9bd07decaa8833c97 upstream. The USB device descriptor may get changed between two consecutive enumerations on the same device for some reason, such as DFU or malicius device. In that case, we may access the changing descriptor if we don't take the device lock here. 
The issue is reported: https://syzkaller.appspot.com/bug?id=901a0d9e6519ef8dc7acab25344bd287dd3c7be9 Cc: stable Cc: Alan Stern Reported-by: syzbot+256e56ddde8b8957eabd@syzkaller.appspotmail.com Fixes: 217a9081d8e6 ("USB: add all configs to the "descriptors" attribute") Signed-off-by: Zeng Tao Link: https://lore.kernel.org/r/1599201467-11000-1-git-send-email-prime.zeng@hisilicon.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/sysfs.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 7e88fdfe3cf5..b93b18ba89df 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -888,7 +888,11 @@ read_descriptors(struct file *filp, struct kobject *kobj, size_t srclen, n; int cfgno; void *src; + int retval; + retval = usb_lock_device_interruptible(udev); + if (retval < 0) + return -EINTR; /* The binary attribute begins with the device descriptor. * Following that are the raw descriptor entries for all the * configurations (config plus subsidiary descriptors). @@ -913,6 +917,7 @@ read_descriptors(struct file *filp, struct kobject *kobj, off -= srclen; } } + usb_unlock_device(udev); return count - nleft; } -- GitLab From 79f8553ccd4185be0020a58815aadbe30521fd13 Mon Sep 17 00:00:00 2001 From: Patrick Riphagen Date: Thu, 6 Aug 2020 13:55:47 +0200 Subject: [PATCH 0783/1304] USB: serial: ftdi_sio: add IDs for Xsens Mti USB converter commit 6ccc48e0eb2f3a5f3bd39954a21317e5f8874726 upstream. The device added has an FTDI chip inside. The device is used to connect Xsens USB Motion Trackers. 
Cc: stable@vger.kernel.org Signed-off-by: Patrick Riphagen Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/ftdi_sio.c | 1 + drivers/usb/serial/ftdi_sio_ids.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index ce9cc1f90b05..f0f630e1cf1c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -703,6 +703,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) }, { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index e8373528264c..b5ca17a5967a 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -160,6 +160,7 @@ #define XSENS_AWINDA_DONGLE_PID 0x0102 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ #define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */ +#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */ #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ /* Xsens devices using FTDI VID */ -- GitLab From 1539e44765119693c0695abe278a6f7050e6a144 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Sat, 29 Aug 2020 15:42:50 +0200 Subject: [PATCH 0784/1304] USB: serial: option: support dynamic Quectel USB compositions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 2bb70f0a4b238323e4e2f392fc3ddeb5b7208c9e upstream. The USB composition, defining the set of exported functions, is dynamic in newer Quectel modems. Default functions can be disabled and alternative functions can be enabled instead. 
The alternatives includes class functions using interface pairs, which should be handled by the respective class drivers. Active interfaces are numbered consecutively, so static blacklisting based on interface numbers will fail when the composition changes. An example of such an error, where the option driver has bound to the CDC ECM data interface, preventing cdc_ether from handling this function: T: Bus=01 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=480 MxCh= 0 D: Ver= 2.00 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=2c7c ProdID=0125 Rev= 3.18 S: Manufacturer=Quectel S: Product=EC25-AF C:* #Ifs= 6 Cfg#= 1 Atr=a0 MxPwr=500mA A: FirstIf#= 4 IfCount= 2 Cls=02(comm.) Sub=06 Prot=00 I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=83(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=87(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 4 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=06 Prot=00 Driver=(none) E: Ad=89(I) Atr=03(Int.) 
MxPS= 16 Ivl=32ms I:* If#= 5 Alt= 0 #EPs= 0 Cls=0a(data ) Sub=00 Prot=00 Driver=option I: If#= 5 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=option E: Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms Another device with the same id gets correct drivers, since the interface of the network function happens to be blacklisted by option: T: Bus=01 Lev=02 Prnt=02 Port=01 Cnt=01 Dev#= 3 Spd=480 MxCh= 0 D: Ver= 2.00 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=2c7c ProdID=0125 Rev= 3.18 S: Manufacturer=Android S: Product=Android C:* #Ifs= 5 Cfg#= 1 Atr=a0 MxPwr=500mA I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=83(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=87(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan E: Ad=89(I) Atr=03(Int.) MxPS= 8 Ivl=32ms E: Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms Change rules for EC21, EC25, BG96 and EG95 to match vendor specific serial functions only, to prevent binding to class functions. Require 2 endpoints on ff/ff/ff functions, avoiding the 3 endpoint QMI/RMNET network functions. 
Cc: AceLan Kao Cc: Sebastian Sjoholm Cc: Dan Williams Cc: stable@vger.kernel.org Signed-off-by: Bjørn Mork Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 6e44aaafdcb1..ce69605c83dc 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1094,14 +1094,18 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, /* Quectel products using Quectel vendor ID */ - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95), - .driver_info = RSVD(4) }, - { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), - .driver_info = RSVD(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff), + .driver_info = NUMEP2 }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | 
NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, -- GitLab From a7c775450086b418f1a323bd158b7e99e52e3a99 Mon Sep 17 00:00:00 2001 From: Aleksander Morgado Date: Sat, 29 Aug 2020 11:05:39 +0200 Subject: [PATCH 0785/1304] USB: serial: option: add support for SIM7070/SIM7080/SIM7090 modules commit 1ac698790819b83f39fd7ea4f6cdabee9bdd7b38 upstream. These modules have 2 different USB layouts: The default layout with PID 0x9205 (AT+CUSBSELNV=1) exposes 4 TTYs and an ECM interface: T: Bus=02 Lev=01 Prnt=01 Port=02 Cnt=01 Dev#= 6 Spd=480 MxCh= 0 D: Ver= 2.00 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=1e0e ProdID=9205 Rev=00.00 S: Manufacturer=SimTech, Incorporated S: Product=SimTech SIM7080 S: SerialNumber=1234567890ABCDEF C: #Ifs= 6 Cfg#= 1 Atr=e0 MxPwr=500mA I: If#=0x0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option I: If#=0x1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option I: If#=0x2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option I: If#=0x3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=option I: If#=0x4 Alt= 0 #EPs= 1 Cls=02(commc) Sub=06 Prot=00 Driver=cdc_ether I: If#=0x5 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=cdc_ether The purpose of each TTY is as follows: * ttyUSB0: DIAG/QCDM port. * ttyUSB1: GNSS data. * ttyUSB2: AT-capable port (control). * ttyUSB3: AT-capable port (data). In the secondary layout with PID=0x9206 (AT+CUSBSELNV=86) the module exposes 6 TTY ports: T: Bus=02 Lev=01 Prnt=01 Port=02 Cnt=01 Dev#= 8 Spd=480 MxCh= 0 D: Ver= 2.00 Cls=02(commc) Sub=00 Prot=00 MxPS=64 #Cfgs= 1 P: Vendor=1e0e ProdID=9206 Rev=00.00 S: Manufacturer=SimTech, Incorporated S: Product=SimTech SIM7080 S: SerialNumber=1234567890ABCDEF C: #Ifs= 6 Cfg#= 1 Atr=e0 MxPwr=500mA I: If#=0x0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option I: If#=0x1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option I: If#=0x2 Alt= 0 #EPs= 2 Cls=ff(vend.) 
Sub=ff Prot=ff Driver=option I: If#=0x3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option I: If#=0x4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option I: If#=0x5 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=option The purpose of each TTY is as follows: * ttyUSB0: DIAG/QCDM port. * ttyUSB1: GNSS data. * ttyUSB2: AT-capable port (control). * ttyUSB3: QFLOG interface. * ttyUSB4: DAM interface. * ttyUSB5: AT-capable port (data). Signed-off-by: Aleksander Morgado Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ce69605c83dc..810f1010ab13 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1823,6 +1823,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */ .driver_info = RSVD(7) }, + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */ + { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) }, { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D), -- GitLab From f744c85ad330ef90cc2b8c4fd4d7c18fb5c7ac83 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Tue, 1 Sep 2020 11:25:28 +0300 Subject: [PATCH 0786/1304] usb: Fix out of sync data toggle if a configured device is reconfigured commit cfd54fa83a5068b61b7eb28d3c117d8354c74c7a upstream. 
Userspace drivers that use a SetConfiguration() request to "lightweight" reset an already configured usb device might cause data toggles to get out of sync between the device and host, and the device becomes unusable. The xHCI host requires endpoints to be dropped and added back to reset the toggle. If USB core notices the new configuration is the same as the current active configuration it will avoid these extra steps by calling usb_reset_configuration() instead of usb_set_configuration(). A SetConfiguration() request will reset the device side data toggles. Make sure usb_reset_configuration() function also drops and adds back the endpoints to ensure data toggles are in sync. To avoid code duplication split the current usb_disable_device() function and reuse the endpoint specific part. Cc: stable Tested-by: Martin Thierer Signed-off-by: Mathias Nyman Link: https://lore.kernel.org/r/20200901082528.12557-1-mathias.nyman@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/message.c | 91 ++++++++++++++++++-------------------- 1 file changed, 42 insertions(+), 49 deletions(-) diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index f705ea52eb97..152228d33ad2 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1204,6 +1204,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, } } +/* + * usb_disable_device_endpoints -- Disable all endpoints for a device + * @dev: the device whose endpoints are being disabled + * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. + */ +static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) +{ + struct usb_hcd *hcd = bus_to_hcd(dev->bus); + int i; + + if (hcd->driver->check_bandwidth) { + /* First pass: Cancel URBs, leave endpoint pointers intact. 
*/ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, false); + usb_disable_endpoint(dev, i + USB_DIR_IN, false); + } + /* Remove endpoints from the host controller internal state */ + mutex_lock(hcd->bandwidth_mutex); + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + mutex_unlock(hcd->bandwidth_mutex); + } + /* Second pass: remove endpoint pointers */ + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, true); + usb_disable_endpoint(dev, i + USB_DIR_IN, true); + } +} + /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled @@ -1217,7 +1245,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; - struct usb_hcd *hcd = bus_to_hcd(dev->bus); /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) @@ -1263,22 +1290,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, skip_ep0 ? "non-ep0" : "all"); - if (hcd->driver->check_bandwidth) { - /* First pass: Cancel URBs, leave endpoint pointers intact. */ - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, false); - usb_disable_endpoint(dev, i + USB_DIR_IN, false); - } - /* Remove endpoints from the host controller internal state */ - mutex_lock(hcd->bandwidth_mutex); - usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - mutex_unlock(hcd->bandwidth_mutex); - /* Second pass: remove endpoint pointers */ - } - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + + usb_disable_device_endpoints(dev, skip_ep0); } /** @@ -1521,6 +1534,9 @@ EXPORT_SYMBOL_GPL(usb_set_interface); * The caller must own the device lock. * * Return: Zero on success, else a negative error code. 
+ * + * If this routine fails the device will probably be in an unusable state + * with endpoints disabled, and interfaces only partially enabled. */ int usb_reset_configuration(struct usb_device *dev) { @@ -1536,10 +1552,7 @@ int usb_reset_configuration(struct usb_device *dev) * calls during probe() are fine */ - for (i = 1; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } + usb_disable_device_endpoints(dev, 1); /* skip ep0*/ config = dev->actconfig; retval = 0; @@ -1552,34 +1565,10 @@ int usb_reset_configuration(struct usb_device *dev) mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } - /* Make sure we have enough bandwidth for each alternate setting 0 */ - for (i = 0; i < config->desc.bNumInterfaces; i++) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - retval = usb_hcd_alloc_bandwidth(dev, NULL, - intf->cur_altsetting, alt); - if (retval < 0) - break; - } - /* If not, reinstate the old alternate settings */ + /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ + retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); if (retval < 0) { -reset_old_alts: - for (i--; i >= 0; i--) { - struct usb_interface *intf = config->interface[i]; - struct usb_host_interface *alt; - - alt = usb_altnum_to_altsetting(intf, 0); - if (!alt) - alt = &intf->altsetting[0]; - if (alt != intf->cur_altsetting) - usb_hcd_alloc_bandwidth(dev, NULL, - alt, intf->cur_altsetting); - } usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; @@ -1588,8 +1577,12 @@ int usb_reset_configuration(struct usb_device *dev) USB_REQ_SET_CONFIGURATION, 0, config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); - if (retval < 0) - goto reset_old_alts; + if (retval < 0) { + usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + usb_enable_lpm(dev); + 
mutex_unlock(hcd->bandwidth_mutex); + return retval; + } mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ -- GitLab From 923e11ef2afa63e2a637f1e5ff989e8197c95f58 Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Fri, 4 Sep 2020 14:09:18 +0300 Subject: [PATCH 0787/1304] usb: typec: ucsi: acpi: Check the _DEP dependencies commit 1f3546ff3f0a1000971daef58406954bad3f7061 upstream. Failing probe with -EPROBE_DEFER until all dependencies listed in the _DEP (Operation Region Dependencies) object have been met. This will fix an issue where on some platforms UCSI ACPI driver fails to probe because the address space handler for the operation region that the UCSI ACPI interface uses has not been loaded yet. Fixes: 8243edf44152 ("usb: typec: ucsi: Add ACPI driver") Cc: stable@vger.kernel.org Signed-off-by: Heikki Krogerus Link: https://lore.kernel.org/r/20200904110918.51546-1-heikki.krogerus@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/ucsi/ucsi_acpi.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index a18112a83fae..dda8bd39c918 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -64,11 +64,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) static int ucsi_acpi_probe(struct platform_device *pdev) { + struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct ucsi_acpi *ua; struct resource *res; acpi_status status; int ret; + if (adev->dep_unmet) + return -EPROBE_DEFER; + ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL); if (!ua) return -ENOMEM; -- GitLab From 87bf8f8733c67efaf7f09195a785764caddb2c43 Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter Date: Thu, 10 Sep 2020 14:52:01 +0200 Subject: [PATCH 0788/1304] gcov: add support for GCC 10.1 [ Upstream commit 40249c6962075c040fd071339acae524f18bfac9 ] Using gcov to collect coverage data for kernels compiled 
with GCC 10.1 causes random malfunctions and kernel crashes. This is the result of a changed GCOV_COUNTERS value in GCC 10.1 that causes a mismatch between the layout of the gcov_info structure created by GCC profiling code and the related structure used by the kernel. Fix this by updating the in-kernel GCOV_COUNTERS value. Also re-enable config GCOV_KERNEL for use with GCC 10. Reported-by: Colin Ian King Reported-by: Leon Romanovsky Signed-off-by: Peter Oberparleiter Tested-by: Leon Romanovsky Tested-and-Acked-by: Colin Ian King Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- kernel/gcov/Kconfig | 1 - kernel/gcov/gcc_4_7.c | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig index bfb6579a19d0..1e3823fa799b 100644 --- a/kernel/gcov/Kconfig +++ b/kernel/gcov/Kconfig @@ -3,7 +3,6 @@ menu "GCOV-based kernel profiling" config GCOV_KERNEL bool "Enable gcov-based kernel profiling" depends on DEBUG_FS - depends on !CC_IS_GCC || GCC_VERSION < 100000 select CONSTRUCTORS if !UML default n ---help--- diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index ca5e5c0ef853..5b9e76117ded 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -19,7 +19,9 @@ #include #include "gcov.h" -#if (__GNUC__ >= 7) +#if (__GNUC__ >= 10) +#define GCOV_COUNTERS 8 +#elif (__GNUC__ >= 7) #define GCOV_COUNTERS 9 #elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) #define GCOV_COUNTERS 10 -- GitLab From 015e94d0e37b6860e4354ce3cac56bd7c39c8992 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 17 Sep 2020 13:45:31 +0200 Subject: [PATCH 0789/1304] Linux 4.19.146 Tested-by: Shuah Khan Tested-by: Jon Hunter Tested-by: Linux Kernel Functional Testing Tested-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6bf851efcabe..aaeb3f3dbcea 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ 
# SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 145 +SUBLEVEL = 146 EXTRAVERSION = NAME = "People's Front" -- GitLab From 9e7399f6fad617dd184113bc4dd9d41ce0773bca Mon Sep 17 00:00:00 2001 From: Matthias Maennich Date: Tue, 15 Sep 2020 14:45:04 +0100 Subject: [PATCH 0790/1304] ANDROID: KMI symbol lists: migrate section name Libabigail learned to accept 'symbol_list' as a valid suffix for symbol list sections. Hence make use of it consistently. Bug: 162536543 Signed-off-by: Matthias Maennich Change-Id: I1454b0068769c9e57a533dafb6267e63adb7ceb0 --- android/abi_gki_aarch64_cuttlefish | 2 +- android/abi_gki_aarch64_qcom | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/android/abi_gki_aarch64_cuttlefish b/android/abi_gki_aarch64_cuttlefish index d8dd477a8d31..01a2267410a8 100644 --- a/android/abi_gki_aarch64_cuttlefish +++ b/android/abi_gki_aarch64_cuttlefish @@ -1,4 +1,4 @@ -[abi_whitelist] +[abi_symbol_list] # commonly used symbols add_wait_queue alloc_etherdev_mqs diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index 8fde29744153..88d3768d176d 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -1,4 +1,4 @@ -[abi_whitelist] +[abi_symbol_list] # commonly used symbols add_timer add_uevent_var -- GitLab From 17670c1a715f93a6c0cf65555f8ae335bce86188 Mon Sep 17 00:00:00 2001 From: Matthias Maennich Date: Tue, 22 Sep 2020 17:33:09 +0100 Subject: [PATCH 0791/1304] ANDROID: Refresh ABI.xmls with libabigail 1.8.0-1dca710a This upgrades some types from declaration-only tracking to full type tracking, but creates this one-time churn. 
Bug: 158736583 Signed-off-by: Matthias Maennich Change-Id: I39f778d7660fb4065eec2ecb6dd1ed47816e25b6 --- android/abi_gki_aarch64 | 2 +- android/abi_gki_aarch64.xml | 3971 +++++++++++++++++++++++++++++++---- 2 files changed, 3589 insertions(+), 384 deletions(-) diff --git a/android/abi_gki_aarch64 b/android/abi_gki_aarch64 index 9f00353c4e55..7bb7e004c3de 100644 --- a/android/abi_gki_aarch64 +++ b/android/abi_gki_aarch64 @@ -1,4 +1,4 @@ -[abi_whitelist] +[abi_symbol_list] # commonly used symbols __cfi_slowpath __const_udelay diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml index faeca9f0ee19..b80128fc7bb9 100644 --- a/android/abi_gki_aarch64.xml +++ b/android/abi_gki_aarch64.xml @@ -2749,59 +2749,37 @@ - - - - - - - - - - - - - - - - - - - - - - @@ -7385,11 +7363,114 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -7493,6 +7574,35 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -7540,6 +7650,23 @@ + + + + + + + + + + + + + + + + + @@ -7999,7 +8126,6 @@ - @@ -8625,6 +8751,35 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -8687,6 +8842,35 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -8700,6 +8884,50 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -9118,49 +9346,26 @@ - - - - - - - - - - - - - - - - - - - - - - - @@ -9173,7 +9378,6 @@ - @@ -13828,6 +14032,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -14189,6 +14458,20 @@ + + + + + + + + + + + + + + @@ -14212,6 +14495,17 @@ + + + + + + + + + + + @@ -15390,6 +15684,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -15831,6 +16160,29 @@ + + + + + + + + + + + + + + + + + + + + + + + @@ -15900,6 +16252,20 @@ + + + + + + 
+ + + + + + + + @@ -16021,7 +16387,7 @@ - + @@ -16194,7 +16560,7 @@ - + @@ -16310,7 +16676,7 @@ - + @@ -16521,26 +16887,6 @@ - - - - - - - - - - - - - - - - - - - - @@ -16600,7 +16946,7 @@ - + @@ -16619,7 +16965,7 @@ - + @@ -16870,7 +17216,6 @@ - @@ -17277,7 +17622,6 @@ - @@ -18113,7 +18457,7 @@ - + @@ -18382,7 +18726,6 @@ - @@ -18636,7 +18979,6 @@ - @@ -18947,6 +19289,154 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -19116,7 +19606,6 @@ - @@ -20021,7 +20510,6 @@ - @@ -21726,6 +22214,61 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -22033,6 +22576,20 @@ + + + + + + + + + + + + + + @@ -22479,13 +23036,9 @@ - - - - @@ -27974,6 +28527,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -28175,6 +28748,14 @@ + + + + + + + + @@ -28388,6 +28969,23 @@ + + + + + + + + + + + + + + + + + @@ -30624,17 +31222,6 @@ - - - - - - - - - - - @@ -30676,6 +31263,17 @@ + + + + + + + + + + + @@ -30951,7 +31549,7 @@ - + @@ -31050,6 +31648,17 @@ + + + + + + + + + + + @@ -31784,23 +32393,6 @@ - - - - - - - - - - - - - - - - - @@ -31827,7 +32419,6 @@ - @@ -32857,6 +33448,29 @@ + + + + + + + + + + + + + + + + + + + + + + + @@ -32959,6 +33573,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -33015,18 +33649,7 @@ - - - - - - - - - - - - + @@ -33203,12 +33826,47 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -33329,7 +33987,7 @@ - + @@ -33348,7 +34006,6 @@ - @@ -33357,6 +34014,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -34168,8 +34859,6 @@ - - @@ -34203,7 +34892,7 @@ - + @@ -34290,7 +34979,7 @@ - + @@ -35106,7 +35795,7 @@ - + @@ -35177,6 +35866,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ @@ -35278,6 +35999,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -35292,6 +36045,20 @@ + + + + + + + + + + + + + + @@ -35982,7 +36749,6 @@ - @@ -36198,7 +36964,7 @@ - + @@ -37230,7 +37996,7 @@ - + @@ -39788,8 +40554,6 @@ - - @@ -40338,6 +41102,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -40974,7 +41764,7 @@ - + @@ -41116,6 +41906,80 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -41130,6 +41994,17 @@ + + + + + + + + + + + @@ -41362,6 +42237,11 @@ + + + + + @@ -41378,6 +42258,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -41688,15 +42594,11 @@ - - - - @@ -41739,6 +42641,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -42495,6 +43417,23 @@ + + + + + + + + + + + + + + + + + @@ -44148,6 +45087,11 @@ + + + + + @@ -44523,6 +45467,20 @@ + + + + + + + + + + + + + + @@ -44741,6 +45699,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -44761,6 +45750,173 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -44769,6 +45925,112 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -45136,6 +46398,29 @@ + + + + + + + + + + + + + + + + + + + + + + + @@ -45767,6 +47052,17 @@ + + + + + + + + + + + @@ -47344,6 +48640,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -47810,6 +49176,82 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -47881,7 +49323,6 @@ - @@ -47994,6 +49435,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -48188,7 +49667,6 @@ - @@ -48265,6 +49743,14 @@ + + + + + + + + @@ -48361,6 +49847,23 @@ + + + + + + + + + + + + + + + + + @@ -48769,7 +50272,6 @@ - @@ -48955,7 +50457,7 @@ - + @@ -49870,6 +51372,23 @@ + + + + + + + + + + + + + + + + + @@ -49883,31 +51402,31 @@ - - - + + + - - - + + + - - - + + + - - + + - - + + - - + + @@ -49916,50 +51435,50 @@ - - - - + + + + - - - + + + - - + + - - - + + + - - - + + + - - - + + + - - - + + + - - - + + + - - - - + + + + @@ -49967,20 +51486,20 @@ - - + + - - + + - - + + - - + + @@ -50029,7 +51548,6 @@ - @@ -50784,6 +52302,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -50969,7 +52513,6 @@ - @@ -52068,6 +53611,17 @@ + + + + + + + + + + + @@ -52481,23 +54035,6 @@ - - - - - - - - - - - - - - - - - @@ -52845,7 +54382,7 @@ - + @@ -52855,6 +54392,20 @@ + + + + + + + + + + + + + + @@ -53913,6 +55464,14 @@ + + + + + + + + @@ -54095,7 +55654,6 @@ - @@ -54581,6 +56139,23 @@ + + + + + + + + + + + + + + + + + @@ -54854,7 +56429,7 @@ - + @@ -57125,6 +58700,20 @@ + + + + + + + + + + + + + + @@ -57160,7 +58749,6 @@ - @@ -58005,8 +59593,6 @@ - - @@ -59514,6 +61100,23 @@ + + + + + + + + + + + + + + + + + @@ -62365,7 +63968,7 @@ - + @@ -64136,6 +65739,20 @@ + + + + + + + + + + + + + + @@ -64155,6 +65772,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -64198,15 +65853,21 @@ - - - + + + - - + + - - + + + + + + + + @@ -64315,6 +65976,49 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -64384,6 +66088,29 @@ + + + + + + + + + + + + + + + + + + + + + + + @@ -64816,6 +66543,80 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -64970,7 +66771,7 @@ - + @@ 
-65252,6 +67053,44 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -65373,7 +67212,6 @@ - @@ -65521,7 +67359,6 @@ - @@ -65577,6 +67414,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -65804,6 +67667,146 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -66117,6 +68120,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -66192,7 +68227,23 @@ - + + + + + + + + + + + + + + + + + @@ -66471,6 +68522,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -66540,10 +68617,6 @@ - - - - @@ -66555,6 +68628,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -66662,53 +68833,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -66767,6 +68891,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -66830,6 +68974,29 @@ + + + + + + + + + + + + + + + + + + + + + + + @@ -66902,6 +69069,22 @@ + + + + + + + + + + + + + + + + @@ -67085,7 +69268,7 @@ - + @@ -67308,17 +69491,6 @@ - - - - - - - - - - - @@ -67336,7 +69508,6 @@ - @@ -67410,6 +69581,17 @@ + + + + + + + + + + + @@ -67418,6 +69600,17 @@ + + + + + + + + + + + @@ -67556,6 +69749,17 @@ + + + + + + + + + + + @@ -67975,6 +70179,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -68744,6 +71019,29 @@ + + + + + + + + + + + + + + + + + + + + + + + @@ -69020,67 +71318,38 @@ - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - @@ -70682,7 +72951,7 @@ - + @@ -70966,6 +73235,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -71642,6 
+73931,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -72015,7 +74357,7 @@ - + @@ -72750,6 +75092,14 @@ + + + + + + + + @@ -73873,7 +76223,7 @@ - + @@ -75626,7 +77976,7 @@ - + @@ -76071,6 +78421,23 @@ + + + + + + + + + + + + + + + + + @@ -76141,7 +78508,7 @@ - + @@ -76214,7 +78581,7 @@ - + @@ -77109,6 +79476,17 @@ + + + + + + + + + + + @@ -78369,7 +80747,7 @@ - + @@ -78409,7 +80787,7 @@ - + @@ -78913,21 +81291,21 @@ - + - + - - + + - - + + @@ -80111,7 +82489,7 @@ - + @@ -80510,7 +82888,7 @@ - + @@ -80518,7 +82896,7 @@ - + @@ -80879,6 +83257,20 @@ + + + + + + + + + + + + + + @@ -80924,6 +83316,8 @@ + + @@ -80979,7 +83373,7 @@ - + @@ -81008,7 +83402,7 @@ - + @@ -81129,6 +83523,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -81196,11 +83660,32 @@ + + + + + + + + + + + + + + + + + + + + + @@ -81314,6 +83799,106 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -81372,6 +83957,77 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -81413,6 +84069,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -81552,6 +84236,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -82353,6 +85102,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -82387,6 +85156,26 @@ + + + + + + + + + + + + + + + + + + + + @@ -82657,6 +85446,92 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -82930,7 +85805,6 @@ - @@ -83026,6 +85900,29 @@ + + + + + + + + + + + + + 
+ + + + + + + + + + @@ -83138,6 +86035,14 @@ + + + + + + + + @@ -83242,6 +86147,23 @@ + + + + + + + + + + + + + + + + + @@ -84138,20 +87060,6 @@ - - - - - - - - - - - - - - @@ -84600,6 +87508,38 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -84632,6 +87572,35 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -84722,6 +87691,14 @@ + + + + + + + + @@ -84729,7 +87706,6 @@ - @@ -84942,6 +87918,17 @@ + + + + + + + + + + + @@ -85330,6 +88317,11 @@ + + + + + @@ -85376,12 +88368,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -85642,6 +88662,14 @@ + + + + + + + + @@ -85672,22 +88700,6 @@ - - - - - - - - - - - - - - - - @@ -85722,6 +88734,22 @@ + + + + + + + + + + + + + + + + @@ -86037,6 +89065,14 @@ + + + + + + + + @@ -86070,6 +89106,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -86096,6 +89158,14 @@ + + + + + + + + @@ -86765,6 +89835,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -86772,6 +89898,25 @@ + + + + + + + + + + + + + + + + + + + @@ -86999,7 +90144,6 @@ - @@ -92340,6 +95484,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -93367,6 +96537,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -98017,6 +101222,6 @@ -- GitLab From 65e389d8656e04252b0080416fe544a7ea8a6d76 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Sat, 20 Jun 2020 21:39:25 +0200 Subject: [PATCH 0792/1304] dsa: Allow forwarding of redirected IGMP traffic commit 1ed9ec9b08addbd8d3e36d5f4a652d8590a6ddb7 upstream. The driver for Marvell switches puts all ports in IGMP snooping mode which results in all IGMP/MLD frames that ingress on the ports to be forwarded to the CPU only. The bridge code in the kernel can then interpret these frames and act upon them, for instance by updating the mdb in the switch to reflect multicast memberships of stations connected to the ports. 
However, the IGMP/MLD frames must then also be forwarded to other ports of the bridge so external IGMP queriers can track membership reports, and external multicast clients can receive query reports from foreign IGMP queriers. Currently, this is impossible as the EDSA tagger sets offload_fwd_mark on the skb when it unwraps the tagged frames, and that will make the switchdev layer prevent the skb from egressing on any other port of the same switch. To fix that, look at the To_CPU code in the DSA header and make forwarding of the frame possible for trapped IGMP packets. Introduce some #defines for the frame types to make the code a bit more comprehensive. This was tested on a Marvell 88E6352 variant. Signed-off-by: Daniel Mack Reviewed-by: Andrew Lunn Tested-by: Andrew Lunn Signed-off-by: David S. Miller Cc: DENG Qingfang Signed-off-by: Greg Kroah-Hartman --- net/dsa/tag_edsa.c | 37 ++++++++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 4083326b806e..d62d28d358d9 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -17,6 +17,16 @@ #define DSA_HLEN 4 #define EDSA_HLEN 8 +#define FRAME_TYPE_TO_CPU 0x00 +#define FRAME_TYPE_FORWARD 0x03 + +#define TO_CPU_CODE_MGMT_TRAP 0x00 +#define TO_CPU_CODE_FRAME2REG 0x01 +#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02 +#define TO_CPU_CODE_POLICY_TRAP 0x03 +#define TO_CPU_CODE_ARP_MIRROR 0x04 +#define TO_CPU_CODE_POLICY_MIRROR 0x05 + static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); @@ -81,6 +91,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { u8 *edsa_header; + int frame_type; + int code; int source_device; int source_port; @@ -95,8 +107,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, /* * Check that frame type is either TO_CPU or FORWARD. 
*/ - if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0) + frame_type = edsa_header[0] >> 6; + + switch (frame_type) { + case FRAME_TYPE_TO_CPU: + code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1); + + /* + * Mark the frame to never egress on any port of the same switch + * unless it's a trapped IGMP/MLD packet, in which case the + * bridge might want to forward it. + */ + if (code != TO_CPU_CODE_IGMP_MLD_TRAP) + skb->offload_fwd_mark = 1; + + break; + + case FRAME_TYPE_FORWARD: + skb->offload_fwd_mark = 1; + break; + + default: return NULL; + } /* * Determine source device and port. @@ -160,8 +193,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, 2 * ETH_ALEN); } - skb->offload_fwd_mark = 1; - return skb; } -- GitLab From 7d559fcb3cf68b0ff7aa044b503e15f6c7a8d08c Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 31 Aug 2018 11:24:29 -0700 Subject: [PATCH 0793/1304] scsi: qla2xxx: Update rscn_rcvd field to more meaningful scan_needed commit cb873ba4002095d1e2fc60521bc4d860c7b72b92 upstream. Rename rscn_rcvd field to scan_needed to be more meaningful. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen Signed-off-by: Zhengyuan Liu Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/qla2xxx/qla_def.h | 2 +- drivers/scsi/qla2xxx/qla_gs.c | 12 ++++++------ drivers/scsi/qla2xxx/qla_init.c | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index a9dc9c4a6382..c41d0dbbbd79 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2351,7 +2351,7 @@ typedef struct fc_port { unsigned int login_succ:1; unsigned int query:1; unsigned int id_changed:1; - unsigned int rscn_rcvd:1; + unsigned int scan_needed:1; struct work_struct nvme_del_work; struct completion nvme_del_done; diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index f621cb55ccfb..01cd977eb55e 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3973,7 +3973,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) list_for_each_entry(fcport, &vha->vp_fcports, list) { if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) continue; - fcport->rscn_rcvd = 0; + fcport->scan_needed = 0; fcport->scan_state = QLA_FCPORT_FOUND; found = true; /* @@ -4009,12 +4009,12 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) */ list_for_each_entry(fcport, &vha->vp_fcports, list) { if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { - fcport->rscn_rcvd = 0; + fcport->scan_needed = 0; continue; } if (fcport->scan_state != QLA_FCPORT_FOUND) { - fcport->rscn_rcvd = 0; + fcport->scan_needed = 0; if ((qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { @@ -4033,7 +4033,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) } } } else { - if (fcport->rscn_rcvd || + if (fcport->scan_needed || fcport->disc_state != DSC_LOGIN_COMPLETE) { if (fcport->login_retry == 0) { fcport->login_retry = @@ -4043,7 +4043,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) 
fcport->port_name, fcport->loop_id, fcport->login_retry); } - fcport->rscn_rcvd = 0; + fcport->scan_needed = 0; qla24xx_fcport_handle_login(vha, fcport); } } @@ -4058,7 +4058,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) if (recheck) { list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->rscn_rcvd) { + if (fcport->scan_needed) { set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); break; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f45759b353be..e5c86c873e95 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1573,7 +1573,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) fcport = qla2x00_find_fcport_by_nportid (vha, &ea->id, 1); if (fcport) - fcport->rscn_rcvd = 1; + fcport->scan_needed = 1; spin_lock_irqsave(&vha->work_lock, flags); if (vha->scan.scan_flags == 0) { -- GitLab From 8084042b063f8479a7163654254dd838a697240d Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 31 Aug 2018 11:24:31 -0700 Subject: [PATCH 0794/1304] scsi: qla2xxx: Move rport registration out of internal work_list commit cd4ed6b470f1569692b5d0d295b207f870570829 upstream. Currently, the rport registration is being called from a single work element that is used to process QLA internal "work_list". This work_list is meant for quick and simple task (ie no sleep). The Rport registration process sometime can be delayed by upper layer. This causes back pressure with the internal queue where other jobs are unable to move forward. This patch will schedule the registration process with a new work element (fc_port.reg_work). While the RPort is being registered, the current state of the fcport will not move forward until the registration is done. If the state of the fabric has changed, a new field/next_disc_state will record the next action on whether to 'DELETE' or 'Reverify the session/ADISC'. 
Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen Signed-off-by: Zhengyuan Liu Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/qla2xxx/qla_def.h | 6 ++- drivers/scsi/qla2xxx/qla_gbl.h | 5 +- drivers/scsi/qla2xxx/qla_init.c | 66 ++++++++++++++++++++---- drivers/scsi/qla2xxx/qla_os.c | 26 ++++++---- drivers/scsi/qla2xxx/qla_target.c | 83 +++++++++++++++++++++++++------ 5 files changed, 147 insertions(+), 39 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c41d0dbbbd79..16dd59bcd60a 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2375,11 +2375,13 @@ typedef struct fc_port { unsigned long expires; struct list_head del_list_entry; struct work_struct free_work; - + struct work_struct reg_work; + uint64_t jiffies_at_registration; struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; uint16_t tgt_id; uint16_t old_tgt_id; + uint16_t sec_since_registration; uint8_t fcp_prio; @@ -2412,6 +2414,7 @@ typedef struct fc_port { struct qla_tgt_sess *tgt_session; struct ct_sns_desc ct_desc; enum discovery_state disc_state; + enum discovery_state next_disc_state; enum login_state fw_login_state; unsigned long dm_login_expire; unsigned long plogi_nack_done_deadline; @@ -3222,7 +3225,6 @@ enum qla_work_type { QLA_EVT_GPDB, QLA_EVT_PRLI, QLA_EVT_GPSC, - QLA_EVT_UPD_FCPORT, QLA_EVT_GNL, QLA_EVT_NACK, QLA_EVT_RELOGIN, diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 178974896b5c..b8e4abe804d5 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -54,7 +54,7 @@ extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); extern void qla2x00_quiesce_io(scsi_qla_host_t *); extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); - +void qla_register_fcport_fn(struct work_struct *); extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); 
@@ -109,6 +109,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*, int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); int qla24xx_detect_sfp(scsi_qla_host_t *vha); int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); + void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *, @@ -208,7 +209,7 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *); extern void qla2x00_sp_compl(void *, int); extern void qla2xxx_qpair_sp_free_dma(void *); extern void qla2xxx_qpair_sp_compl(void *, int); -extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); +extern void qla24xx_sched_upd_fcport(fc_port_t *); void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, uint16_t *); int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e5c86c873e95..4460c841d14b 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1204,11 +1204,7 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) vha->fcport_count++; ea->fcport->login_succ = 1; - ql_dbg(ql_dbg_disc, vha, 0x20d6, - "%s %d %8phC post upd_fcport fcp_cnt %d\n", - __func__, __LINE__, ea->fcport->port_name, - vha->fcport_count); - qla24xx_post_upd_fcport_work(vha, ea->fcport); + qla24xx_sched_upd_fcport(ea->fcport); } else if (ea->fcport->login_succ) { /* * We have an existing session. 
A late RSCN delivery @@ -1326,6 +1322,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) { u16 data[2]; u64 wwn; + u16 sec; ql_dbg(ql_dbg_disc, vha, 0x20d8, "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n", @@ -1457,6 +1454,22 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) qla24xx_post_prli_work(vha, fcport); break; + case DSC_UPD_FCPORT: + sec = jiffies_to_msecs(jiffies - + fcport->jiffies_at_registration)/1000; + if (fcport->sec_since_registration < sec && sec && + !(sec % 60)) { + fcport->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, fcport->vha, 0xffff, + "%s %8phC - Slow Rport registration(%d Sec)\n", + __func__, fcport->port_name, sec); + } + + if (fcport->next_disc_state != DSC_DELETE_PEND) + fcport->next_disc_state = DSC_ADISC; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + default: break; } @@ -1572,8 +1585,10 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) case RSCN_PORT_ADDR: fcport = qla2x00_find_fcport_by_nportid (vha, &ea->id, 1); - if (fcport) + if (fcport) { fcport->scan_needed = 1; + fcport->rscn_gen++; + } spin_lock_irqsave(&vha->work_lock, flags); if (vha->scan.scan_flags == 0) { @@ -4741,6 +4756,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) return NULL; } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); + INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); INIT_LIST_HEAD(&fcport->list); @@ -5221,13 +5237,15 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) void qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { - fcport->vha = vha; - if (IS_SW_RESV_ADDR(fcport->d_id)) return; + ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", + __func__, fcport->port_name); + + fcport->disc_state = DSC_UPD_FCPORT; + fcport->login_retry = vha->hw->login_retry_count; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); - 
fcport->disc_state = DSC_LOGIN_COMPLETE; fcport->deleted = 0; fcport->logout_on_delete = 1; fcport->login_retry = vha->hw->login_retry_count; @@ -5289,6 +5307,36 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) } } qla2x00_set_fcport_state(fcport, FCS_ONLINE); + + fcport->disc_state = DSC_LOGIN_COMPLETE; +} + +void qla_register_fcport_fn(struct work_struct *work) +{ + fc_port_t *fcport = container_of(work, struct fc_port, reg_work); + u32 rscn_gen = fcport->rscn_gen; + u16 data[2]; + + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; + + qla2x00_update_fcport(fcport->vha, fcport); + + if (rscn_gen != fcport->rscn_gen) { + /* RSCN(s) came in while registration */ + switch (fcport->next_disc_state) { + case DSC_DELETE_PEND: + qlt_schedule_sess_for_deletion(fcport); + break; + case DSC_ADISC: + data[0] = data[1] = 0; + qla2x00_post_async_adisc_work(fcport->vha, fcport, + data); + break; + default: + break; + } + } } /* diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e17ca7df8d0e..102c6b9f31ae 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -4792,16 +4792,25 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode, return qla2x00_post_work(vha, e); } -int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport) +void qla24xx_sched_upd_fcport(fc_port_t *fcport) { - struct qla_work_evt *e; + unsigned long flags; - e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT); - if (!e) - return QLA_FUNCTION_FAILED; + if (IS_SW_RESV_ADDR(fcport->d_id)) + return; - e->u.fcport.fcport = fcport; - return qla2x00_post_work(vha, e); + spin_lock_irqsave(&fcport->vha->work_lock, flags); + if (fcport->disc_state == DSC_UPD_FCPORT) { + spin_unlock_irqrestore(&fcport->vha->work_lock, flags); + return; + } + fcport->jiffies_at_registration = jiffies; + fcport->sec_since_registration = 0; + fcport->next_disc_state = DSC_DELETED; + fcport->disc_state = DSC_UPD_FCPORT; + 
spin_unlock_irqrestore(&fcport->vha->work_lock, flags); + + queue_work(system_unbound_wq, &fcport->reg_work); } static @@ -5057,9 +5066,6 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_GPSC: qla24xx_async_gpsc(vha, e->u.fcport.fcport); break; - case QLA_EVT_UPD_FCPORT: - qla2x00_update_fcport(vha, e->u.fcport.fcport); - break; case QLA_EVT_GNL: qla24xx_async_gnl(vha, e->u.fcport.fcport); break; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 95206e227730..7b14204c79dd 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -600,14 +600,7 @@ void qla2x00_async_nack_sp_done(void *s, int res) sp->fcport->login_succ = 1; vha->fcport_count++; - - ql_dbg(ql_dbg_disc, vha, 0x20f3, - "%s %d %8phC post upd_fcport fcp_cnt %d\n", - __func__, __LINE__, - sp->fcport->port_name, - vha->fcport_count); - sp->fcport->disc_state = DSC_UPD_FCPORT; - qla24xx_post_upd_fcport_work(vha, sp->fcport); + qla24xx_sched_upd_fcport(sp->fcport); } else { sp->fcport->login_retry = 0; sp->fcport->disc_state = DSC_LOGIN_COMPLETE; @@ -1227,11 +1220,12 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) { struct qla_tgt *tgt = sess->tgt; unsigned long flags; + u16 sec; - if (sess->disc_state == DSC_DELETE_PEND) + switch (sess->disc_state) { + case DSC_DELETE_PEND: return; - - if (sess->disc_state == DSC_DELETED) { + case DSC_DELETED: if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) wake_up_all(&tgt->waitQ); if (sess->vha->fcport_count == 0) @@ -1240,6 +1234,24 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) return; + break; + case DSC_UPD_FCPORT: + /* + * This port is not done reporting to upper layer. 
+ * let it finish + */ + sess->next_disc_state = DSC_DELETE_PEND; + sec = jiffies_to_msecs(jiffies - + sess->jiffies_at_registration)/1000; + if (sess->sec_since_registration < sec && sec && !(sec % 5)) { + sess->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + "%s %8phC : Slow Rport registration(%d Sec)\n", + __func__, sess->port_name, sec); + } + return; + default: + break; } if (sess->deleted == QLA_SESS_DELETED) @@ -4749,6 +4761,32 @@ static int qlt_handle_login(struct scsi_qla_host *vha, goto out; } + if (sess->disc_state == DSC_UPD_FCPORT) { + u16 sec; + + /* + * Remote port registration is still going on from + * previous login. Allow it to finish before we + * accept the new login. + */ + sess->next_disc_state = DSC_DELETE_PEND; + sec = jiffies_to_msecs(jiffies - + sess->jiffies_at_registration) / 1000; + if (sess->sec_since_registration < sec && sec && + !(sec % 5)) { + sess->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC - Slow Rport registration (%d Sec)\n", + __func__, sess->port_name, sec); + } + + if (!conflict_sess) + kmem_cache_free(qla_tgt_plogi_cachep, pla); + + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; + } + qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); sess->d_id = port_id; sess->login_gen++; @@ -4908,6 +4946,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, if (sess != NULL) { bool delete = false; + int sec; spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); switch (sess->fw_login_state) { case DSC_LS_PLOGI_PEND: @@ -4920,9 +4959,24 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, } switch (sess->disc_state) { + case DSC_UPD_FCPORT: + spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, + flags); + + sec = jiffies_to_msecs(jiffies - + sess->jiffies_at_registration)/1000; + if (sess->sec_since_registration < sec && sec && + !(sec % 5)) { + sess->sec_since_registration = sec; + ql_dbg(ql_dbg_disc, sess->vha, 0xffff, + "%s %8phC : Slow Rport 
registration(%d Sec)\n", + __func__, sess->port_name, sec); + } + qlt_send_term_imm_notif(vha, iocb, 1); + return 0; + case DSC_LOGIN_PEND: case DSC_GPDB: - case DSC_UPD_FCPORT: case DSC_LOGIN_COMPLETE: case DSC_ADISC: delete = false; @@ -5959,10 +6013,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, case MODE_DUAL: if (newfcport) { if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { - ql_dbg(ql_dbg_disc, vha, 0x20fe, - "%s %d %8phC post upd_fcport fcp_cnt %d\n", - __func__, __LINE__, fcport->port_name, vha->fcport_count); - qla24xx_post_upd_fcport_work(vha, fcport); + qla24xx_sched_upd_fcport(fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ff, "%s %d %8phC post gpsc fcp_cnt %d\n", -- GitLab From 6486bc4bc514c91d26f1c00add2aec55b009e359 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Tue, 4 Sep 2018 14:19:16 -0700 Subject: [PATCH 0795/1304] scsi: qla2xxx: Reduce holding sess_lock to prevent CPU lock-up commit 0aca77843e2803bf4fab1598b7891c56c16be979 upstream. - Reduce sess_lock holding to prevent CPU Lock up. sess_lock was held across fc_port registration and deletion. These calls can be blocked by upper layer. Sess_lock is also being accessed by interrupt thread. - Reduce number of loops in processing work_list to prevent kernel complaint of CPU lockup or holding sess_lock. Reported-by: Zhengyuan Liu Tested-by: Zhengyuan Liu Fixes: 9ba1cb25c151 ("scsi: qla2xxx: Remove all rports if fabric scan retry fails") Link: https://lore.kernel.org/linux-scsi/D01377DD-2E86-427B-BA0C-8D7649E37870@oracle.com/T/#t Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/qla2xxx/qla_def.h | 2 +- drivers/scsi/qla2xxx/qla_gs.c | 18 +++++++++++------ drivers/scsi/qla2xxx/qla_init.c | 33 ++++++++++++++++--------------- drivers/scsi/qla2xxx/qla_os.c | 3 +-- drivers/scsi/qla2xxx/qla_target.c | 2 ++ 5 files changed, 33 insertions(+), 25 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 16dd59bcd60a..47835d26a973 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -262,8 +262,8 @@ struct name_list_extended { struct get_name_list_extended *l; dma_addr_t ldma; struct list_head fcports; - spinlock_t fcports_lock; u32 size; + u8 sent; }; /* * Timeout timer counts in seconds diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 01cd977eb55e..c3195d4c25e5 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -4018,11 +4018,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) if ((qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { - qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); + if (fcport->loop_id != FC_NO_LOOP_ID) { + if (fcport->flags & FCF_FCP2_DEVICE) + fcport->logout_on_delete = 0; - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0) { ql_dbg(ql_dbg_disc, vha, 0x20f0, "%s %d %8phC post del sess\n", __func__, __LINE__, @@ -4261,12 +4260,13 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) sp->rc = res; rc = qla2x00_post_nvme_gpnft_done_work(vha, sp, QLA_EVT_GPNFT); - if (!rc) { + if (rc) { qla24xx_sp_unmap(vha, sp); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); return; } + return; } if (cmd == GPN_FT_CMD) { @@ -4316,6 +4316,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, vha->scan.scan_flags &= ~SF_SCANNING; 
spin_unlock_irqrestore(&vha->work_lock, flags); WARN_ON(1); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); goto done_free_sp; } @@ -4349,8 +4351,12 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, sp->done = qla2x00_async_gpnft_gnnft_sp_done; rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) + if (rval != QLA_SUCCESS) { + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + spin_unlock_irqrestore(&vha->work_lock, flags); goto done_free_sp; + } ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s hdl=%x FC4Type %x.\n", sp->name, diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 4460c841d14b..2ebf4e4e0234 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -800,6 +800,7 @@ qla24xx_async_gnl_sp_done(void *s, int res) if (res == QLA_FUNCTION_TIMEOUT) return; + sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); memset(&ea, 0, sizeof(ea)); ea.sp = sp; ea.rc = res; @@ -827,25 +828,24 @@ qla24xx_async_gnl_sp_done(void *s, int res) (loop_id & 0x7fff)); } - spin_lock_irqsave(&vha->gnl.fcports_lock, flags); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); INIT_LIST_HEAD(&h); fcport = tf = NULL; if (!list_empty(&vha->gnl.fcports)) list_splice_init(&vha->gnl.fcports, &h); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); list_for_each_entry_safe(fcport, tf, &h, gnl_entry) { list_del_init(&fcport->gnl_entry); - spin_lock(&vha->hw->tgt.sess_lock); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); - spin_unlock(&vha->hw->tgt.sess_lock); + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); ea.fcport = fcport; qla2x00_fcport_event_handler(vha, &ea); } - spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); /* create new fcport if fw has knowledge of new sessions */ for (i = 0; i < n; 
i++) { port_id_t id; @@ -878,6 +878,8 @@ qla24xx_async_gnl_sp_done(void *s, int res) } } + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + vha->gnl.sent = 0; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); sp->free(sp); @@ -897,27 +899,24 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) ql_dbg(ql_dbg_disc, vha, 0x20d9, "Async-gnlist WWPN %8phC \n", fcport->port_name); - spin_lock_irqsave(&vha->gnl.fcports_lock, flags); - if (!list_empty(&fcport->gnl_entry)) { - spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); - rval = QLA_SUCCESS; - goto done; - } - - spin_lock(&vha->hw->tgt.sess_lock); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport->flags |= FCF_ASYNC_SENT; fcport->disc_state = DSC_GNL; fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; - spin_unlock(&vha->hw->tgt.sess_lock); list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports); - spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags); + if (vha->gnl.sent) { + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + return QLA_SUCCESS; + } + vha->gnl.sent = 1; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; - fcport->flags |= FCF_ASYNC_SENT; sp->type = SRB_MB_IOCB; sp->name = "gnlist"; sp->gen1 = fcport->rscn_gen; @@ -1204,7 +1203,9 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) vha->fcport_count++; ea->fcport->login_succ = 1; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); qla24xx_sched_upd_fcport(ea->fcport); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else if (ea->fcport->login_succ) { /* * We have an existing session. 
A late RSCN delivery diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 102c6b9f31ae..83ef790afb5d 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2719,7 +2719,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work) struct scsi_qla_host, iocb_work); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - int i = 20; + int i = 2; unsigned long flags; if (test_bit(UNLOADING, &base_vha->dpc_flags)) @@ -4606,7 +4606,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, spin_lock_init(&vha->work_lock); spin_lock_init(&vha->cmd_list_lock); - spin_lock_init(&vha->gnl.fcports_lock); init_waitqueue_head(&vha->fcport_waitQ); init_waitqueue_head(&vha->vref_waitq); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 7b14204c79dd..29b79e85fa7f 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -600,7 +600,9 @@ void qla2x00_async_nack_sp_done(void *s, int res) sp->fcport->login_succ = 1; vha->fcport_count++; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); qla24xx_sched_upd_fcport(sp->fcport); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else { sp->fcport->login_retry = 0; sp->fcport->disc_state = DSC_LOGIN_COMPLETE; -- GitLab From ad2dd3f4e5ac85664cbdca5be2d533e9e9207e8c Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Fri, 5 Jun 2020 14:12:34 -0500 Subject: [PATCH 0796/1304] gfs2: initialize transaction tr_ailX_lists earlier commit cbcc89b630447ec7836aa2b9242d9bb1725f5a61 upstream. Since transactions may be freed shortly after they're created, before a log_flush occurs, we need to initialize their ail1 and ail2 lists earlier. Before this patch, the ail1 list was initialized in gfs2_log_flush(). This moves the initialization to the point when the transaction is first created. 
Signed-off-by: Bob Peterson Signed-off-by: Andreas Gruenbacher Cc: Salvatore Bonaccorso Signed-off-by: Greg Kroah-Hartman --- fs/gfs2/glops.c | 2 ++ fs/gfs2/log.c | 2 -- fs/gfs2/trans.c | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index c63bee9adb6a..20f08f4391c9 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -89,6 +89,8 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) memset(&tr, 0, sizeof(tr)); INIT_LIST_HEAD(&tr.tr_buf); INIT_LIST_HEAD(&tr.tr_databuf); + INIT_LIST_HEAD(&tr.tr_ail1_list); + INIT_LIST_HEAD(&tr.tr_ail2_list); tr.tr_revokes = atomic_read(&gl->gl_ail_count); if (!tr.tr_revokes) diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 06752db213d2..74c1fe9c4a04 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -806,8 +806,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) tr = sdp->sd_log_tr; if (tr) { sdp->sd_log_tr = NULL; - INIT_LIST_HEAD(&tr->tr_ail1_list); - INIT_LIST_HEAD(&tr->tr_ail2_list); tr->tr_first = sdp->sd_log_flush_head; if (unlikely (state == SFS_FROZEN)) gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new); diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 812b5d5978b2..9313f7904e34 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -56,6 +56,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, sizeof(u64)); INIT_LIST_HEAD(&tr->tr_databuf); INIT_LIST_HEAD(&tr->tr_buf); + INIT_LIST_HEAD(&tr->tr_ail1_list); + INIT_LIST_HEAD(&tr->tr_ail2_list); sb_start_intwrite(sdp->sd_vfs); -- GitLab From 64463253511adad375549a79cf2094de6effe817 Mon Sep 17 00:00:00 2001 From: Naresh Kumar PBS Date: Mon, 24 Aug 2020 11:14:35 -0700 Subject: [PATCH 0797/1304] RDMA/bnxt_re: Restrict the max_gids to 256 commit 847b97887ed4569968d5b9a740f2334abca9f99a upstream. Some adapters report more than 256 gid entries. Restrict it to 256 for now. 
Fixes: 1ac5a4047975("RDMA/bnxt_re: Add bnxt_re RoCE driver") Link: https://lore.kernel.org/r/1598292876-26529-6-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Naresh Kumar PBS Signed-off-by: Selvin Xavier Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 2 +- drivers/infiniband/hw/bnxt_re/qplib_sp.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 09e7d3dd3055..336144876363 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -141,7 +141,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); - attr->max_sgid = le32_to_cpu(sb->max_gid); + attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED; bnxt_qplib_query_version(rcfw, attr->fw_ver); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 9d3e8b994945..b6e9e0ef7939 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -47,6 +47,7 @@ struct bnxt_qplib_dev_attr { #define FW_VER_ARR_LEN 4 u8 fw_ver[FW_VER_ARR_LEN]; +#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256 u16 max_sgid; u16 max_mrw; u32 max_qp; -- GitLab From cf1a59e1ac54d8f7e211b845100dbd778624aeda Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 15 Aug 2020 04:46:41 -0400 Subject: [PATCH 0798/1304] net: handle the return value of pskb_carve_frag_list() correctly commit eabe861881a733fc84f286f4d5a1ffaddd4f526f upstream. pskb_carve_frag_list() may return -ENOMEM in pskb_carve_inside_nonlinear(). we should handle this correctly or we would get wrong sk_buff. Fixes: 6fa01ccd8830 ("skbuff: Add pskb_extract() helper function") Signed-off-by: Miaohe Lin Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/core/skbuff.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index af6e9028716d..be4bc833c28a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5521,9 +5521,13 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); - if (k == 0) { - /* split line is in frag list */ - pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); + /* split line is in frag list */ + if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { + /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ + if (skb_has_frag_list(skb)) + kfree_skb_list(skb_shinfo(skb)->frag_list); + kfree(data); + return -ENOMEM; } skb_release_data(skb); -- GitLab From 931f10b04b3040c602466f90989f798aa1cf5ddc Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Thu, 20 Aug 2020 14:53:14 -0700 Subject: [PATCH 0799/1304] hv_netvsc: Remove "unlikely" from netvsc_select_queue commit 4d820543c54c47a2bd3c95ddbf52f83c89a219a0 upstream. When using vf_ops->ndo_select_queue, the number of queues of VF is usually bigger than the synthetic NIC. This condition may happen often. Remove "unlikely" from the comparison of ndev->real_num_tx_queues. Fixes: b3bf5666a510 ("hv_netvsc: defer queue selection to VF") Signed-off-by: Haiyang Zhang Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 4a5d99ecb89d..2dff0e110c6f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -378,7 +378,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, } rcu_read_unlock(); - while (unlikely(txq >= ndev->real_num_tx_queues)) + while (txq >= ndev->real_num_tx_queues) txq -= ndev->real_num_tx_queues; return txq; -- GitLab From c642341b09f367b5d85c97e30e45afc00241f1f9 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Thu, 20 Aug 2020 18:52:43 -0400 Subject: [PATCH 0800/1304] NFSv4.1 handle ERR_DELAY error reclaiming locking state on delegation recall [ Upstream commit 3d7a9520f0c3e6a68b6de8c5812fc8b6d7a52626 ] A client should be able to handle getting an ERR_DELAY error while doing a LOCK call to reclaim state due to delegation being recalled. This is a transient error that can happen due to server moving its volumes and invalidating its file location cache and upon reference to it during the LOCK call needing to do an expensive lookup (leading to an ERR_DELAY error on a PUTFH). 
Signed-off-by: Olga Kornievskaia Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin --- fs/nfs/nfs4proc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1ef75b1deffa..929f1d72bfd3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7006,7 +7006,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, err = nfs4_set_lock_state(state, fl); if (err != 0) return err; - err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + do { + err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + if (err != -NFS4ERR_DELAY) + break; + ssleep(1); + } while (err == -NFS4ERR_DELAY); return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); } -- GitLab From 70f1396b14549ef191df79bd29f9760467cbc54d Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Sun, 23 Aug 2020 17:14:53 +0800 Subject: [PATCH 0801/1304] scsi: pm8001: Fix memleak in pm8001_exec_internal_task_abort [ Upstream commit ea403fde7552bd61bad6ea45e3feb99db77cb31e ] When pm8001_tag_alloc() fails, task should be freed just like it is done in the subsequent error paths. Link: https://lore.kernel.org/r/20200823091453.4782-1-dinghao.liu@zju.edu.cn Acked-by: Jack Wang Signed-off-by: Dinghao Liu Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/pm8001/pm8001_sas.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 5be4212312cb..ba79b37d8cf7 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -794,7 +794,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); if (res) - return res; + goto ex_err; ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_dev; ccb->ccb_tag = ccb_tag; -- GitLab From d7c720df355273db19093c487c460fb2067068dd Mon Sep 17 00:00:00 2001 From: Javed Hasan Date: Tue, 25 Aug 2020 02:39:40 -0700 Subject: [PATCH 0802/1304] scsi: libfc: Fix for double free() [ Upstream commit 5a5b80f98534416b3b253859897e2ba1dc241e70 ] Fix for '&fp->skb' double free. Link: https://lore.kernel.org/r/20200825093940.19612-1-jhasan@marvell.com Reported-by: Dan Carpenter Signed-off-by: Javed Hasan Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/libfc/fc_disc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 78cf5b32bca6..0b3f4538c1d4 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -646,8 +646,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_free(fp); out: kref_put(&rdata->kref, fc_rport_destroy); - if (!IS_ERR(fp)) - fc_frame_free(fp); } /** -- GitLab From 67ee55a92c8a50fc99971cf9303e4a5099b99a3b Mon Sep 17 00:00:00 2001 From: James Smart Date: Fri, 28 Aug 2020 10:53:30 -0700 Subject: [PATCH 0803/1304] scsi: lpfc: Fix FLOGI/PLOGI receive race condition in pt2pt discovery [ Upstream commit 7b08e89f98cee9907895fabb64cf437bc505ce9a ] The driver is unable to successfully login with remote device. During pt2pt login, the driver completes its FLOGI request with the remote device having WWN precedence. 
The remote device issues its own (delayed) FLOGI after accepting the driver's and, upon transmitting the FLOGI, immediately recognizes it has already processed the driver's FLOGI thus it transitions to sending a PLOGI before waiting for an ACC to its FLOGI. In the driver, the FLOGI is received and an ACC sent, followed by the PLOGI being received and an ACC sent. The issue is that the PLOGI reception occurs before the response from the adapter from the FLOGI ACC is received. Processing of the PLOGI sets state flags to perform the REG_RPI mailbox command and proceed with the rest of discovery on the port. The same completion routine used by both FLOGI and PLOGI is generic in nature. One of the things it does is clear flags, and those flags happen to drive the rest of discovery. So what happened was the PLOGI processing set the flags, the FLOGI ACC completion cleared them, thus when the PLOGI ACC completes it doesn't see the flags and stops. Fix by modifying the generic completion routine to not clear the rest of discovery flag (NLP_ACC_REGLOGIN) unless the completion is also associated with performing a mailbox command as part of its handling. For things such as FLOGI ACC, there isn't a subsequent action to perform with the adapter, thus there is no mailbox cmd ptr. PLOGI ACC though will perform REG_RPI upon completion, thus there is a mailbox cmd ptr. Link: https://lore.kernel.org/r/20200828175332.130300-3-james.smart@broadcom.com Co-developed-by: Dick Kennedy Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/lpfc/lpfc_els.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 9032793c405e..6a4b496081e4 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -4112,7 +4112,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, out: if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) { spin_lock_irq(shost->host_lock); - ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); + if (mbox) + ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; spin_unlock_irq(shost->host_lock); /* If the node is not being used by another discovery thread, -- GitLab From ad20a37cb784c42d0331a23e97244b98098d271a Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Wed, 2 Sep 2020 15:09:52 +0200 Subject: [PATCH 0804/1304] regulator: pwm: Fix machine constraints application [ Upstream commit 59ae97a7a9e1499c2070e29841d1c4be4ae2994a ] If the zero duty cycle doesn't correspond to any voltage in the voltage table, the PWM regulator returns an -EINVAL from get_voltage_sel() which results in the core erroring out with a "failed to get the current voltage" and ending up not applying the machine constraints. Instead, return -ENOTRECOVERABLE which makes the core set the voltage since it's at an unknown value. 
For example, with this device tree: fooregulator { compatible = "pwm-regulator"; pwms = <&foopwm 0 100000>; regulator-min-microvolt = <2250000>; regulator-max-microvolt = <2250000>; regulator-name = "fooregulator"; regulator-always-on; regulator-boot-on; voltage-table = <2250000 30>; }; Before this patch: fooregulator: failed to get the current voltage(-22) After this patch: fooregulator: Setting 2250000-2250000uV fooregulator: 2250 mV Signed-off-by: Vincent Whitchurch Link: https://lore.kernel.org/r/20200902130952.24880-1-vincent.whitchurch@axis.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/regulator/pwm-regulator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c index a2fd140eff81..34f3b9778ffa 100644 --- a/drivers/regulator/pwm-regulator.c +++ b/drivers/regulator/pwm-regulator.c @@ -285,7 +285,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev, return ret; } - drvdata->state = -EINVAL; + drvdata->state = -ENOTRECOVERABLE; drvdata->duty_cycle_table = duty_cycle_table; memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops, sizeof(drvdata->ops)); -- GitLab From 09ca2d2681e95c7603ce1c3601ebbeaaf8415750 Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Wed, 2 Sep 2020 15:23:41 +0200 Subject: [PATCH 0805/1304] spi: spi-loopback-test: Fix out-of-bounds read [ Upstream commit 837ba18dfcd4db21ad58107c65bfe89753aa56d7 ] The "tx/rx-transfer - crossing PAGE_SIZE" test always fails when len=131071 and rx_offset >= 5: spi-loopback-test spi0.0: Running test tx/rx-transfer - crossing PAGE_SIZE ... 
with iteration values: len = 131071, tx_off = 0, rx_off = 3 with iteration values: len = 131071, tx_off = 0, rx_off = 4 with iteration values: len = 131071, tx_off = 0, rx_off = 5 loopback strangeness - rx changed outside of allowed range at: ...a4321000 spi_msg@ffffffd5a4157690 frame_length: 131071 actual_length: 131071 spi_transfer@ffffffd5a41576f8 len: 131071 tx_buf: ffffffd5a4340ffc Note that rx_offset > 3 can only occur if the SPI controller driver sets ->dma_alignment to a higher value than 4, so most SPI controller drivers are not affect. The allocated Rx buffer is of size SPI_TEST_MAX_SIZE_PLUS, which is 132 KiB (assuming 4 KiB pages). This test uses an initial offset into the rx_buf of PAGE_SIZE - 4, and a len of 131071, so the range expected to be written in this transfer ends at (4096 - 4) + 5 + 131071 == 132 KiB, which is also the end of the allocated buffer. But the code which verifies the content of the buffer reads a byte beyond the allocated buffer and spuriously fails because this out-of-bounds read doesn't return the expected value. Fix this by using ITERATE_LEN instead of ITERATE_MAX_LEN to avoid testing sizes which cause out-of-bounds reads. 
Signed-off-by: Vincent Whitchurch Link: https://lore.kernel.org/r/20200902132341.7079-1-vincent.whitchurch@axis.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/spi/spi-loopback-test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index bed7403bb6b3..b9a7117b6dce 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -99,7 +99,7 @@ static struct spi_test spi_tests[] = { { .description = "tx/rx-transfer - crossing PAGE_SIZE", .fill_option = FILL_COUNT_8, - .iterate_len = { ITERATE_MAX_LEN }, + .iterate_len = { ITERATE_LEN }, .iterate_tx_align = ITERATE_ALIGN, .iterate_rx_align = ITERATE_ALIGN, .transfer_count = 1, -- GitLab From a6a2cf4d918f3c62b652a87d4e9b667049de4cb1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 4 Sep 2020 17:39:12 -0400 Subject: [PATCH 0806/1304] NFS: Zero-stateid SETATTR should first return delegation [ Upstream commit 644c9f40cf71969f29add32f32349e71d4995c0b ] If a write delegation isn't available, the Linux NFS client uses a zero-stateid when performing a SETATTR. NFSv4.0 provides no mechanism for an NFS server to match such a request to a particular client. It recalls all delegations for that file, even delegations held by the client issuing the request. If that client happens to hold a read delegation, the server will recall it immediately, resulting in an NFS4ERR_DELAY/CB_RECALL/ DELEGRETURN sequence. Optimize out this pipeline bubble by having the client return any delegations it may hold on a file before it issues a SETATTR(zero-stateid) on that file. 
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin --- fs/nfs/nfs4proc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 929f1d72bfd3..b2a2ff3f22a4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3129,8 +3129,10 @@ static int _nfs4_do_setattr(struct inode *inode, /* Servers should only apply open mode checks for file size changes */ truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false; - if (!truncate) + if (!truncate) { + nfs4_inode_make_writeable(inode); goto zero_stateid; + } if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { /* Use that stateid */ -- GitLab From 61279a7b3e337b8c8605987591964db4a2b3eb91 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Sat, 5 Sep 2020 10:03:26 -0400 Subject: [PATCH 0807/1304] SUNRPC: stop printk reading past end of string [ Upstream commit 8c6b6c793ed32b8f9770ebcdf1ba99af423c303b ] Since p points at raw xdr data, there's no guarantee that it's NULL terminated, so we should give a length. And probably escape any special characters too. Reported-by: Zhi Li Signed-off-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin --- net/sunrpc/rpcb_clnt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 08b5fa4a2852..ba8f36731228 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -981,8 +981,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr, p = xdr_inline_decode(xdr, len); if (unlikely(p == NULL)) goto out_fail; - dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid, - req->rq_task->tk_msg.rpc_proc->p_name, (char *)p); + dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid, + req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p); if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len, sap, sizeof(address)) == 0) -- GitLab From 30857be3992300481d31b87d7d1d126163c858eb Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 29 Jul 2020 01:19:40 +0300 Subject: [PATCH 0808/1304] rapidio: Replace 'select' DMAENGINES 'with depends on' [ Upstream commit d2b86100245080cfdf1e95e9e07477474c1be2bd ] Enabling a whole subsystem from a single driver 'select' is frowned upon and won't be accepted in new drivers, that need to use 'depends on' instead. Existing selection of DMAENGINES will then cause circular dependencies. Replace them with a dependency. 
Signed-off-by: Laurent Pinchart Acked-by: Randy Dunlap Signed-off-by: Sasha Levin --- drivers/rapidio/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index d6d2f20c4597..21df2816def7 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -25,7 +25,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS config RAPIDIO_DMA_ENGINE bool "DMA Engine support for RapidIO" depends on RAPIDIO - select DMADEVICES + depends on DMADEVICES select DMA_ENGINE help Say Y here if you want to use DMA Engine frameork for RapidIO data -- GitLab From 2c01af4f3b75f6280964653eb17bd6709ec31dc6 Mon Sep 17 00:00:00 2001 From: Stafford Horne Date: Thu, 3 Sep 2020 05:48:58 +0900 Subject: [PATCH 0809/1304] openrisc: Fix cache API compile issue when not inlining [ Upstream commit 3ae90d764093dfcd6ab8ab6875377302892c87d4 ] I found this when compiling a kbuild random config with GCC 11. The config enables CONFIG_DEBUG_SECTION_MISMATCH, which sets CFLAGS -fno-inline-functions-called-once. This causes the call to cache_loop in cache.c to not be inlined causing the below compile error. In file included from arch/openrisc/mm/cache.c:13: arch/openrisc/mm/cache.c: In function 'cache_loop': ./arch/openrisc/include/asm/spr.h:16:27: warning: 'asm' operand 0 probably does not match constraints 16 | #define mtspr(_spr, _val) __asm__ __volatile__ ( \ | ^~~~~~~ arch/openrisc/mm/cache.c:25:3: note: in expansion of macro 'mtspr' 25 | mtspr(reg, line); | ^~~~~ ./arch/openrisc/include/asm/spr.h:16:27: error: impossible constraint in 'asm' 16 | #define mtspr(_spr, _val) __asm__ __volatile__ ( \ | ^~~~~~~ arch/openrisc/mm/cache.c:25:3: note: in expansion of macro 'mtspr' 25 | mtspr(reg, line); | ^~~~~ make[1]: *** [scripts/Makefile.build:283: arch/openrisc/mm/cache.o] Error 1 The asm constraint "K" requires a immediate constant argument to mtspr, however because of no inlining a register argument is passed causing a failure. 
Fix this by using __always_inline. Link: https://lore.kernel.org/lkml/202008200453.ohnhqkjQ%25lkp@intel.com/ Signed-off-by: Stafford Horne Signed-off-by: Sasha Levin --- arch/openrisc/mm/cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c index b747bf1fc1b6..4272d9123f9e 100644 --- a/arch/openrisc/mm/cache.c +++ b/arch/openrisc/mm/cache.c @@ -20,7 +20,7 @@ #include #include -static void cache_loop(struct page *page, const unsigned int reg) +static __always_inline void cache_loop(struct page *page, const unsigned int reg) { unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT; unsigned long line = paddr & ~(L1_CACHE_BYTES - 1); -- GitLab From 514171c50909736af8b6cdf6365c0d15bdb869a2 Mon Sep 17 00:00:00 2001 From: David Milburn Date: Wed, 2 Sep 2020 17:42:54 -0500 Subject: [PATCH 0810/1304] nvme-fc: cancel async events before freeing event struct [ Upstream commit e126e8210e950bb83414c4f57b3120ddb8450742 ] Cancel async event work in case async event has been queued up, and nvme_fc_submit_async_event() runs after event has been freed. 
Signed-off-by: David Milburn Reviewed-by: Keith Busch Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- drivers/nvme/host/fc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index bb3b447c5646..73db32f97abf 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1791,6 +1791,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) struct nvme_fc_fcp_op *aen_op; int i; + cancel_work_sync(&ctrl->ctrl.async_event_work); aen_op = ctrl->aen_ops; for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { if (!aen_op->fcp_req.private) -- GitLab From f10c9c9dce4d3ee542987680e2a8576871c05734 Mon Sep 17 00:00:00 2001 From: David Milburn Date: Wed, 2 Sep 2020 17:42:52 -0500 Subject: [PATCH 0811/1304] nvme-rdma: cancel async events before freeing event struct [ Upstream commit 925dd04c1f9825194b9e444c12478084813b2b5d ] Cancel async event work in case async event has been queued up, and nvme_rdma_submit_async_event() runs after event has been freed. 
Signed-off-by: David Milburn Reviewed-by: Keith Busch Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- drivers/nvme/host/rdma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 7e2cdb17c26d..077c67816665 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -740,6 +740,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); } if (ctrl->async_event_sqe.data) { + cancel_work_sync(&ctrl->ctrl.async_event_work); nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); ctrl->async_event_sqe.data = NULL; -- GitLab From 9cba1f79e383db89ce397ce6abedf521aef27b90 Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Tue, 18 Aug 2020 15:40:14 +0530 Subject: [PATCH 0812/1304] f2fs: fix indefinite loop scanning for free nid [ Upstream commit e2cab031ba7b5003cd12185b3ef38f1a75e3dae8 ] If the sbi->ckpt->next_free_nid is not NAT block aligned and if there are free nids in that NAT block between the start of the block and next_free_nid, then those free nids will not be scanned in scan_nat_page(). This results into mismatch between nm_i->available_nids and the sum of nm_i->free_nid_count of all NAT blocks scanned. And nm_i->available_nids will always be greater than the sum of free nids in all the blocks. Under this condition, if we use all the currently scanned free nids, then it will loop forever in f2fs_alloc_nid() as nm_i->available_nids is still not zero but nm_i->free_nid_count of that partially scanned NAT block is zero. Fix this to align the nm_i->next_scan_nid to the first nid of the corresponding NAT block. 
Signed-off-by: Sahitya Tummala Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim Signed-off-by: Sasha Levin --- fs/f2fs/node.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 2ff02541c53d..1934dc6ad1cc 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2257,6 +2257,9 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, if (unlikely(nid >= nm_i->max_nid)) nid = 0; + if (unlikely(nid % NAT_ENTRY_PER_BLOCK)) + nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK; + /* Enough entries */ if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) return 0; -- GitLab From cababeac915a218b3902aac772b1786068e8e2f3 Mon Sep 17 00:00:00 2001 From: Gabriel Krisman Bertazi Date: Wed, 19 Aug 2020 16:07:31 -0400 Subject: [PATCH 0813/1304] f2fs: Return EOF on unaligned end of file DIO read [ Upstream commit 20d0a107fb35f37578b919f62bd474d6d358d579 ] Reading past end of file returns EOF for aligned reads but -EINVAL for unaligned reads on f2fs. While documentation is not strict about this corner case, most filesystem returns EOF on this case, like iomap filesystems. This patch consolidates the behavior for f2fs, by making it return EOF(0). it can be verified by a read loop on a file that does a partial read before EOF (A file that doesn't end at an aligned address). The following code fails on an unaligned file on f2fs, but not on btrfs, ext4, and xfs. while (done < total) { ssize_t delta = pread(fd, buf + done, total - done, off + done); if (!delta) break; ... } It is arguable whether filesystems should actually return EOF or -EINVAL, but since iomap filesystems support it, and so does the original DIO code, it seems reasonable to consolidate on that. 
Signed-off-by: Gabriel Krisman Bertazi Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim Signed-off-by: Sasha Levin --- fs/f2fs/data.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index c81a1f3f0a10..c63f5e32630e 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2490,6 +2490,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter, unsigned long align = offset | iov_iter_alignment(iter); struct block_device *bdev = inode->i_sb->s_bdev; + if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode)) + return 1; + if (align & blocksize_mask) { if (bdev) blkbits = blksize_bits(bdev_logical_block_size(bdev)); -- GitLab From 4c265a94462cb00ed28671540da2b74a32c63a68 Mon Sep 17 00:00:00 2001 From: Evan Nimmo Date: Wed, 9 Sep 2020 08:32:47 +1200 Subject: [PATCH 0814/1304] i2c: algo: pca: Reapply i2c bus settings after reset [ Upstream commit 0a355aeb24081e4538d4d424cd189f16c0bbd983 ] If something goes wrong (such as the SCL being stuck low) then we need to reset the PCA chip. The issue with this is that on reset we lose all config settings and the chip ends up in a disabled state which results in a lock up/high CPU usage. We need to re-apply any configuration that had previously been set and re-enable the chip. 
Signed-off-by: Evan Nimmo Reviewed-by: Chris Packham Reviewed-by: Andy Shevchenko Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/algos/i2c-algo-pca.c | 35 +++++++++++++++++++++----------- include/linux/i2c-algo-pca.h | 15 ++++++++++++++ 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 0e745f82d6a5..f328de980855 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -50,8 +50,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap) pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET); pca_outw(adap, I2C_PCA_IND, 0xA5); pca_outw(adap, I2C_PCA_IND, 0x5A); + + /* + * After a reset we need to re-apply any configuration + * (calculated in pca_init) to get the bus in a working state. + */ + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow); + pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH); + pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi); + + pca_set_con(adap, I2C_PCA_CON_ENSIO); } else { adap->reset_chip(adap->data); + pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq); } } @@ -432,13 +446,14 @@ static int pca_init(struct i2c_adapter *adap) " Use the nominal frequency.\n", adap->name); } - pca_reset(pca_data); - clock = pca_clock(pca_data); printk(KERN_INFO "%s: Clock frequency is %dkHz\n", adap->name, freqs[clock]); - pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock); + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.clock_freq = clock; + + pca_reset(pca_data); } else { int clock; int mode; @@ -505,19 +520,15 @@ static int pca_init(struct i2c_adapter *adap) thi = tlow * min_thi / min_tlow; } + /* Store settings as these will be needed when the PCA chip is reset */ + pca_data->bus_settings.mode = mode; + 
pca_data->bus_settings.tlow = tlow; + pca_data->bus_settings.thi = thi; + pca_reset(pca_data); printk(KERN_INFO "%s: Clock frequency is %dHz\n", adap->name, clock * 100); - - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE); - pca_outw(pca_data, I2C_PCA_IND, mode); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL); - pca_outw(pca_data, I2C_PCA_IND, tlow); - pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH); - pca_outw(pca_data, I2C_PCA_IND, thi); - - pca_set_con(pca_data, I2C_PCA_CON_ENSIO); } udelay(500); /* 500 us for oscillator to stabilise */ diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h index d03071732db4..7c522fdd9ea7 100644 --- a/include/linux/i2c-algo-pca.h +++ b/include/linux/i2c-algo-pca.h @@ -53,6 +53,20 @@ #define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ #define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ +/** + * struct pca_i2c_bus_settings - The configured PCA i2c bus settings + * @mode: Configured i2c bus mode + * @tlow: Configured SCL LOW period + * @thi: Configured SCL HIGH period + * @clock_freq: The configured clock frequency + */ +struct pca_i2c_bus_settings { + int mode; + int tlow; + int thi; + int clock_freq; +}; + struct i2c_algo_pca_data { void *data; /* private low level data */ void (*write_byte) (void *data, int reg, int val); @@ -64,6 +78,7 @@ struct i2c_algo_pca_data { * For PCA9665, use the frequency you want here. */ unsigned int i2c_clock; unsigned int chip; + struct pca_i2c_bus_settings bus_settings; }; int i2c_pca_add_bus(struct i2c_adapter *); -- GitLab From 80c468d9abc9d4129809c1ffc90b3c835a1202c2 Mon Sep 17 00:00:00 2001 From: Gustav Wiklander Date: Tue, 8 Sep 2020 17:11:29 +0200 Subject: [PATCH 0815/1304] spi: Fix memory leak on splited transfers [ Upstream commit b59a7ca15464c78ea1ba3b280cfc5ac5ece11ade ] In the prepare_message callback the bus driver has the opportunity to split a transfer into smaller chunks. spi_map_msg is done after prepare_message. 
Function spi_res_release releases the splited transfers in the message. Therefore spi_res_release should be called after spi_map_msg. The previous try at this was commit c9ba7a16d0f1 which released the splited transfers after spi_finalize_current_message had been called. This introduced a race since the message struct could be out of scope because the spi_sync call got completed. Fixes this leak on spi bus driver spi-bcm2835.c when transfer size is greater than 65532: Kmemleak: sg_alloc_table+0x28/0xc8 spi_map_buf+0xa4/0x300 __spi_pump_messages+0x370/0x748 __spi_sync+0x1d4/0x270 spi_sync+0x34/0x58 spi_test_execute_msg+0x60/0x340 [spi_loopback_test] spi_test_run_iter+0x548/0x578 [spi_loopback_test] spi_test_run_test+0x94/0x140 [spi_loopback_test] spi_test_run_tests+0x150/0x180 [spi_loopback_test] spi_loopback_test_probe+0x50/0xd0 [spi_loopback_test] spi_drv_probe+0x84/0xe0 Signed-off-by: Gustav Wiklander Link: https://lore.kernel.org/r/20200908151129.15915-1-gustav.wiklander@axis.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/spi/spi.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 92e6b6774d98..1fd529a2d2f6 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1116,8 +1116,6 @@ static int spi_transfer_one_message(struct spi_controller *ctlr, if (msg->status && ctlr->handle_err) ctlr->handle_err(ctlr, msg); - spi_res_release(ctlr, msg); - spi_finalize_current_message(ctlr); return ret; @@ -1375,6 +1373,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) spi_unmap_msg(ctlr, mesg); + /* In the prepare_messages callback the spi bus has the opportunity to + * split a transfer to smaller chunks. + * Release splited transfers here since spi_map_msg is done on the + * splited transfers. 
+ */ + spi_res_release(ctlr, mesg); + if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { ret = ctlr->unprepare_message(ctlr, mesg); if (ret) { -- GitLab From 6da40b74efca9aa9d709ae36a825d14439af9b40 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Thu, 10 Sep 2020 18:33:51 +0800 Subject: [PATCH 0816/1304] KVM: MIPS: Change the definition of kvm type [ Upstream commit 15e9e35cd1dec2bc138464de6bf8ef828df19235 ] MIPS defines two kvm types: #define KVM_VM_MIPS_TE 0 #define KVM_VM_MIPS_VZ 1 In Documentation/virt/kvm/api.rst it is said that "You probably want to use 0 as machine type", which implies that type 0 be the "automatic" or "default" type. And, in user-space libvirt use the null-machine (with type 0) to detect the kvm capability, which returns "KVM not supported" on a VZ platform. I try to fix it in QEMU but it is ugly: https://lists.nongnu.org/archive/html/qemu-devel/2020-08/msg05629.html And Thomas Huth suggests me to change the definition of kvm type: https://lists.nongnu.org/archive/html/qemu-devel/2020-09/msg03281.html So I define like this: #define KVM_VM_MIPS_AUTO 0 #define KVM_VM_MIPS_VZ 1 #define KVM_VM_MIPS_TE 2 Since VZ and TE cannot co-exists, using type 0 on a TE platform will still return success (so old user-space tools have no problems on new kernels); the advantage is that using type 0 on a VZ platform will not return failure. So, the only problem is "new user-space tools use type 2 on old kernels", but if we treat this as a kernel bug, we can backport this patch to old stable kernels. 
Signed-off-by: Huacai Chen Message-Id: <1599734031-28746-1-git-send-email-chenhc@lemote.com> Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- arch/mips/kvm/mips.c | 2 ++ include/uapi/linux/kvm.h | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index e7f5ef6bed0f..79485790f7b5 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -131,6 +131,8 @@ void kvm_arch_check_processor_compat(void *rtn) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { switch (type) { + case KVM_VM_MIPS_AUTO: + break; #ifdef CONFIG_KVM_MIPS_VZ case KVM_VM_MIPS_VZ: #else diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 66ce6659ecb6..c297abc4e669 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -746,9 +746,10 @@ struct kvm_ppc_resize_hpt { #define KVM_VM_PPC_HV 1 #define KVM_VM_PPC_PR 2 -/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */ -#define KVM_VM_MIPS_TE 0 +/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */ +#define KVM_VM_MIPS_AUTO 0 #define KVM_VM_MIPS_VZ 1 +#define KVM_VM_MIPS_TE 2 #define KVM_S390_SIE_PAGE_OFFSET 1 -- GitLab From b2cbd001d34d12358eb4e1600e4720be224b4900 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 9 Aug 2020 16:49:59 +0200 Subject: [PATCH 0817/1304] clk: davinci: Use the correct size when allocating memory [ Upstream commit 3dabfa2bda48dab717986609762ce2a49335eb99 ] 'sizeof(*pllen)' should be used in place of 'sizeof(*pllout)' to avoid a small over-allocation. 
Fixes: 2d1726915159 ("clk: davinci: New driver for davinci PLL clocks") Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20200809144959.747986-1-christophe.jaillet@wanadoo.fr Reviewed-by: David Lechner Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/davinci/pll.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c index 1c99e992d638..796b428998ae 100644 --- a/drivers/clk/davinci/pll.c +++ b/drivers/clk/davinci/pll.c @@ -491,7 +491,7 @@ struct clk *davinci_pll_clk_register(struct device *dev, parent_name = postdiv_name; } - pllen = kzalloc(sizeof(*pllout), GFP_KERNEL); + pllen = kzalloc(sizeof(*pllen), GFP_KERNEL); if (!pllen) { ret = -ENOMEM; goto err_unregister_postdiv; -- GitLab From f4b96020c38ca58d8c8bd6947f7aeacce80fe084 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Sun, 9 Aug 2020 21:40:20 -0700 Subject: [PATCH 0818/1304] clk: rockchip: Fix initialization of mux_pll_src_4plls_p MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e9c006bc782c488f485ffe50de20b44e1e3daa18 ] A new warning in Clang points out that the initialization of mux_pll_src_4plls_p appears incorrect: ../drivers/clk/rockchip/clk-rk3228.c:140:58: warning: suspicious concatenation of string literals in an array initialization; did you mean to separate the elements with a comma? [-Wstring-concatenation] PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" }; ^ , ../drivers/clk/rockchip/clk-rk3228.c:140:48: note: place parentheses around the string literal to silence warning PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" }; ^ 1 warning generated. Given the name of the variable and the same variable name in rv1108, it seems that this should have been four distinct elements. Fix it up by adding the comma as suggested. 
Fixes: 307a2e9ac524 ("clk: rockchip: add clock controller for rk3228") Link: https://github.com/ClangBuiltLinux/linux/issues/1123 Signed-off-by: Nathan Chancellor Link: https://lore.kernel.org/r/20200810044020.2063350-1-natechancellor@gmail.com Reviewed-by: Heiko Stübner Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/rockchip/clk-rk3228.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c index 04f4f3739e3b..8d11d76e1db7 100644 --- a/drivers/clk/rockchip/clk-rk3228.c +++ b/drivers/clk/rockchip/clk-rk3228.c @@ -144,7 +144,7 @@ PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" }; PNAME(mux_hdmiphy_p) = { "hdmiphy_phy", "xin24m" }; PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" }; -PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" }; +PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy", "usb480m" }; PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "hdmiphy" }; PNAME(mux_pll_src_2plls_p) = { "cpll", "gpll" }; PNAME(mux_sclk_hdmi_cec_p) = { "cpll", "gpll", "xin24m" }; -- GitLab From 9ae0d92f866e2df7b9e3a5bf8fb295058762942e Mon Sep 17 00:00:00 2001 From: Stephan Gerhold Date: Thu, 20 Aug 2020 17:45:11 +0200 Subject: [PATCH 0819/1304] ASoC: qcom: Set card->owner to avoid warnings [ Upstream commit 3c27ea23ffb43262da6c64964163895951aaed4e ] On Linux 5.9-rc1 I get the following warning with apq8016-sbc: WARNING: CPU: 2 PID: 69 at sound/core/init.c:207 snd_card_new+0x36c/0x3b0 [snd] CPU: 2 PID: 69 Comm: kworker/2:1 Not tainted 5.9.0-rc1 #1 Workqueue: events deferred_probe_work_func pc : snd_card_new+0x36c/0x3b0 [snd] lr : snd_card_new+0xf4/0x3b0 [snd] Call trace: snd_card_new+0x36c/0x3b0 [snd] snd_soc_bind_card+0x340/0x9a0 [snd_soc_core] snd_soc_register_card+0xf4/0x110 [snd_soc_core] devm_snd_soc_register_card+0x44/0xa0 [snd_soc_core] apq8016_sbc_platform_probe+0x11c/0x140 [snd_soc_apq8016_sbc] This warning was 
introduced in commit 81033c6b584b ("ALSA: core: Warn on empty module"). It looks like we are supposed to set card->owner to THIS_MODULE. Fix this for all the qcom ASoC drivers. Cc: Srinivas Kandagatla Fixes: 79119c798649 ("ASoC: qcom: Add Storm machine driver") Fixes: bdb052e81f62 ("ASoC: qcom: add apq8016 sound card support") Fixes: a6f933f63f2f ("ASoC: qcom: apq8096: Add db820c machine driver") Fixes: 6b1687bf76ef ("ASoC: qcom: add sdm845 sound card support") Signed-off-by: Stephan Gerhold Link: https://lore.kernel.org/r/20200820154511.203072-1-stephan@gerhold.net Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/qcom/apq8016_sbc.c | 1 + sound/soc/qcom/apq8096.c | 1 + sound/soc/qcom/sdm845.c | 1 + sound/soc/qcom/storm.c | 1 + 4 files changed, 4 insertions(+) diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c index 4b559932adc3..121460db8eac 100644 --- a/sound/soc/qcom/apq8016_sbc.c +++ b/sound/soc/qcom/apq8016_sbc.c @@ -233,6 +233,7 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = dev; + card->owner = THIS_MODULE; card->dapm_widgets = apq8016_sbc_dapm_widgets; card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets); data = apq8016_sbc_parse_of(card); diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c index 1543e85629f8..04f814a0a7d5 100644 --- a/sound/soc/qcom/apq8096.c +++ b/sound/soc/qcom/apq8096.c @@ -46,6 +46,7 @@ static int apq8096_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = dev; + card->owner = THIS_MODULE; dev_set_drvdata(dev, card); ret = qcom_snd_parse_of(card); if (ret) { diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c index 2a781d87ee65..5fdbfa363ab1 100644 --- a/sound/soc/qcom/sdm845.c +++ b/sound/soc/qcom/sdm845.c @@ -226,6 +226,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev) } card->dev = dev; + card->owner = THIS_MODULE; dev_set_drvdata(dev, card); ret = 
qcom_snd_parse_of(card); if (ret) { diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c index a9fa972466ad..00a3f4c1b6fe 100644 --- a/sound/soc/qcom/storm.c +++ b/sound/soc/qcom/storm.c @@ -99,6 +99,7 @@ static int storm_platform_probe(struct platform_device *pdev) return -ENOMEM; card->dev = &pdev->dev; + card->owner = THIS_MODULE; ret = snd_soc_of_parse_card_name(card, "qcom,model"); if (ret) { -- GitLab From 1dcf055a46ba41c971b0d4f7a19c584db4d01bac Mon Sep 17 00:00:00 2001 From: Michael Kelley Date: Sun, 13 Sep 2020 12:47:29 -0700 Subject: [PATCH 0820/1304] Drivers: hv: vmbus: Add timeout to vmbus_wait_for_unload [ Upstream commit 911e1987efc8f3e6445955fbae7f54b428b92bd3 ] vmbus_wait_for_unload() looks for a CHANNELMSG_UNLOAD_RESPONSE message coming from Hyper-V. But if the message isn't found for some reason, the panic path gets hung forever. Add a timeout of 10 seconds to prevent this. Fixes: 415719160de3 ("Drivers: hv: vmbus: avoid scheduling in interrupt context in vmbus_initiate_unload()") Signed-off-by: Michael Kelley Reviewed-by: Dexuan Cui Reviewed-by: Vitaly Kuznetsov Link: https://lore.kernel.org/r/1600026449-23651-1-git-send-email-mikelley@microsoft.com Signed-off-by: Wei Liu Signed-off-by: Sasha Levin --- drivers/hv/channel_mgmt.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index c83361a8e203..7920b0d7e35a 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -779,7 +779,7 @@ static void vmbus_wait_for_unload(void) void *page_addr; struct hv_message *msg; struct vmbus_channel_message_header *hdr; - u32 message_type; + u32 message_type, i; /* * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was @@ -789,8 +789,11 @@ static void vmbus_wait_for_unload(void) * functional and vmbus_unload_response() will complete * vmbus_connection.unload_event. If not, the last thing we can do is * read message pages for all CPUs directly. 
+ * + * Wait no more than 10 seconds so that the panic path can't get + * hung forever in case the response message isn't seen. */ - while (1) { + for (i = 0; i < 1000; i++) { if (completion_done(&vmbus_connection.unload_event)) break; -- GitLab From c5dbe21652cd7a0ab49274d79077a8655255611a Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 11 Sep 2020 15:00:05 +0200 Subject: [PATCH 0821/1304] perf test: Fix the "signal" test inline assembly [ Upstream commit 8a39e8c4d9baf65d88f66d49ac684df381e30055 ] When compiling with DEBUG=1 on Fedora 32 I'm getting crash for 'perf test signal': Program received signal SIGSEGV, Segmentation fault. 0x0000000000c68548 in __test_function () (gdb) bt #0 0x0000000000c68548 in __test_function () #1 0x00000000004d62e9 in test_function () at tests/bp_signal.c:61 #2 0x00000000004d689a in test__bp_signal (test=0xa8e280 DW_AT_producer : (indirect string, offset: 0x254a): GNU C99 10.2.1 20200723 (Red Hat 10.2.1-1) -mtune=generic -march=x86-64 -ggdb3 -std=gnu99 -fno-omit-frame-pointer -funwind-tables -fstack-protector-all ^^^^^ ^^^^^ ^^^^^ $ Before: $ perf test signal 20: Breakpoint overflow signal handler : FAILED! 
$ After: $ perf test signal 20: Breakpoint overflow signal handler : Ok $ Fixes: 8fd34e1cce18 ("perf test: Improve bp_signal") Signed-off-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Michael Petlan Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Wang Nan Link: http://lore.kernel.org/lkml/20200911130005.1842138-1-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/tests/bp_signal.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c index 6cf00650602e..697423ce3bdf 100644 --- a/tools/perf/tests/bp_signal.c +++ b/tools/perf/tests/bp_signal.c @@ -44,10 +44,13 @@ volatile long the_var; #if defined (__x86_64__) extern void __test_function(volatile long *ptr); asm ( + ".pushsection .text;" ".globl __test_function\n" + ".type __test_function, @function;" "__test_function:\n" "incq (%rdi)\n" - "ret\n"); + "ret\n" + ".popsection\n"); #else static void __test_function(volatile long *ptr) { -- GitLab From 4cf1b96a36d5b26dadca1e2ab0f85180259bab75 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Mon, 14 Sep 2020 18:05:00 +0200 Subject: [PATCH 0822/1304] MIPS: SNI: Fix MIPS_L1_CACHE_SHIFT [ Upstream commit 564c836fd945a94b5dd46597d6b7adb464092650 ] Commit 930beb5ac09a ("MIPS: introduce MIPS_L1_CACHE_SHIFT_") forgot to select the correct MIPS_L1_CACHE_SHIFT for SNI RM. This breaks non coherent DMA because of a wrong allocation alignment. 
Fixes: 930beb5ac09a ("MIPS: introduce MIPS_L1_CACHE_SHIFT_") Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a830a9701e50..cc8c8d22afaf 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -852,6 +852,7 @@ config SNI_RM select I8253 select I8259 select ISA + select MIPS_L1_CACHE_SHIFT_6 select SWAP_IO_SPACE if CPU_BIG_ENDIAN select SYS_HAS_CPU_R4X00 select SYS_HAS_CPU_R5000 -- GitLab From 1e96d27099ef4b9ee2c3ad09025083779657e175 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 15 Sep 2020 12:18:19 +0900 Subject: [PATCH 0823/1304] perf test: Free formats for perf pmu parse test [ Upstream commit d26383dcb2b4b8629fde05270b4e3633be9e3d4b ] The following leaks were detected by ASAN: Indirect leak of 360 byte(s) in 9 object(s) allocated from: #0 0x7fecc305180e in calloc (/lib/x86_64-linux-gnu/libasan.so.5+0x10780e) #1 0x560578f6dce5 in perf_pmu__new_format util/pmu.c:1333 #2 0x560578f752fc in perf_pmu_parse util/pmu.y:59 #3 0x560578f6a8b7 in perf_pmu__format_parse util/pmu.c:73 #4 0x560578e07045 in test__pmu tests/pmu.c:155 #5 0x560578de109b in run_test tests/builtin-test.c:410 #6 0x560578de109b in test_and_print tests/builtin-test.c:440 #7 0x560578de401a in __cmd_test tests/builtin-test.c:661 #8 0x560578de401a in cmd_test tests/builtin-test.c:807 #9 0x560578e49354 in run_builtin /home/namhyung/project/linux/tools/perf/perf.c:312 #10 0x560578ce71a8 in handle_internal_command /home/namhyung/project/linux/tools/perf/perf.c:364 #11 0x560578ce71a8 in run_argv /home/namhyung/project/linux/tools/perf/perf.c:408 #12 0x560578ce71a8 in main /home/namhyung/project/linux/tools/perf/perf.c:538 #13 0x7fecc2b7acc9 in __libc_start_main ../csu/libc-start.c:308 Fixes: cff7f956ec4a1 ("perf tests: Move pmu tests into separate object") Signed-off-by: Namhyung Kim Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: Ian Rogers Cc: Mark Rutland 
Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lore.kernel.org/lkml/20200915031819.386559-12-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/tests/pmu.c | 1 + tools/perf/util/pmu.c | 11 +++++++++++ tools/perf/util/pmu.h | 1 + 3 files changed, 13 insertions(+) diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c index 7bedf8608fdd..3e183eef6f85 100644 --- a/tools/perf/tests/pmu.c +++ b/tools/perf/tests/pmu.c @@ -172,6 +172,7 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused) ret = 0; } while (0); + perf_pmu__del_formats(&formats); test_format_dir_put(format); return ret; } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index c1acf04c9f7a..c42054f42e7e 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1282,6 +1282,17 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to) set_bit(b, bits); } +void perf_pmu__del_formats(struct list_head *formats) +{ + struct perf_pmu_format *fmt, *tmp; + + list_for_each_entry_safe(fmt, tmp, formats, list) { + list_del(&fmt->list); + free(fmt->name); + free(fmt); + } +} + static int sub_non_neg(int a, int b) { if (b > a) diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 76fecec7b3f9..21335425f2e4 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -79,6 +79,7 @@ int perf_pmu__new_format(struct list_head *list, char *name, int config, unsigned long *bits); void perf_pmu__set_format(unsigned long *bits, long from, long to); int perf_pmu__format_parse(char *dir, struct list_head *head); +void perf_pmu__del_formats(struct list_head *formats); struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); -- GitLab From 76fe92986c5c2fff36d8fb83e86332113b6c1725 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Fri, 11 Sep 2020 07:57:06 +0900 Subject: [PATCH 0824/1304] fbcon: Fix user font detection test at fbcon_resize(). 
[ Upstream commit ec0972adecb391a8d8650832263a4790f3bfb4df ] syzbot is reporting OOB read at fbcon_resize() [1], for commit 39b3cffb8cf31117 ("fbcon: prevent user font height or width change from causing potential out-of-bounds access") is by error using registered_fb[con2fb_map[vc->vc_num]]->fbcon_par->p->userfont (which was set to non-zero) instead of fb_display[vc->vc_num].userfont (which remains zero for that display). We could remove tricky userfont flag [2], for we can determine it by comparing address of the font data and addresses of built-in font data. But since that commit is failing to fix the original OOB read [3], this patch keeps the change minimal in case we decide to revert altogether. [1] https://syzkaller.appspot.com/bug?id=ebcbbb6576958a496500fee9cf7aa83ea00b5920 [2] https://syzkaller.appspot.com/text?tag=Patch&x=14030853900000 [3] https://syzkaller.appspot.com/bug?id=6fba8c186d97cf1011ab17660e633b1cc4e080c9 Reported-by: syzbot Signed-off-by: Tetsuo Handa Fixes: 39b3cffb8cf31117 ("fbcon: prevent user font height or width change from causing potential out-of-bounds access") Cc: George Kennedy Link: https://lore.kernel.org/r/f6e3e611-8704-1263-d163-f52c906a4f06@I-love.SAKURA.ne.jp Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/video/fbdev/core/fbcon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 29226b6cb632..0bf5ea518558 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1979,7 +1979,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width, struct fb_var_screeninfo var = info->var; int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh; - if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) { + if (p->userfont && FNTSIZE(vc->vc_font.data)) { int size; int pitch = PITCH(vc->vc_font.width); -- GitLab From 5ee2eceb0750ebedace1bd04c1efa6a325038f2a Mon Sep 17 00:00:00 2001 From: 
Thomas Bogendoerfer Date: Wed, 16 Sep 2020 15:54:37 +0200 Subject: [PATCH 0825/1304] MIPS: SNI: Fix spurious interrupts [ Upstream commit b959b97860d0fee8c8f6a3e641d3c2ad76eab6be ] On A20R machines the interrupt pending bits in cause register need to be updated by requesting the chipset to do it. This needs to be done to find the interrupt cause and after interrupt service. In commit 0b888c7f3a03 ("MIPS: SNI: Convert to new irq_chip functions") the function to do after service update got lost, which caused spurious interrupts. Fixes: 0b888c7f3a03 ("MIPS: SNI: Convert to new irq_chip functions") Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/sni/a20r.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c index f9407e170476..c6af7047eb0d 100644 --- a/arch/mips/sni/a20r.c +++ b/arch/mips/sni/a20r.c @@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = { }, }; -static u32 a20r_ack_hwint(void) +/* + * Trigger chipset to update CPU's CAUSE IP field + */ +static u32 a20r_update_cause_ip(void) { u32 status = read_c0_status(); @@ -205,12 +208,14 @@ static void a20r_hwint(void) int irq; clear_c0_status(IE_IRQ0); - status = a20r_ack_hwint(); + status = a20r_update_cause_ip(); cause = read_c0_cause(); irq = ffs(((cause & status) >> 8) & 0xf8); if (likely(irq > 0)) do_IRQ(SNI_A20R_IRQ_BASE + irq - 1); + + a20r_update_cause_ip(); set_c0_status(IE_IRQ0); } -- GitLab From 0d191f84b65cd2daa8d2fa7294a509d2d1ec0fc1 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Wed, 9 Sep 2020 16:49:42 +0800 Subject: [PATCH 0826/1304] drm/mediatek: Add exception handing in mtk_drm_probe() if component init fail [ Upstream commit 64c194c00789889b0f9454f583712f079ba414ee ] mtk_ddp_comp_init() is called in a loop in mtk_drm_probe(), if it fail, previous successive init component is not proccessed. Thus uninitialize valid component and put their device if component init failed. 
Fixes: 119f5173628a ("drm/mediatek: Add DRM Driver for Mediatek SoC MT8173.") Signed-off-by: Yu Kuai Signed-off-by: Chun-Kuang Hu Signed-off-by: Sasha Levin --- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 947bc6d62302..d14321763607 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -600,8 +600,13 @@ static int mtk_drm_probe(struct platform_device *pdev) pm_runtime_disable(dev); err_node: of_node_put(private->mutex_node); - for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) + for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) { of_node_put(private->comp_node[i]); + if (private->ddp_comp[i]) { + put_device(private->ddp_comp[i]->larb_dev); + private->ddp_comp[i] = NULL; + } + } return ret; } -- GitLab From f47ba6941d10479838086234d4bb73d7aea0d61b Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Fri, 11 Sep 2020 19:21:51 +0800 Subject: [PATCH 0827/1304] drm/mediatek: Add missing put_device() call in mtk_hdmi_dt_parse_pdata() [ Upstream commit 0680a622318b8d657323b94082f4b9a44038dfee ] if of_find_device_by_node() succeed, mtk_drm_kms_init() doesn't have a corresponding put_device(). Thus add jump target to fix the exception handling for this function implementation. 
Fixes: 8f83f26891e1 ("drm/mediatek: Add HDMI support") Signed-off-by: Yu Kuai Signed-off-by: Chun-Kuang Hu Signed-off-by: Sasha Levin --- drivers/gpu/drm/mediatek/mtk_hdmi.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 62444a3a5742..331fb0c12929 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1476,25 +1476,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to get system configuration registers: %d\n", ret); - return ret; + goto put_device; } hdmi->sys_regmap = regmap; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdmi->regs = devm_ioremap_resource(dev, mem); - if (IS_ERR(hdmi->regs)) - return PTR_ERR(hdmi->regs); + if (IS_ERR(hdmi->regs)) { + ret = PTR_ERR(hdmi->regs); + goto put_device; + } remote = of_graph_get_remote_node(np, 1, 0); - if (!remote) - return -EINVAL; + if (!remote) { + ret = -EINVAL; + goto put_device; + } if (!of_device_is_compatible(remote, "hdmi-connector")) { hdmi->next_bridge = of_drm_find_bridge(remote); if (!hdmi->next_bridge) { dev_err(dev, "Waiting for external bridge\n"); of_node_put(remote); - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto put_device; } } @@ -1503,7 +1508,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n", remote); of_node_put(remote); - return -EINVAL; + ret = -EINVAL; + goto put_device; } of_node_put(remote); @@ -1511,10 +1517,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, of_node_put(i2c_np); if (!hdmi->ddc_adpt) { dev_err(dev, "Failed to get ddc i2c adapter by node\n"); - return -EINVAL; + ret = -EINVAL; + goto put_device; } return 0; +put_device: + put_device(hdmi->cec_dev); + return ret; } /* -- GitLab From cb0f66eb67d75b93a66063c12414b969ee137b51 Mon Sep 17 00:00:00 2001 From: Penghao Date: Mon, 7 Sep 2020 
10:30:26 +0800 Subject: [PATCH 0828/1304] USB: quirks: Add USB_QUIRK_IGNORE_REMOTE_WAKEUP quirk for BYD zhaoxin notebook commit bcea6dafeeef7d1a6a8320a249aabf981d63b881 upstream. Add a USB_QUIRK_IGNORE_REMOTE_WAKEUP quirk for the BYD zhaoxin notebook. This notebook come with usb touchpad. And we would like to disable touchpad wakeup on this notebook by default. Signed-off-by: Penghao Cc: stable Link: https://lore.kernel.org/r/20200907023026.28189-1-penghao@uniontech.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 2f068e525a37..4ee810531098 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -397,6 +397,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Generic RTL8153 based ethernet adapters */ { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM }, + /* SONiX USB DEVICE Touchpad */ + { USB_DEVICE(0x0c45, 0x7056), .driver_info = + USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Action Semiconductor flash disk */ { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, -- GitLab From 1f4b55978ea2ce37d0730515ef9f12b01c2c557b Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Wed, 16 Sep 2020 11:40:25 +0200 Subject: [PATCH 0829/1304] USB: UAS: fix disconnect by unplugging a hub commit 325b008723b2dd31de020e85ab9d2e9aa4637d35 upstream. The SCSI layer can go into an ugly loop if you ignore that a device is gone. You need to report an error in the command rather than in the return value of the queue method. We need to specifically check for ENODEV. The issue goes back to the introduction of the driver. 
Fixes: 115bb1ffa54c3 ("USB: Add UAS driver") Signed-off-by: Oliver Neukum Cc: stable Link: https://lore.kernel.org/r/20200916094026.30085-2-oneukum@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/uas.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 27d8b4b6ff59..658b0cd8e27e 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -669,8 +669,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, if (devinfo->resetting) { cmnd->result = DID_ERROR << 16; cmnd->scsi_done(cmnd); - spin_unlock_irqrestore(&devinfo->lock, flags); - return 0; + goto zombie; } /* Find a free uas-tag */ @@ -706,6 +705,16 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB); err = uas_submit_urbs(cmnd, devinfo); + /* + * in case of fatal errors the SCSI layer is peculiar + * a command that has finished is a success for the purpose + * of queueing, no matter how fatal the error + */ + if (err == -ENODEV) { + cmnd->result = DID_ERROR << 16; + cmnd->scsi_done(cmnd); + goto zombie; + } if (err) { /* If we did nothing, give up now */ if (cmdinfo->state & SUBMIT_STATUS_URB) { @@ -716,6 +725,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, } devinfo->cmnd[idx] = cmnd; +zombie: spin_unlock_irqrestore(&devinfo->lock, flags); return 0; } -- GitLab From 51e7ea12ccedbed2f9e31b593d02432986e491fb Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Thu, 17 Sep 2020 12:34:27 +0200 Subject: [PATCH 0830/1304] usblp: fix race between disconnect() and read() commit 9cdabcb3ef8c24ca3a456e4db7b012befb688e73 upstream. read() needs to check whether the device has been disconnected before it tries to talk to the device. 
Signed-off-by: Oliver Neukum Reported-by: syzbot+be5b5f86a162a6c281e6@syzkaller.appspotmail.com Link: https://lore.kernel.org/r/20200917103427.15740-1-oneukum@suse.com Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/usblp.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 419804c9c974..db36a796af8c 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -827,6 +827,11 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo if (rv < 0) return rv; + if (!usblp->present) { + count = -ENODEV; + goto done; + } + if ((avail = usblp->rstatus) < 0) { printk(KERN_ERR "usblp%d: error %d reading from printer\n", usblp->minor, (int)avail); -- GitLab From 6efcaea77668162e28ad4b18c978a3601384a07b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Volker=20R=C3=BCmelin?= Date: Tue, 1 Sep 2020 15:22:21 +0200 Subject: [PATCH 0831/1304] i2c: i801: Fix resume bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 66d402e2e9455cf0213c42b97f22a0493372d7cc upstream. On suspend the original host configuration gets restored. The resume routine has to undo this, otherwise the SMBus master may be left in disabled state or in i2c mode. [JD: Rebased on v5.8, moved the write into i801_setup_hstcfg.] 
Signed-off-by: Volker Rümelin Signed-off-by: Jean Delvare Signed-off-by: Wolfram Sang Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-i801.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 679c6c41f64b..58fc17e46694 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1506,6 +1506,16 @@ static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } static inline void i801_acpi_remove(struct i801_priv *priv) { } #endif +static unsigned char i801_setup_hstcfg(struct i801_priv *priv) +{ + unsigned char hstcfg = priv->original_hstcfg; + + hstcfg &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ + hstcfg |= SMBHSTCFG_HST_EN; + pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg); + return hstcfg; +} + static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) { unsigned char temp; @@ -1611,14 +1621,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) return err; } - pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp); - priv->original_hstcfg = temp; - temp &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ - if (!(temp & SMBHSTCFG_HST_EN)) { + pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg); + temp = i801_setup_hstcfg(priv); + if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN)) dev_info(&dev->dev, "Enabling SMBus device\n"); - temp |= SMBHSTCFG_HST_EN; - } - pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp); if (temp & SMBHSTCFG_SMB_SMI_EN) { dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n"); @@ -1745,6 +1751,7 @@ static int i801_resume(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); struct i801_priv *priv = pci_get_drvdata(pci_dev); + i801_setup_hstcfg(priv); i801_enable_host_notify(&priv->adapter); return 0; -- GitLab From cb0978891ba6c7be05b4d0d71004a50f57cc1530 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman 
Date: Mon, 21 Sep 2020 13:55:28 +0200 Subject: [PATCH 0832/1304] Revert "ALSA: hda - Fix silent audio output and corrupted input on MSI X570-A PRO" This reverts commit c0a7b7fe0e0f7baa7c1779e401d293d176307c51 which is commit 15cbff3fbbc631952c346744f862fb294504b5e2 upstream. It causes know regressions and will be reverted in Linus's tree soon. Reported-by: Hans de Goede Cc: Dan Crawford Cc: Takashi Iwai Link: https://lore.kernel.org/r/7efd2fe5-bf38-7f85-891a-eee3845d1493@redhat.com Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 1 - 1 file changed, 1 deletion(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 8092fd5617fa..9c5b3d19bfa7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2452,7 +2452,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), - SND_PCI_QUIRK(0x1462, 0x9c37, "MSI X570-A PRO", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), -- GitLab From 5afd52f302cac2700c59b86d19c329c0ba918977 Mon Sep 17 00:00:00 2001 From: Sunghyun Jin Date: Thu, 3 Sep 2020 21:41:16 +0900 Subject: [PATCH 0833/1304] percpu: fix first chunk size calculation for populated bitmap commit b3b33d3c43bbe0177d70653f4e889c78cc37f097 upstream. Variable populated, which is a member of struct pcpu_chunk, is used as a unit of size of unsigned long. However, size of populated is miscounted. So, I fix this minor part. 
Fixes: 8ab16c43ea79 ("percpu: change the number of pages marked in the first_chunk pop bitmap") Cc: # 4.14+ Signed-off-by: Sunghyun Jin Signed-off-by: Dennis Zhou Signed-off-by: Greg Kroah-Hartman --- mm/percpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/percpu.c b/mm/percpu.c index ff76fa0b7528..0151f276ae68 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1103,7 +1103,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, /* allocate chunk */ chunk = memblock_virt_alloc(sizeof(struct pcpu_chunk) + - BITS_TO_LONGS(region_size >> PAGE_SHIFT), + BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long), 0); INIT_LIST_HEAD(&chunk->list); -- GitLab From de3c235838e0cda09506f9bc3a9ee6107fb87102 Mon Sep 17 00:00:00 2001 From: Vincent Huang Date: Mon, 14 Sep 2020 12:19:08 -0700 Subject: [PATCH 0834/1304] Input: trackpoint - add new trackpoint variant IDs commit 6c77545af100a72bf5e28142b510ba042a17648d upstream. Add trackpoint variant IDs to allow supported control on Synaptics trackpoints. 
Signed-off-by: Vincent Huang Link: https://lore.kernel.org/r/20200914120327.2592-1-vincent.huang@tw.synaptics.com Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/mouse/trackpoint.c | 10 ++++++---- drivers/input/mouse/trackpoint.h | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 6590d10f166f..31c16b68aa31 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -20,10 +20,12 @@ #include "trackpoint.h" static const char * const trackpoint_variants[] = { - [TP_VARIANT_IBM] = "IBM", - [TP_VARIANT_ALPS] = "ALPS", - [TP_VARIANT_ELAN] = "Elan", - [TP_VARIANT_NXP] = "NXP", + [TP_VARIANT_IBM] = "IBM", + [TP_VARIANT_ALPS] = "ALPS", + [TP_VARIANT_ELAN] = "Elan", + [TP_VARIANT_NXP] = "NXP", + [TP_VARIANT_JYT_SYNAPTICS] = "JYT_Synaptics", + [TP_VARIANT_SYNAPTICS] = "Synaptics", }; /* diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 538986e5ac5b..4ebcdf802e9a 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -27,10 +27,12 @@ * 0x01 was the original IBM trackpoint, others implement very limited * subset of trackpoint features. */ -#define TP_VARIANT_IBM 0x01 -#define TP_VARIANT_ALPS 0x02 -#define TP_VARIANT_ELAN 0x03 -#define TP_VARIANT_NXP 0x04 +#define TP_VARIANT_IBM 0x01 +#define TP_VARIANT_ALPS 0x02 +#define TP_VARIANT_ELAN 0x03 +#define TP_VARIANT_NXP 0x04 +#define TP_VARIANT_JYT_SYNAPTICS 0x05 +#define TP_VARIANT_SYNAPTICS 0x06 /* * Commands -- GitLab From 9046e7c4718a29afb986213217954885af05e9d6 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 8 Sep 2020 16:27:29 -0700 Subject: [PATCH 0835/1304] Input: i8042 - add Entroware Proteus EL07R4 to nomux and reset lists commit c4440b8a457779adeec42c5e181cb4016f19ce0f upstream. 
The keyboard drops keypresses early during boot unless both the nomux and reset quirks are set. Add DMI table entries for this. BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1806085 Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20200907095656.13155-1-hdegoede@redhat.com Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/serio/i8042-x86ia64io.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 8134c7f92816..7c05e09abacf 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -552,6 +552,14 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), }, }, + { + /* Entroware Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + }, + }, { } }; @@ -680,6 +688,14 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"), }, }, + { + /* Entroware Proteus */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Entroware"), + DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"), + DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"), + }, + }, { } }; -- GitLab From bafdc39d1c95f7572eba887e6c3707992c430931 Mon Sep 17 00:00:00 2001 From: Tobias Diedrich Date: Mon, 14 Sep 2020 19:36:28 +0200 Subject: [PATCH 0836/1304] serial: 8250_pci: Add Realtek 816a and 816b commit 3c5a87be170aba8ac40982182f812dcff6ed1ad1 upstream. These serial ports are exposed by the OOB-management-engine on RealManage-enabled network cards (e.g. AMD DASH enabled systems using Realtek cards). Because these have 3 BARs, they fail the "num_iomem <= 1" check in serial_pci_guess_board. 
I've manually checked the two IOMEM regions and BAR 2 doesn't seem to respond to reads, but BAR 4 seems to be an MMIO version of the IO ports (untested). With this change, the ports are detected: 0000:02:00.1: ttyS0 at I/O 0x2200 (irq = 82, base_baud = 115200) is a 16550A 0000:02:00.2: ttyS1 at I/O 0x2100 (irq = 55, base_baud = 115200) is a 16550A lspci output: 02:00.1 0700: 10ec:816a (rev 0e) (prog-if 02 [16550]) Subsystem: 17aa:5082 Control: I/O+ Mem+ BusMaster- SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B- DisINTx- Status: Cap+ 66MHz- UDF- FastB2B- ParErr- DEVSEL=fast >TAbort+ SERR- Cc: stable Link: https://lore.kernel.org/r/20200914173628.GA22508@yamamaya.is-a-geek.org Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_pci.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 02091782bc1e..725e5842b8ac 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5236,6 +5236,17 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_wch384_4 }, + /* + * Realtek RealManage + */ + { PCI_VENDOR_ID_REALTEK, 0x816a, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_REALTEK, 0x816b, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b0_1_115200 }, + /* Fintek PCI serial cards */ { PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 }, { PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 }, -- GitLab From e8a0dc8183be1fd9b55abf4e85dfb98160eb790b Mon Sep 17 00:00:00 2001 From: Arvind Sankar Date: Tue, 11 Aug 2020 20:43:08 -0400 Subject: [PATCH 0837/1304] x86/boot/compressed: Disable relocation relaxation commit 09e43968db40c33a73e9ddbfd937f46d5c334924 upstream. 
The x86-64 psABI [0] specifies special relocation types (R_X86_64_[REX_]GOTPCRELX) for indirection through the Global Offset Table, semantically equivalent to R_X86_64_GOTPCREL, which the linker can take advantage of for optimization (relaxation) at link time. This is supported by LLD and binutils versions 2.26 onwards. The compressed kernel is position-independent code, however, when using LLD or binutils versions before 2.27, it must be linked without the -pie option. In this case, the linker may optimize certain instructions into a non-position-independent form, by converting foo@GOTPCREL(%rip) to $foo. This potential issue has been present with LLD and binutils-2.26 for a long time, but it has never manifested itself before now: - LLD and binutils-2.26 only relax movq foo@GOTPCREL(%rip), %reg to leaq foo(%rip), %reg which is still position-independent, rather than mov $foo, %reg which is permitted by the psABI when -pie is not enabled. - GCC happens to only generate GOTPCREL relocations on mov instructions. - CLang does generate GOTPCREL relocations on non-mov instructions, but when building the compressed kernel, it uses its integrated assembler (due to the redefinition of KBUILD_CFLAGS dropping -no-integrated-as), which has so far defaulted to not generating the GOTPCRELX relocations. Nick Desaulniers reports [1,2]: "A recent change [3] to a default value of configuration variable (ENABLE_X86_RELAX_RELOCATIONS OFF -> ON) in LLVM now causes Clang's integrated assembler to emit R_X86_64_GOTPCRELX/R_X86_64_REX_GOTPCRELX relocations. LLD will relax instructions with these relocations based on whether the image is being linked as position independent or not. When not, then LLD will relax these instructions to use absolute addressing mode (R_RELAX_GOT_PC_NOPIC). This causes kernels built with Clang and linked with LLD to fail to boot." 
Patch series [4] is a solution to allow the compressed kernel to be linked with -pie unconditionally, but even if merged is unlikely to be backported. As a simple solution that can be applied to stable as well, prevent the assembler from generating the relaxed relocation types using the -mrelax-relocations=no option. For ease of backporting, do this unconditionally. [0] https://gitlab.com/x86-psABIs/x86-64-ABI/-/blob/master/x86-64-ABI/linker-optimization.tex#L65 [1] https://lore.kernel.org/lkml/20200807194100.3570838-1-ndesaulniers@google.com/ [2] https://github.com/ClangBuiltLinux/linux/issues/1121 [3] https://reviews.llvm.org/rGc41a18cf61790fc898dcda1055c3efbf442c14c0 [4] https://lore.kernel.org/lkml/20200731202738.2577854-1-nivedita@alum.mit.edu/ Reported-by: Nick Desaulniers Signed-off-by: Arvind Sankar Signed-off-by: Ingo Molnar Tested-by: Nick Desaulniers Tested-by: Sedat Dilek Acked-by: Ard Biesheuvel Reviewed-by: Nick Desaulniers Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200812004308.1448603-1-nivedita@alum.mit.edu Signed-off-by: Greg Kroah-Hartman --- arch/x86/boot/compressed/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 466f66c8a7f8..b337a0cd58ba 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -38,6 +38,8 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += -Wno-pointer-sign +# Disable relocation relaxation in case the link is not PIE. 
+KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n -- GitLab From ad81a334b22ad05198c211e689115ad90747e5d8 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Wed, 16 Sep 2020 18:18:25 +0100 Subject: [PATCH 0838/1304] ehci-hcd: Move include to keep CRC stable commit 29231826f3bd65500118c473fccf31c0cf14dbc0 upstream. The CRC calculation done by genksyms is triggered when the parser hits EXPORT_SYMBOL*() macros. At this point, genksyms recursively expands the types of the function parameters, and uses that as the input for the CRC calculation. In the case of forward-declared structs, the type expands to 'UNKNOWN'. Following this, it appears that the result of the expansion of each type is cached somewhere, and seems to be re-used when/if the same type is seen again for another exported symbol in the same C file. Unfortunately, this can cause CRC 'stability' issues when a struct definition becomes visible in the middle of a C file. For example, let's assume code with the following pattern: struct foo; int bar(struct foo *arg) { /* Do work ... */ } EXPORT_SYMBOL_GPL(bar); /* This contains struct foo's definition */ #include "foo.h" int baz(struct foo *arg) { /* Do more work ... */ } EXPORT_SYMBOL_GPL(baz); Here, baz's CRC will be computed using the expansion of struct foo that was cached after bar's CRC calculation ('UNKOWN' here). But if EXPORT_SYMBOL_GPL(bar) is removed from the file (because of e.g. symbol trimming using CONFIG_TRIM_UNUSED_KSYMS), struct foo will be expanded late, during baz's CRC calculation, which now has visibility over the full struct definition, hence resulting in a different CRC for baz. The proper fix for this certainly is in genksyms, but that will take me some time to get right. 
In the meantime, we have seen one occurrence of this in the ehci-hcd code which hits this problem because of the way it includes C files halfway through the code together with an unlucky mix of symbol trimming. In order to workaround this, move the include done in ehci-hub.c early in ehci-hcd.c, hence making sure the struct definitions are visible to the entire file. This improves CRC stability of the ehci-hcd exports even when symbol trimming is enabled. Acked-by: Alan Stern Cc: stable Signed-off-by: Quentin Perret Link: https://lore.kernel.org/r/20200916171825.3228122-1-qperret@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ehci-hcd.c | 1 + drivers/usb/host/ehci-hub.c | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 8608ac513fb7..caf9f6b1cd34 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index ce0eaf7d7c12..087402aec5cb 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -14,7 +14,6 @@ */ /*-------------------------------------------------------------------------*/ -#include #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) -- GitLab From 349c5add6d85b10caa99c5e28790bf5d5f0d90e8 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 8 Sep 2020 11:51:06 +1000 Subject: [PATCH 0839/1304] powerpc/dma: Fix dma_map_ops::get_required_mask commit 437ef802e0adc9f162a95213a3488e8646e5fc03 upstream. There are 2 problems with it: 1. "<" vs expected "<<" 2. the shift number is an IOMMU page number mask, not an address mask as the IOMMU page shift is missing. This did not hit us before f1565c24b596 ("powerpc: use the generic dma_ops_bypass mode") because we had additional code to handle bypass mask so this chunk (almost?) 
never executed. However there were reports that aacraid does not work with "iommu=nobypass". After f1565c24b596, aacraid (and probably others which call dma_get_required_mask() before setting the mask) was unable to enable 64bit DMA and fall back to using IOMMU which was known not to work, one of the problems is double free of an IOMMU page. This fixes DMA for aacraid, both with and without "iommu=nobypass" in the kernel command line. Verified with "stress-ng -d 4". Fixes: 6a5c7be5e484 ("powerpc: Override dma_get_required_mask by platform hook and ops") Cc: stable@vger.kernel.org # v3.2+ Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200908015106.79661-1-aik@ozlabs.ru Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/dma-iommu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index f9fe2080ceb9..eed3543aeca4 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -100,7 +100,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev) if (!tbl) return 0; - mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); + mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) + + tbl->it_page_shift - 1); mask += mask - 1; return mask; -- GitLab From c3bba4b2239a4565a72742aa0ed4752674599c95 Mon Sep 17 00:00:00 2001 From: Adam Borowski Date: Tue, 9 Oct 2018 08:28:03 +0200 Subject: [PATCH 0840/1304] x86/defconfig: Enable CONFIG_USB_XHCI_HCD=y commit 72a9c673636b779e370983fea08e40f97039b981 upstream. A spanking new machine I just got has all but one USB ports wired as 3.0. Booting defconfig resulted in no keyboard or mouse, which was pretty uncool. Let's enable that -- USB3 is ubiquitous rather than an oddity. As 'y' not 'm' -- recovering from initrd problems needs a keyboard. Also add it to the 32-bit defconfig. 
Signed-off-by: Adam Borowski Cc: Greg Kroah-Hartman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-usb@vger.kernel.org Link: http://lkml.kernel.org/r/20181009062803.4332-1-kilobyte@angband.pl Signed-off-by: Ingo Molnar Cc: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- arch/x86/configs/i386_defconfig | 1 + arch/x86/configs/x86_64_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 5a23a4ccd755..9218cb128661 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -245,6 +245,7 @@ CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_TT_NEWSCHED=y CONFIG_USB_OHCI_HCD=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index dc0881292904..146a12293396 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -241,6 +241,7 @@ CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_TT_NEWSCHED=y CONFIG_USB_OHCI_HCD=y -- GitLab From d09b80172c22df7a5e2ec58aa1a0fbe8914752e7 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 23 Sep 2020 12:11:02 +0200 Subject: [PATCH 0841/1304] Linux 4.19.147 Tested-by: Jon Hunter Tested-by: Linux Kernel Functional Testing Tested-by: Guenter Roeck Link: https://lore.kernel.org/lkml/20200921162034.660953761@linuxfoundation.org/ Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index aaeb3f3dbcea..ee648a902ce3 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 146 +SUBLEVEL = 147 EXTRAVERSION = NAME = "People's Front" -- GitLab From 6697b9ebbecb4cf6ea12885a9ecaa41f0110b863 Mon Sep 17 00:00:00 2001 
From: Haseeb Khan Date: Thu, 24 Sep 2020 20:13:22 +0530 Subject: [PATCH 0842/1304] msm: cvp: Avoid releasing non-existent ARP buffer CVP firmware would crash if non-existent ARP buffer is released. Change-Id: I4d4055423bd8c040978f1c005fd957488c65776f Signed-off-by: Haseeb Khan --- drivers/media/platform/msm/cvp/msm_cvp_common.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c index f05856013ec3..105cb6b23c4b 100644 --- a/drivers/media/platform/msm/cvp/msm_cvp_common.c +++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c @@ -1749,7 +1749,8 @@ int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst) return rc; error: - cvp_comm_release_persist_buffers(inst); + if (rc != -ENOMEM) + cvp_comm_release_persist_buffers(inst); return rc; } -- GitLab From e2a92b60205ca0cf60a72eca9263e9d215d15977 Mon Sep 17 00:00:00 2001 From: Will McVicker Date: Mon, 3 Aug 2020 16:18:08 -0700 Subject: [PATCH 0843/1304] ANDROID: drop KERNEL_DIR setting in build.config.common And replace many references of ${ROOT_DIR}/common with ${ROOT_DIR}/${KERNEL_DIR}, which makes it a lot easier to branch off of ACK and update the KERNEL_DIR. The variable of KERNEL_DIR will be set in build/_setup_env.sh to the directory of the build config file by default if it's not set explicitly in the build config file or in environment. 
Test: build test with following configs common/build.config.gki.aarch64 common-modules/virtual-device/build.config.cuttlefish.aarch64 common-modules/virtual-device/build.config.goldfish.aarch64 Bug: 162785964 Change-Id: I790ac0ded10bd790484f14c6f93d53e06c38b830 Signed-off-by: Will McVicker Signed-off-by: Yongqin Liu (cherry picked from commit 931718fa65fe5e34d143250c048f591a7d74e5f8) --- build.config.allmodconfig.aarch64 | 6 +++--- build.config.allmodconfig.arm | 6 +++--- build.config.allmodconfig.x86_64 | 6 +++--- build.config.common | 1 - build.config.gki-debug.aarch64 | 2 +- build.config.gki-debug.x86_64 | 2 +- build.config.gki.aarch64 | 6 +++--- build.config.gki.x86_64 | 6 +++--- build.config.gki_kasan | 1 - build.config.gki_kasan.aarch64 | 7 +++---- build.config.gki_kasan.x86_64 | 6 +++--- 11 files changed, 23 insertions(+), 26 deletions(-) diff --git a/build.config.allmodconfig.aarch64 b/build.config.allmodconfig.aarch64 index 863ab1caddab..2fbe380e030a 100644 --- a/build.config.allmodconfig.aarch64 +++ b/build.config.allmodconfig.aarch64 @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.aarch64 -. ${ROOT_DIR}/common/build.config.allmodconfig +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig diff --git a/build.config.allmodconfig.arm b/build.config.allmodconfig.arm index 5dd94819c871..e92744a9b518 100644 --- a/build.config.allmodconfig.arm +++ b/build.config.allmodconfig.arm @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.arm -. ${ROOT_DIR}/common/build.config.allmodconfig +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.arm +. 
${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig diff --git a/build.config.allmodconfig.x86_64 b/build.config.allmodconfig.x86_64 index bedb3869d99b..f06b30c8426f 100644 --- a/build.config.allmodconfig.x86_64 +++ b/build.config.allmodconfig.x86_64 @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.x86_64 -. ${ROOT_DIR}/common/build.config.allmodconfig +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig diff --git a/build.config.common b/build.config.common index 0c20768c2301..1454d4c3c7c7 100644 --- a/build.config.common +++ b/build.config.common @@ -1,6 +1,5 @@ BRANCH=android-4.19-stable KMI_GENERATION=0 -KERNEL_DIR=common CC=clang LD=ld.lld diff --git a/build.config.gki-debug.aarch64 b/build.config.gki-debug.aarch64 index 58cee7a6362b..c1fe2f03a279 100644 --- a/build.config.gki-debug.aarch64 +++ b/build.config.gki-debug.aarch64 @@ -1,3 +1,3 @@ -. ${ROOT_DIR}/common/build.config.gki.aarch64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64 TRIM_NONLISTED_KMI="" KMI_SYMBOL_LIST_STRICT_MODE="" diff --git a/build.config.gki-debug.x86_64 b/build.config.gki-debug.x86_64 index 9ee51e7de18c..d89b7ad4e804 100644 --- a/build.config.gki-debug.x86_64 +++ b/build.config.gki-debug.x86_64 @@ -1,3 +1,3 @@ -. ${ROOT_DIR}/common/build.config.gki.x86_64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.x86_64 TRIM_NONLISTED_KMI="" KMI_SYMBOL_LIST_STRICT_MODE="" diff --git a/build.config.gki.aarch64 b/build.config.gki.aarch64 index 7353591fade5..b540900d0940 100644 --- a/build.config.gki.aarch64 +++ b/build.config.gki.aarch64 @@ -1,6 +1,6 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.aarch64 -. ${ROOT_DIR}/common/build.config.gki +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64 +. 
${ROOT_DIR}/${KERNEL_DIR}/build.config.gki ABI_DEFINITION=android/abi_gki_aarch64.xml KMI_SYMBOL_LIST=android/abi_gki_aarch64 diff --git a/build.config.gki.x86_64 b/build.config.gki.x86_64 index 627d1e1c27ab..0e04fc692df2 100644 --- a/build.config.gki.x86_64 +++ b/build.config.gki.x86_64 @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.x86_64 -. ${ROOT_DIR}/common/build.config.gki +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki diff --git a/build.config.gki_kasan b/build.config.gki_kasan index e682b0d490bd..b3273b10a01c 100644 --- a/build.config.gki_kasan +++ b/build.config.gki_kasan @@ -1,6 +1,5 @@ DEFCONFIG=gki_defconfig POST_DEFCONFIG_CMDS="check_defconfig && update_kasan_config" -KERNEL_DIR=common function update_kasan_config() { ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \ -e CONFIG_KASAN \ diff --git a/build.config.gki_kasan.aarch64 b/build.config.gki_kasan.aarch64 index 6277fd662863..9fd2560c45e8 100644 --- a/build.config.gki_kasan.aarch64 +++ b/build.config.gki_kasan.aarch64 @@ -1,4 +1,3 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.aarch64 -. ${ROOT_DIR}/common/build.config.gki_kasan - +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64 +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kasan diff --git a/build.config.gki_kasan.x86_64 b/build.config.gki_kasan.x86_64 index 6a379eceeb31..eec645805f39 100644 --- a/build.config.gki_kasan.x86_64 +++ b/build.config.gki_kasan.x86_64 @@ -1,4 +1,4 @@ -. ${ROOT_DIR}/common/build.config.common -. ${ROOT_DIR}/common/build.config.x86_64 -. ${ROOT_DIR}/common/build.config.gki_kasan +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64 +. 
${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kasan -- GitLab From b59a23d596807a5aa88d8dd5655a66c6843729b3 Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Wed, 22 Jul 2020 04:00:53 -0700 Subject: [PATCH 0844/1304] af_key: pfkey_dump needs parameter validation commit 37bd22420f856fcd976989f1d4f1f7ad28e1fcac upstream. In pfkey_dump() dplen and splen can both be specified to access the xfrm_address_t structure out of bounds in __xfrm_state_filter_match() when it calls addr_match() with the indexes. Return EINVAL if either are out of range. Signed-off-by: Mark Salyzyn Cc: netdev@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: kernel-team@android.com Cc: Steffen Klassert Cc: Herbert Xu Cc: "David S. Miller" Cc: Jakub Kicinski Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/key/af_key.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/key/af_key.c b/net/key/af_key.c index 1982f9f31deb..e340e97224c3 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1855,6 +1855,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; + if ((xfilter->sadb_x_filter_splen >= + (sizeof(xfrm_address_t) << 3)) || + (xfilter->sadb_x_filter_dplen >= + (sizeof(xfrm_address_t) << 3))) { + mutex_unlock(&pfk->dump_lock); + return -EINVAL; + } filter = kmalloc(sizeof(*filter), GFP_KERNEL); if (filter == NULL) { mutex_unlock(&pfk->dump_lock); -- GitLab From 19184bd06f488af62924ff1747614a8cb284ad63 Mon Sep 17 00:00:00 2001 From: Rustam Kovhaev Date: Mon, 7 Sep 2020 11:55:35 -0700 Subject: [PATCH 0845/1304] KVM: fix memory leak in kvm_io_bus_unregister_dev() [ Upstream commit f65886606c2d3b562716de030706dfe1bea4ed5e ] when kmalloc() fails in kvm_io_bus_unregister_dev(), before removing the bus, we should iterate over all other devices linked to it and call kvm_iodevice_destructor() 
for them Fixes: 90db10434b16 ("KVM: kvm_io_bus_unregister_dev() should never fail") Cc: stable@vger.kernel.org Reported-and-tested-by: syzbot+f196caa45793d6374707@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?extid=f196caa45793d6374707 Signed-off-by: Rustam Kovhaev Reviewed-by: Vitaly Kuznetsov Message-Id: <20200907185535.233114-1-rkovhaev@gmail.com> Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- virt/kvm/kvm_main.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 2155b52b17ec..6bd01d12df2e 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3844,7 +3844,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev) { - int i; + int i, j; struct kvm_io_bus *new_bus, *bus; bus = kvm_get_bus(kvm, bus_idx); @@ -3861,17 +3861,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * sizeof(struct kvm_io_range)), GFP_KERNEL); - if (!new_bus) { + if (new_bus) { + memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); + new_bus->dev_count--; + memcpy(new_bus->range + i, bus->range + i + 1, + (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); + } else { pr_err("kvm: failed to shrink bus, removing it completely\n"); - goto broken; + for (j = 0; j < bus->dev_count; j++) { + if (j == i) + continue; + kvm_iodevice_destructor(bus->range[j].dev); + } } - memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); - new_bus->dev_count--; - memcpy(new_bus->range + i, bus->range + i + 1, - (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); - -broken: rcu_assign_pointer(kvm->buses[bus_idx], new_bus); synchronize_srcu_expedited(&kvm->srcu); kfree(bus); -- GitLab From d44a437826119e8307c3904c1e4f513095ea17cb Mon Sep 17 00:00:00 
2001 From: Muchun Song Date: Fri, 18 Sep 2020 21:20:21 -0700 Subject: [PATCH 0846/1304] kprobes: fix kill kprobe which has been marked as gone [ Upstream commit b0399092ccebd9feef68d4ceb8d6219a8c0caa05 ] If a kprobe is marked as gone, we should not kill it again. Otherwise, we can disarm the kprobe more than once. In that case, the statistics of kprobe_ftrace_enabled can unbalance which can lead to that kprobe do not work. Fixes: e8386a0cb22f ("kprobes: support probing module __exit function") Co-developed-by: Chengming Zhou Signed-off-by: Muchun Song Signed-off-by: Chengming Zhou Signed-off-by: Andrew Morton Acked-by: Masami Hiramatsu Cc: "Naveen N . Rao" Cc: Anil S Keshavamurthy Cc: David S. Miller Cc: Song Liu Cc: Steven Rostedt Cc: Link: https://lkml.kernel.org/r/20200822030055.32383-1-songmuchun@bytedance.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- kernel/kprobes.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index eb4bffe6d764..230d9d599b5a 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2061,6 +2061,9 @@ static void kill_kprobe(struct kprobe *p) { struct kprobe *kp; + if (WARN_ON_ONCE(kprobe_gone(p))) + return; + p->flags |= KPROBE_FLAG_GONE; if (kprobe_aggrprobe(p)) { /* @@ -2243,7 +2246,10 @@ static int kprobes_module_callback(struct notifier_block *nb, mutex_lock(&kprobe_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; - hlist_for_each_entry_rcu(p, head, hlist) + hlist_for_each_entry_rcu(p, head, hlist) { + if (kprobe_gone(p)) + continue; + if (within_module_init((unsigned long)p->addr, mod) || (checkcore && within_module_core((unsigned long)p->addr, mod))) { @@ -2260,6 +2266,7 @@ static int kprobes_module_callback(struct notifier_block *nb, */ kill_kprobe(p); } + } } mutex_unlock(&kprobe_mutex); return NOTIFY_DONE; -- GitLab From ec56646e3b2a9a0c3a2fa63732fab731009a25af Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Fri, 18 Sep 
2020 21:20:24 -0700 Subject: [PATCH 0847/1304] mm/thp: fix __split_huge_pmd_locked() for migration PMD [ Upstream commit ec0abae6dcdf7ef88607c869bf35a4b63ce1b370 ] A migrating transparent huge page has to already be unmapped. Otherwise, the page could be modified while it is being copied to a new page and data could be lost. The function __split_huge_pmd() checks for a PMD migration entry before calling __split_huge_pmd_locked() leading one to think that __split_huge_pmd_locked() can handle splitting a migrating PMD. However, the code always increments the page->_mapcount and adjusts the memory control group accounting assuming the page is mapped. Also, if the PMD entry is a migration PMD entry, the call to is_huge_zero_pmd(*pmd) is incorrect because it calls pmd_pfn(pmd) instead of migration_entry_to_pfn(pmd_to_swp_entry(pmd)). Fix these problems by checking for a PMD migration entry. Fixes: 84c3fc4e9c56 ("mm: thp: check pmd migration entry in common path") Signed-off-by: Ralph Campbell Signed-off-by: Andrew Morton Reviewed-by: Yang Shi Reviewed-by: Zi Yan Cc: Jerome Glisse Cc: John Hubbard Cc: Alistair Popple Cc: Christoph Hellwig Cc: Jason Gunthorpe Cc: Bharata B Rao Cc: Ben Skeggs Cc: Shuah Khan Cc: [4.14+] Link: https://lkml.kernel.org/r/20200903183140.19055-1-rcampbell@nvidia.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/huge_memory.c | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1443ae6fee9b..8b137248b146 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2145,7 +2145,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, put_page(page); add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); return; - } else if (is_huge_zero_pmd(*pmd)) { + } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) { /* * FIXME: Do we want to invalidate secondary mmu by calling * mmu_notifier_invalidate_range() see 
comments below inside @@ -2233,27 +2233,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pte = pte_offset_map(&_pmd, addr); BUG_ON(!pte_none(*pte)); set_pte_at(mm, addr, pte, entry); - atomic_inc(&page[i]._mapcount); - pte_unmap(pte); - } - - /* - * Set PG_double_map before dropping compound_mapcount to avoid - * false-negative page_mapped(). - */ - if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { - for (i = 0; i < HPAGE_PMD_NR; i++) + if (!pmd_migration) atomic_inc(&page[i]._mapcount); + pte_unmap(pte); } - if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { - /* Last compound_mapcount is gone. */ - __dec_node_page_state(page, NR_ANON_THPS); - if (TestClearPageDoubleMap(page)) { - /* No need in mapcount reference anymore */ + if (!pmd_migration) { + /* + * Set PG_double_map before dropping compound_mapcount to avoid + * false-negative page_mapped(). + */ + if (compound_mapcount(page) > 1 && + !TestSetPageDoubleMap(page)) { for (i = 0; i < HPAGE_PMD_NR; i++) - atomic_dec(&page[i]._mapcount); + atomic_inc(&page[i]._mapcount); + } + + lock_page_memcg(page); + if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { + /* Last compound_mapcount is gone. */ + __dec_lruvec_page_state(page, NR_ANON_THPS); + if (TestClearPageDoubleMap(page)) { + /* No need in mapcount reference anymore */ + for (i = 0; i < HPAGE_PMD_NR; i++) + atomic_dec(&page[i]._mapcount); + } } + unlock_page_memcg(page); } smp_wmb(); /* make pte visible before pmd */ -- GitLab From 35145dab2074abf12c1486317c912d8cff5a5fa8 Mon Sep 17 00:00:00 2001 From: Ganji Aravind Date: Fri, 4 Sep 2020 15:58:18 +0530 Subject: [PATCH 0848/1304] cxgb4: Fix offset when clearing filter byte counters [ Upstream commit 94cc242a067a869c29800aa789d38b7676136e50 ] Pass the correct offset to clear the stale filter hit bytes counter. Otherwise, the counter starts incrementing from the stale information, instead of 0. 
Fixes: 12b276fbf6e0 ("cxgb4: add support to create hash filters") Signed-off-by: Ganji Aravind Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 97d97de9accc..bb3ee55cb72c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1591,13 +1591,16 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id, static int configure_filter_tcb(struct adapter *adap, unsigned int tid, struct filter_entry *f) { - if (f->fs.hitcnts) + if (f->fs.hitcnts) { set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W, - TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) | + TCB_TIMESTAMP_V(TCB_TIMESTAMP_M), + TCB_TIMESTAMP_V(0ULL), + 1); + set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W, TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M), - TCB_TIMESTAMP_V(0ULL) | TCB_RTT_TS_RECENT_AGE_V(0ULL), 1); + } if (f->fs.newdmac) set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1, -- GitLab From c797110d97c48054d1491251fd713900ff51615c Mon Sep 17 00:00:00 2001 From: Mark Gray Date: Wed, 16 Sep 2020 05:19:35 -0400 Subject: [PATCH 0849/1304] geneve: add transport ports in route lookup for geneve [ Upstream commit 34beb21594519ce64a55a498c2fe7d567bc1ca20 ] This patch adds transport ports information for route lookup so that IPsec can select Geneve tunnel traffic to do encryption. This is needed for OVS/OVN IPsec with encrypted Geneve tunnels. This can be tested by configuring a host-host VPN using an IKE daemon and specifying port numbers. For example, for an Openswan-type configuration, the following parameters should be configured on both hosts and IPsec set up as-per normal: $ cat /etc/ipsec.conf conn in ... left=$IP1 right=$IP2 ... leftprotoport=udp/6081 rightprotoport=udp ... conn out ... 
left=$IP1 right=$IP2 ... leftprotoport=udp rightprotoport=udp/6081 ... The tunnel can then be setup using "ip" on both hosts (but changing the relevant IP addresses): $ ip link add tun type geneve id 1000 remote $IP2 $ ip addr add 192.168.0.1/24 dev tun $ ip link set tun up This can then be tested by pinging from $IP1: $ ping 192.168.0.2 Without this patch the traffic is unencrypted on the wire. Fixes: 2d07dc79fe04 ("geneve: add initial netdev driver for GENEVE tunnels") Signed-off-by: Qiuyu Xiao Signed-off-by: Mark Gray Reviewed-by: Greg Rose Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/geneve.c | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 817c290b78cd..d0b5844c8a31 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -721,7 +721,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs4, struct flowi4 *fl4, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -737,6 +738,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, fl4->flowi4_proto = IPPROTO_UDP; fl4->daddr = info->key.u.ipv4.dst; fl4->saddr = info->key.u.ipv4.src; + fl4->fl4_dport = dport; + fl4->fl4_sport = sport; tos = info->key.tos; if ((tos == 1) && !geneve->collect_md) { @@ -771,7 +774,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs6, struct flowi6 *fl6, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -787,6 +791,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, 
fl6->flowi6_proto = IPPROTO_UDP; fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; + fl6->fl6_dport = dport; + fl6->fl6_sport = sport; + prio = info->key.tos; if ((prio == 1) && !geneve->collect_md) { prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); @@ -833,14 +840,15 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 df; int err; - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, GENEVE_IPV4_HLEN + info->options_len); - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -875,13 +883,14 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len); - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -958,13 +967,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); struct geneve_dev *geneve = netdev_priv(dev); + __be16 sport; if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct flowi4 fl4; + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + 
geneve->info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -974,9 +988,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct flowi6 fl6; + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -987,8 +1005,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return -EINVAL; } - info->key.tp_src = udp_flow_src_port(geneve->net, skb, - 1, USHRT_MAX, true); + info->key.tp_src = sport; info->key.tp_dst = geneve->info.key.tp_dst; return 0; } -- GitLab From 45676c0bc28eff8f46455b28e2db80a77676488b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 9 Sep 2020 12:46:48 +0300 Subject: [PATCH 0850/1304] hdlc_ppp: add range checks in ppp_cp_parse_cr() [ Upstream commit 66d42ed8b25b64eb63111a2b8582c5afc8bf1105 ] There are a couple bugs here: 1) If opt[1] is zero then this results in a forever loop. If the value is less than 2 then it is invalid. 2) It assumes that "len" is more than sizeof(valid_accm) or 6 which can result in memory corruption. In the case of LCP_OPTION_ACCM, then we should check "opt[1]" instead of "len" because, if "opt[1]" is less than sizeof(valid_accm) then "nak_len" gets out of sync and it can lead to memory corruption in the next iterations through the loop. In case of LCP_OPTION_MAGIC, the only valid value for opt[1] is 6, but the code is trying to log invalid data so we should only discard the data when "len" is less than 6 because that leads to a read overflow. 
Reported-by: ChenNan Of Chaitin Security Research Lab Fixes: e022c2f07ae5 ("WAN: new synchronous PPP implementation for generic HDLC.") Signed-off-by: Dan Carpenter Reviewed-by: Eric Dumazet Reviewed-by: Greg Kroah-Hartman Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/wan/hdlc_ppp.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index ab8b3cbbb205..85844f26547d 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -386,11 +386,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } for (opt = data; len; len -= opt[1], opt += opt[1]) { - if (len < 2 || len < opt[1]) { - dev->stats.rx_errors++; - kfree(out); - return; /* bad packet, drop silently */ - } + if (len < 2 || opt[1] < 2 || len < opt[1]) + goto err_out; if (pid == PID_LCP) switch (opt[0]) { @@ -398,6 +395,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, continue; /* MRU always OK and > 1500 bytes? 
*/ case LCP_OPTION_ACCM: /* async control character map */ + if (opt[1] < sizeof(valid_accm)) + goto err_out; if (!memcmp(opt, valid_accm, sizeof(valid_accm))) continue; @@ -409,6 +408,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } break; case LCP_OPTION_MAGIC: + if (len < 6) + goto err_out; if (opt[1] != 6 || (!opt[2] && !opt[3] && !opt[4] && !opt[5])) break; /* reject invalid magic number */ @@ -427,6 +428,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); kfree(out); + return; + +err_out: + dev->stats.rx_errors++; + kfree(out); } static int ppp_rx(struct sk_buff *skb) -- GitLab From 2fc322bf67594e240eb23b4e0c6c8a09c69f9918 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Tue, 8 Sep 2020 14:09:34 -0700 Subject: [PATCH 0851/1304] ip: fix tos reflection in ack and reset packets [ Upstream commit ba9e04a7ddf4f22a10e05bf9403db6b97743c7bf ] Currently, in tcp_v4_reqsk_send_ack() and tcp_v4_send_reset(), we echo the TOS value of the received packets in the response. However, we do not want to echo the lower 2 ECN bits in accordance with RFC 3168 6.1.5 robustness principles. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Wei Wang Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/ip_output.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index fbf30122e8bf..f0faf1193dd8 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include #include @@ -1582,7 +1583,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, if (IS_ERR(rt)) return; - inet_sk(sk)->tos = arg->tos; + inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; sk->sk_priority = skb->priority; sk->sk_protocol = ip_hdr(skb)->protocol; -- GitLab From f2e5359dd3bffa434cba0f62179b1e72065183af Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 8 Sep 2020 01:20:23 -0700 Subject: [PATCH 0852/1304] ipv6: avoid lockdep issue in fib6_del() [ Upstream commit 843d926b003ea692468c8cc5bea1f9f58dfa8c75 ] syzbot reported twice a lockdep issue in fib6_del() [1] which I think is caused by net->ipv6.fib6_null_entry having a NULL fib6_table pointer. fib6_del() already checks for fib6_null_entry special case, we only need to return earlier. Bug seems to occur very rarely, I have thus chosen a 'bug origin' that makes backports not too complex. [1] WARNING: suspicious RCU usage 5.9.0-rc4-syzkaller #0 Not tainted ----------------------------- net/ipv6/ip6_fib.c:1996 suspicious rcu_dereference_protected() usage! 
other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 4 locks held by syz-executor.5/8095: #0: ffffffff8a7ea708 (rtnl_mutex){+.+.}-{3:3}, at: ppp_release+0x178/0x240 drivers/net/ppp/ppp_generic.c:401 #1: ffff88804c422dd8 (&net->ipv6.fib6_gc_lock){+.-.}-{2:2}, at: spin_trylock_bh include/linux/spinlock.h:414 [inline] #1: ffff88804c422dd8 (&net->ipv6.fib6_gc_lock){+.-.}-{2:2}, at: fib6_run_gc+0x21b/0x2d0 net/ipv6/ip6_fib.c:2312 #2: ffffffff89bd6a40 (rcu_read_lock){....}-{1:2}, at: __fib6_clean_all+0x0/0x290 net/ipv6/ip6_fib.c:2613 #3: ffff8880a82e6430 (&tb->tb6_lock){+.-.}-{2:2}, at: spin_lock_bh include/linux/spinlock.h:359 [inline] #3: ffff8880a82e6430 (&tb->tb6_lock){+.-.}-{2:2}, at: __fib6_clean_all+0x107/0x290 net/ipv6/ip6_fib.c:2245 stack backtrace: CPU: 1 PID: 8095 Comm: syz-executor.5 Not tainted 5.9.0-rc4-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x198/0x1fd lib/dump_stack.c:118 fib6_del+0x12b4/0x1630 net/ipv6/ip6_fib.c:1996 fib6_clean_node+0x39b/0x570 net/ipv6/ip6_fib.c:2180 fib6_walk_continue+0x4aa/0x8e0 net/ipv6/ip6_fib.c:2102 fib6_walk+0x182/0x370 net/ipv6/ip6_fib.c:2150 fib6_clean_tree+0xdb/0x120 net/ipv6/ip6_fib.c:2230 __fib6_clean_all+0x120/0x290 net/ipv6/ip6_fib.c:2246 fib6_clean_all net/ipv6/ip6_fib.c:2257 [inline] fib6_run_gc+0x113/0x2d0 net/ipv6/ip6_fib.c:2320 ndisc_netdev_event+0x217/0x350 net/ipv6/ndisc.c:1805 notifier_call_chain+0xb5/0x200 kernel/notifier.c:83 call_netdevice_notifiers_info+0xb5/0x130 net/core/dev.c:2033 call_netdevice_notifiers_extack net/core/dev.c:2045 [inline] call_netdevice_notifiers net/core/dev.c:2059 [inline] dev_close_many+0x30b/0x650 net/core/dev.c:1634 rollback_registered_many+0x3a8/0x1210 net/core/dev.c:9261 rollback_registered net/core/dev.c:9329 [inline] unregister_netdevice_queue+0x2dd/0x570 net/core/dev.c:10410 unregister_netdevice 
include/linux/netdevice.h:2774 [inline] ppp_release+0x216/0x240 drivers/net/ppp/ppp_generic.c:403 __fput+0x285/0x920 fs/file_table.c:281 task_work_run+0xdd/0x190 kernel/task_work.c:141 tracehook_notify_resume include/linux/tracehook.h:188 [inline] exit_to_user_mode_loop kernel/entry/common.c:163 [inline] exit_to_user_mode_prepare+0x1e1/0x200 kernel/entry/common.c:190 syscall_exit_to_user_mode+0x7e/0x2e0 kernel/entry/common.c:265 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: 421842edeaf6 ("net/ipv6: Add fib6_null_entry") Signed-off-by: Eric Dumazet Cc: David Ahern Reviewed-by: David Ahern Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_fib.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 5e8979c1f76d..05a206202e23 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1811,14 +1811,19 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, /* Need to own table->tb6_lock */ int fib6_del(struct fib6_info *rt, struct nl_info *info) { - struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, - lockdep_is_held(&rt->fib6_table->tb6_lock)); - struct fib6_table *table = rt->fib6_table; struct net *net = info->nl_net; struct fib6_info __rcu **rtp; struct fib6_info __rcu **rtp_next; + struct fib6_table *table; + struct fib6_node *fn; + + if (rt == net->ipv6.fib6_null_entry) + return -ENOENT; - if (!fn || rt == net->ipv6.fib6_null_entry) + table = rt->fib6_table; + fn = rcu_dereference_protected(rt->fib6_node, + lockdep_is_held(&table->tb6_lock)); + if (!fn) return -ENOENT; WARN_ON(!(fn->fn_flags & RTN_RTINFO)); -- GitLab From d0c2f72526c6cf7ad090ee3a85226d3da8e62458 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Thu, 10 Sep 2020 14:09:05 +0200 Subject: [PATCH 0853/1304] net: DCB: Validate DCB_ATTR_DCB_BUFFER argument [ Upstream commit 297e77e53eadb332d5062913447b104a772dc33b ] The parameter passed via DCB_ATTR_DCB_BUFFER is 
a struct dcbnl_buffer. The field prio2buffer is an array of IEEE_8021Q_MAX_PRIORITIES bytes, where each value is a number of a buffer to direct that priority's traffic to. That value is however never validated to lie within the bounds set by DCBX_MAX_BUFFERS. The only driver that currently implements the callback is mlx5 (maintainers CCd), and that does not do any validation either, in particual allowing incorrect configuration if the prio2buffer value does not fit into 4 bits. Instead of offloading the need to validate the buffer index to drivers, do it right there in core, and bounce the request if the value is too large. CC: Parav Pandit CC: Saeed Mahameed Fixes: e549f6f9c098 ("net/dcb: Add dcbnl buffer attribute") Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/dcb/dcbnl.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index a556cd708885..5ee6b94131b2 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1421,6 +1421,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; + int prio; int err; if (!ops) @@ -1469,6 +1470,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, struct dcbnl_buffer *buffer = nla_data(ieee[DCB_ATTR_DCB_BUFFER]); + for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) { + if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) { + err = -EINVAL; + goto err; + } + } + err = ops->dcbnl_setbuffer(netdev, buffer); if (err) goto err; -- GitLab From 76fde30cf12ccf3f6d0e731972d15da174159b71 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 5 Sep 2020 12:32:33 +0200 Subject: [PATCH 0854/1304] net: dsa: rtl8366: Properly clear member config [ Upstream commit 4ddcaf1ebb5e4e99240f29d531ee69d4244fe416 ] When removing a port from a VLAN we are 
just erasing the member config for the VLAN, which is wrong: other ports can be using it. Just mask off the port and only zero out the rest of the member config once ports using of the VLAN are removed from it. Reported-by: Florian Fainelli Fixes: d8652956cf37 ("net: dsa: realtek-smi: Add Realtek SMI driver") Signed-off-by: Linus Walleij Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/dsa/rtl8366.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 7e27c9aff9b7..430988f79722 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -452,13 +452,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port, return ret; if (vid == vlanmc.vid) { - /* clear VLAN member configurations */ - vlanmc.vid = 0; - vlanmc.priority = 0; - vlanmc.member = 0; - vlanmc.untag = 0; - vlanmc.fid = 0; - + /* Remove this port from the VLAN */ + vlanmc.member &= ~BIT(port); + vlanmc.untag &= ~BIT(port); + /* + * If no ports are members of this VLAN + * anymore then clear the whole member + * config so it can be reused. 
+ */ + if (!vlanmc.member && vlanmc.untag) { + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.fid = 0; + } ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); if (ret) { dev_err(smi->dev, -- GitLab From fe916542565b7bbb529c1fa5151812c1fbb07631 Mon Sep 17 00:00:00 2001 From: Necip Fazil Yildiran Date: Thu, 17 Sep 2020 19:46:43 +0300 Subject: [PATCH 0855/1304] net: ipv6: fix kconfig dependency warning for IPV6_SEG6_HMAC [ Upstream commit db7cd91a4be15e1485d6b58c6afc8761c59c4efb ] When IPV6_SEG6_HMAC is enabled and CRYPTO is disabled, it results in the following Kbuild warning: WARNING: unmet direct dependencies detected for CRYPTO_HMAC Depends on [n]: CRYPTO [=n] Selected by [y]: - IPV6_SEG6_HMAC [=y] && NET [=y] && INET [=y] && IPV6 [=y] WARNING: unmet direct dependencies detected for CRYPTO_SHA1 Depends on [n]: CRYPTO [=n] Selected by [y]: - IPV6_SEG6_HMAC [=y] && NET [=y] && INET [=y] && IPV6 [=y] WARNING: unmet direct dependencies detected for CRYPTO_SHA256 Depends on [n]: CRYPTO [=n] Selected by [y]: - IPV6_SEG6_HMAC [=y] && NET [=y] && INET [=y] && IPV6 [=y] The reason is that IPV6_SEG6_HMAC selects CRYPTO_HMAC, CRYPTO_SHA1, and CRYPTO_SHA256 without depending on or selecting CRYPTO while those configs are subordinate to CRYPTO. Honor the kconfig menu hierarchy to remove kconfig dependency warnings. Fixes: bf355b8d2c30 ("ipv6: sr: add core files for SR HMAC support") Signed-off-by: Necip Fazil Yildiran Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 613282c65a10..a32cf50c237d 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -321,6 +321,7 @@ config IPV6_SEG6_LWTUNNEL config IPV6_SEG6_HMAC bool "IPv6: Segment Routing HMAC support" depends on IPV6 + select CRYPTO select CRYPTO_HMAC select CRYPTO_SHA1 select CRYPTO_SHA256 -- GitLab From 749cc0b0c7f3dcdfe5842f998c0274e54987384f Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Tue, 8 Sep 2020 19:02:34 +0800 Subject: [PATCH 0856/1304] net: sch_generic: aviod concurrent reset and enqueue op for lockless qdisc [ Upstream commit 2fb541c862c987d02dfdf28f1545016deecfa0d5 ] Currently there is concurrent reset and enqueue operation for the same lockless qdisc when there is no lock to synchronize the q->enqueue() in __dev_xmit_skb() with the qdisc reset operation in qdisc_deactivate() called by dev_deactivate_queue(), which may cause out-of-bounds access for priv->ring[] in hns3 driver if user has requested a smaller queue num when __dev_xmit_skb() still enqueue a skb with a larger queue_mapping after the corresponding qdisc is reset, and call hns3_nic_net_xmit() with that skb later. Reused the existing synchronize_net() in dev_deactivate_many() to make sure skb with larger queue_mapping enqueued to old qdisc(which is saved in dev_queue->qdisc_sleeping) will always be reset when dev_reset_queue() is called. Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking") Signed-off-by: Yunsheng Lin Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/sched/sch_generic.c | 49 +++++++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 119e20cad662..bd96fd261dba 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -1115,27 +1115,36 @@ static void dev_deactivate_queue(struct net_device *dev, struct netdev_queue *dev_queue, void *_qdisc_default) { - struct Qdisc *qdisc_default = _qdisc_default; - struct Qdisc *qdisc; + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc); - qdisc = rtnl_dereference(dev_queue->qdisc); if (qdisc) { - bool nolock = qdisc->flags & TCQ_F_NOLOCK; - - if (nolock) - spin_lock_bh(&qdisc->seqlock); - spin_lock_bh(qdisc_lock(qdisc)); - if (!(qdisc->flags & TCQ_F_BUILTIN)) set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); + } +} - rcu_assign_pointer(dev_queue->qdisc, qdisc_default); - qdisc_reset(qdisc); +static void dev_reset_queue(struct net_device *dev, + struct netdev_queue *dev_queue, + void *_unused) +{ + struct Qdisc *qdisc; + bool nolock; - spin_unlock_bh(qdisc_lock(qdisc)); - if (nolock) - spin_unlock_bh(&qdisc->seqlock); - } + qdisc = dev_queue->qdisc_sleeping; + if (!qdisc) + return; + + nolock = qdisc->flags & TCQ_F_NOLOCK; + + if (nolock) + spin_lock_bh(&qdisc->seqlock); + spin_lock_bh(qdisc_lock(qdisc)); + + qdisc_reset(qdisc); + + spin_unlock_bh(qdisc_lock(qdisc)); + if (nolock) + spin_unlock_bh(&qdisc->seqlock); } static bool some_qdisc_is_busy(struct net_device *dev) @@ -1196,12 +1205,20 @@ void dev_deactivate_many(struct list_head *head) dev_watchdog_down(dev); } - /* Wait for outstanding qdisc-less dev_queue_xmit calls. + /* Wait for outstanding qdisc-less dev_queue_xmit calls or + * outstanding qdisc enqueuing calls. 
* This is avoided if all devices are in dismantle phase : * Caller will call synchronize_net() for us */ synchronize_net(); + list_for_each_entry(dev, head, close_list) { + netdev_for_each_tx_queue(dev, dev_reset_queue, NULL); + + if (dev_ingress_queue(dev)) + dev_reset_queue(dev, dev_ingress_queue(dev), NULL); + } + /* Wait for outstanding qdisc_run calls. */ list_for_each_entry(dev, head, close_list) { while (some_qdisc_is_busy(dev)) -- GitLab From d4c5a31a63365fc86579d7a6ebe98ecc4cba9bd2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 17 Sep 2020 10:52:57 -0700 Subject: [PATCH 0857/1304] nfp: use correct define to return NONE fec [ Upstream commit 5f6857e808a8bd078296575b417c4b9d160b9779 ] struct ethtool_fecparam carries bitmasks not bit numbers. We want to return 1 (NONE), not 0. Fixes: 0d0870938337 ("nfp: implement ethtool FEC mode settings") Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Reviewed-by: Jesse Brandeburg Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6a79c8e4a7a4..9043d2cadd5d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -744,8 +744,8 @@ nfp_port_get_fecparam(struct net_device *netdev, struct nfp_eth_table_port *eth_port; struct nfp_port *port; - param->active_fec = ETHTOOL_FEC_NONE_BIT; - param->fec = ETHTOOL_FEC_NONE_BIT; + param->active_fec = ETHTOOL_FEC_NONE; + param->fec = ETHTOOL_FEC_NONE; port = nfp_port_from_netdev(netdev); eth_port = nfp_port_get_eth_port(port); -- GitLab From d82e08de23e36c37667f67a502b0cf4a3e3f61bd Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Sun, 13 Sep 2020 04:06:05 -0400 Subject: [PATCH 0858/1304] tipc: Fix memory leak in tipc_group_create_member() [ Upstream 
commit bb3a420d47ab00d7e1e5083286cab15235a96680 ] tipc_group_add_to_tree() returns silently if `key` matches `nkey` of an existing node, causing tipc_group_create_member() to leak memory. Let tipc_group_add_to_tree() return an error in such a case, so that tipc_group_create_member() can handle it properly. Fixes: 75da2163dbb6 ("tipc: introduce communication groups") Reported-and-tested-by: syzbot+f95d90c454864b3b5bc9@syzkaller.appspotmail.com Cc: Hillf Danton Link: https://syzkaller.appspot.com/bug?id=048390604fe1b60df34150265479202f10e13aff Signed-off-by: Peilin Ye Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/group.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/net/tipc/group.c b/net/tipc/group.c index 9a9138de4eca..b656385efad6 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp, return NULL; } -static void tipc_group_add_to_tree(struct tipc_group *grp, - struct tipc_member *m) +static int tipc_group_add_to_tree(struct tipc_group *grp, + struct tipc_member *m) { u64 nkey, key = (u64)m->node << 32 | m->port; struct rb_node **n, *parent = NULL; @@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp, else if (key > nkey) n = &(*n)->rb_right; else - return; + return -EEXIST; } rb_link_node(&m->tree_node, parent, n); rb_insert_color(&m->tree_node, &grp->members); + return 0; } static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, @@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, u32 instance, int state) { struct tipc_member *m; + int ret; m = kzalloc(sizeof(*m), GFP_ATOMIC); if (!m) @@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, m->port = port; m->instance = instance; m->bc_acked = grp->bc_snd_nxt - 1; + ret = tipc_group_add_to_tree(grp, m); + if (ret < 0) { + 
kfree(m); + return NULL; + } grp->member_cnt++; - tipc_group_add_to_tree(grp, m); tipc_nlist_add(&grp->dests, m->node); m->state = state; return m; -- GitLab From 0183a74c915882509f70c2ddc05bc9e6726cfb7c Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sat, 5 Sep 2020 15:14:47 +0900 Subject: [PATCH 0859/1304] tipc: fix shutdown() of connection oriented socket [ Upstream commit a4b5cc9e10803ecba64a7d54c0f47e4564b4a980 ] I confirmed that the problem fixed by commit 2a63866c8b51a3f7 ("tipc: fix shutdown() of connectionless socket") also applies to stream socket. ---------- #include #include #include int main(int argc, char *argv[]) { int fds[2] = { -1, -1 }; socketpair(PF_TIPC, SOCK_STREAM /* or SOCK_DGRAM */, 0, fds); if (fork() == 0) _exit(read(fds[0], NULL, 1)); shutdown(fds[0], SHUT_RDWR); /* This must make read() return. */ wait(NULL); /* To be woken up by _exit(). */ return 0; } ---------- Since shutdown(SHUT_RDWR) should affect all processes sharing that socket, unconditionally setting sk->sk_shutdown to SHUTDOWN_MASK will be the right behavior. Signed-off-by: Tetsuo Handa Acked-by: Ying Xue Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/socket.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index d0cf7169f08c..16e2af3a00cc 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2565,10 +2565,7 @@ static int tipc_shutdown(struct socket *sock, int how) lock_sock(sk); __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); - if (tipc_sk_type_connectionless(sk)) - sk->sk_shutdown = SHUTDOWN_MASK; - else - sk->sk_shutdown = SEND_SHUTDOWN; + sk->sk_shutdown = SHUTDOWN_MASK; if (sk->sk_state == TIPC_DISCONNECTING) { /* Discard any unreceived messages */ -- GitLab From b15fcca8eff903c4a9a50336f5bd8a208ca45df7 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 13 Sep 2020 19:37:31 +0800 Subject: [PATCH 0860/1304] tipc: use skb_unshare() instead in tipc_buf_append() [ Upstream commit ff48b6222e65ebdba5a403ef1deba6214e749193 ] In tipc_buf_append() it may change skb's frag_list, and it causes problems when this skb is cloned. skb_unclone() doesn't really make this skb's flag_list available to change. 
Shuang Li has reported an use-after-free issue because of this when creating quite a few macvlan dev over the same dev, where the broadcast packets will be cloned and go up to the stack: [ ] BUG: KASAN: use-after-free in pskb_expand_head+0x86d/0xea0 [ ] Call Trace: [ ] dump_stack+0x7c/0xb0 [ ] print_address_description.constprop.7+0x1a/0x220 [ ] kasan_report.cold.10+0x37/0x7c [ ] check_memory_region+0x183/0x1e0 [ ] pskb_expand_head+0x86d/0xea0 [ ] process_backlog+0x1df/0x660 [ ] net_rx_action+0x3b4/0xc90 [ ] [ ] Allocated by task 1786: [ ] kmem_cache_alloc+0xbf/0x220 [ ] skb_clone+0x10a/0x300 [ ] macvlan_broadcast+0x2f6/0x590 [macvlan] [ ] macvlan_process_broadcast+0x37c/0x516 [macvlan] [ ] process_one_work+0x66a/0x1060 [ ] worker_thread+0x87/0xb10 [ ] [ ] Freed by task 3253: [ ] kmem_cache_free+0x82/0x2a0 [ ] skb_release_data+0x2c3/0x6e0 [ ] kfree_skb+0x78/0x1d0 [ ] tipc_recvmsg+0x3be/0xa40 [tipc] So fix it by using skb_unshare() instead, which would create a new skb for the cloned frag and it'll be safe to change its frag_list. The similar things were also done in sctp_make_reassembled_event(), which is using skb_copy(). Reported-by: Shuang Li Fixes: 37e22164a8a3 ("tipc: rename and move message reassembly function") Signed-off-by: Xin Long Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/msg.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/tipc/msg.c b/net/tipc/msg.c index cbccf1791d3c..b078b77620f1 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) if (fragid == FIRST_FRAGMENT) { if (unlikely(head)) goto err; - if (unlikely(skb_unclone(frag, GFP_ATOMIC))) + frag = skb_unshare(frag, GFP_ATOMIC); + if (unlikely(!frag)) goto err; head = *headbuf = frag; *buf = NULL; -- GitLab From 1627f9325dbea4778d150f0b83b01f5883129a16 Mon Sep 17 00:00:00 2001 From: Edwin Peer Date: Sun, 20 Sep 2020 21:08:55 -0400 Subject: [PATCH 0861/1304] bnxt_en: return proper error codes in bnxt_show_temp [ Upstream commit d69753fa1ecb3218b56b022722f7a5822735b876 ] Returning "unknown" as a temperature value violates the hwmon interface rules. Appropriate error codes should be returned via device_attribute show instead. These will ultimately be propagated to the user via the file system interface. In addition to the corrected error handling, it is an even better idea to not present the sensor in sysfs at all if it is known that the read will definitely fail. Given that temp1_input is currently the only sensor reported, ensure no hwmon registration if TEMP_MONITOR_QUERY is not supported or if it will fail due to access permissions. Something smarter may be needed if and when other sensors are added. Fixes: 12cce90b934b ("bnxt_en: fix HWRM error when querying VF temperature") Signed-off-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a267380b267d..c3f04fb31955 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6837,18 +6837,16 @@ static ssize_t bnxt_show_temp(struct device *dev, struct hwrm_temp_monitor_query_output *resp; struct bnxt *bp = dev_get_drvdata(dev); u32 len = 0; + int rc; resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); - if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ mutex_unlock(&bp->hwrm_cmd_lock); - - if (len) - return len; - - return sprintf(buf, "unknown\n"); + return rc ?: len; } static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); @@ -6868,7 +6866,16 @@ static void bnxt_hwmon_close(struct bnxt *bp) static void bnxt_hwmon_open(struct bnxt *bp) { + struct hwrm_temp_monitor_query_input req = {0}; struct pci_dev *pdev = bp->pdev; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); + rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == -EACCES || rc == -EOPNOTSUPP) { + bnxt_hwmon_close(bp); + return; + } bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, DRV_MODULE_NAME, bp, -- GitLab From ee0491c2906a352d1575bd6073ad7b3115568861 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sun, 20 Sep 2020 21:08:56 -0400 Subject: [PATCH 0862/1304] bnxt_en: Protect bnxt_set_eee() and bnxt_set_pauseparam() with mutex. 
[ Upstream commit a53906908148d64423398a62c4435efb0d09652c ] All changes related to bp->link_info require the protection of the link_lock mutex. It's not sufficient to rely just on RTNL. Fixes: 163e9ef63641 ("bnxt_en: Fix race when modifying pause settings.") Reviewed-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 31 ++++++++++++------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index a1cb99110092..1ea81c23039f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1369,9 +1369,12 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (!BNXT_SINGLE_PF(bp)) return -EOPNOTSUPP; + mutex_lock(&bp->link_lock); if (epause->autoneg) { - if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) - return -EINVAL; + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + rc = -EINVAL; + goto pause_exit; + } link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; if (bp->hwrm_spec_code >= 0x10201) @@ -1392,11 +1395,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - if (netif_running(dev)) { - mutex_lock(&bp->link_lock); + if (netif_running(dev)) rc = bnxt_hwrm_set_pause(bp); - mutex_unlock(&bp->link_lock); - } + +pause_exit: + mutex_unlock(&bp->link_lock); return rc; } @@ -2113,8 +2116,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) struct bnxt *bp = netdev_priv(dev); struct ethtool_eee *eee = &bp->eee; struct bnxt_link_info *link_info = &bp->link_info; - u32 advertising = - _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + u32 advertising; int rc = 0; if (!BNXT_SINGLE_PF(bp)) @@ -2123,19 +2125,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) 
if (!(bp->flags & BNXT_FLAG_EEE_CAP)) return -EOPNOTSUPP; + mutex_lock(&bp->link_lock); + advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); if (!edata->eee_enabled) goto eee_ok; if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { netdev_warn(dev, "EEE requires autoneg\n"); - return -EINVAL; + rc = -EINVAL; + goto eee_exit; } if (edata->tx_lpi_enabled) { if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || edata->tx_lpi_timer < bp->lpi_tmr_lo)) { netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", bp->lpi_tmr_lo, bp->lpi_tmr_hi); - return -EINVAL; + rc = -EINVAL; + goto eee_exit; } else if (!bp->lpi_tmr_hi) { edata->tx_lpi_timer = eee->tx_lpi_timer; } @@ -2145,7 +2151,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) } else if (edata->advertised & ~advertising) { netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", edata->advertised, advertising); - return -EINVAL; + rc = -EINVAL; + goto eee_exit; } eee->advertised = edata->advertised; @@ -2157,6 +2164,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) if (netif_running(dev)) rc = bnxt_hwrm_set_link_setting(bp, false, true); +eee_exit: + mutex_unlock(&bp->link_lock); return rc; } -- GitLab From e9ee8b696d116e9e5a375de811fb0929ef2a5139 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 16 Sep 2020 20:43:09 -0700 Subject: [PATCH 0863/1304] net: phy: Avoid NPD upon phy_detach() when driver is unbound [ Upstream commit c2b727df7caa33876e7066bde090f40001b6d643 ] If we have unbound the PHY driver prior to calling phy_detach() (often via phy_disconnect()) then we can cause a NULL pointer de-reference accessing the driver owner member. 
The steps to reproduce are: echo unimac-mdio-0:01 > /sys/class/net/eth0/phydev/driver/unbind ip link set eth0 down Fixes: cafe8df8b9bc ("net: phy: Fix lack of reference count on PHY driver") Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/phy_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 54ac599cffb4..b884b681d5c5 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1154,7 +1154,8 @@ void phy_detach(struct phy_device *phydev) phy_led_triggers_unregister(phydev); - module_put(phydev->mdio.dev.driver->owner); + if (phydev->mdio.dev.driver) + module_put(phydev->mdio.dev.driver->owner); /* If the device had no specific driver before (i.e. - it * was using the generic driver), we unbind the device -- GitLab From 771443a2ffe189f5f51653d21e949b436c9da09a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Sep 2020 01:27:39 -0700 Subject: [PATCH 0864/1304] net: qrtr: check skb_put_padto() return value [ Upstream commit 3ca1a42a52ca4b4f02061683851692ad65fefac8 ] If skb_put_padto() returns an error, skb has been freed. Better not touch it anymore, as reported by syzbot [1] Note to qrtr maintainers : this suggests qrtr_sendmsg() should adjust sock_alloc_send_skb() second parameter to account for the potential added alignment to avoid reallocation. 
[1] BUG: KASAN: use-after-free in __skb_insert include/linux/skbuff.h:1907 [inline] BUG: KASAN: use-after-free in __skb_queue_before include/linux/skbuff.h:2016 [inline] BUG: KASAN: use-after-free in __skb_queue_tail include/linux/skbuff.h:2049 [inline] BUG: KASAN: use-after-free in skb_queue_tail+0x6b/0x120 net/core/skbuff.c:3146 Write of size 8 at addr ffff88804d8ab3c0 by task syz-executor.4/4316 CPU: 1 PID: 4316 Comm: syz-executor.4 Not tainted 5.9.0-rc4-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x1d6/0x29e lib/dump_stack.c:118 print_address_description+0x66/0x620 mm/kasan/report.c:383 __kasan_report mm/kasan/report.c:513 [inline] kasan_report+0x132/0x1d0 mm/kasan/report.c:530 __skb_insert include/linux/skbuff.h:1907 [inline] __skb_queue_before include/linux/skbuff.h:2016 [inline] __skb_queue_tail include/linux/skbuff.h:2049 [inline] skb_queue_tail+0x6b/0x120 net/core/skbuff.c:3146 qrtr_tun_send+0x1a/0x40 net/qrtr/tun.c:23 qrtr_node_enqueue+0x44f/0xc00 net/qrtr/qrtr.c:364 qrtr_bcast_enqueue+0xbe/0x140 net/qrtr/qrtr.c:861 qrtr_sendmsg+0x680/0x9c0 net/qrtr/qrtr.c:960 sock_sendmsg_nosec net/socket.c:651 [inline] sock_sendmsg net/socket.c:671 [inline] sock_write_iter+0x317/0x470 net/socket.c:998 call_write_iter include/linux/fs.h:1882 [inline] new_sync_write fs/read_write.c:503 [inline] vfs_write+0xa96/0xd10 fs/read_write.c:578 ksys_write+0x11b/0x220 fs/read_write.c:631 do_syscall_64+0x31/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x45d5b9 Code: 5d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 2b b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 RSP: 002b:00007f84b5b81c78 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 0000000000038b40 RCX: 000000000045d5b9 RDX: 0000000000000055 RSI: 
0000000020001240 RDI: 0000000000000003 RBP: 00007f84b5b81ca0 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 000000000000000f R13: 00007ffcbbf86daf R14: 00007f84b5b829c0 R15: 000000000118cf4c Allocated by task 4316: kasan_save_stack mm/kasan/common.c:48 [inline] kasan_set_track mm/kasan/common.c:56 [inline] __kasan_kmalloc+0x100/0x130 mm/kasan/common.c:461 slab_post_alloc_hook+0x3e/0x290 mm/slab.h:518 slab_alloc mm/slab.c:3312 [inline] kmem_cache_alloc+0x1c1/0x2d0 mm/slab.c:3482 skb_clone+0x1b2/0x370 net/core/skbuff.c:1449 qrtr_bcast_enqueue+0x6d/0x140 net/qrtr/qrtr.c:857 qrtr_sendmsg+0x680/0x9c0 net/qrtr/qrtr.c:960 sock_sendmsg_nosec net/socket.c:651 [inline] sock_sendmsg net/socket.c:671 [inline] sock_write_iter+0x317/0x470 net/socket.c:998 call_write_iter include/linux/fs.h:1882 [inline] new_sync_write fs/read_write.c:503 [inline] vfs_write+0xa96/0xd10 fs/read_write.c:578 ksys_write+0x11b/0x220 fs/read_write.c:631 do_syscall_64+0x31/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Freed by task 4316: kasan_save_stack mm/kasan/common.c:48 [inline] kasan_set_track+0x3d/0x70 mm/kasan/common.c:56 kasan_set_free_info+0x17/0x30 mm/kasan/generic.c:355 __kasan_slab_free+0xdd/0x110 mm/kasan/common.c:422 __cache_free mm/slab.c:3418 [inline] kmem_cache_free+0x82/0xf0 mm/slab.c:3693 __skb_pad+0x3f5/0x5a0 net/core/skbuff.c:1823 __skb_put_padto include/linux/skbuff.h:3233 [inline] skb_put_padto include/linux/skbuff.h:3252 [inline] qrtr_node_enqueue+0x62f/0xc00 net/qrtr/qrtr.c:360 qrtr_bcast_enqueue+0xbe/0x140 net/qrtr/qrtr.c:861 qrtr_sendmsg+0x680/0x9c0 net/qrtr/qrtr.c:960 sock_sendmsg_nosec net/socket.c:651 [inline] sock_sendmsg net/socket.c:671 [inline] sock_write_iter+0x317/0x470 net/socket.c:998 call_write_iter include/linux/fs.h:1882 [inline] new_sync_write fs/read_write.c:503 [inline] vfs_write+0xa96/0xd10 fs/read_write.c:578 ksys_write+0x11b/0x220 fs/read_write.c:631 do_syscall_64+0x31/0x70 
arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 The buggy address belongs to the object at ffff88804d8ab3c0 which belongs to the cache skbuff_head_cache of size 224 The buggy address is located 0 bytes inside of 224-byte region [ffff88804d8ab3c0, ffff88804d8ab4a0) The buggy address belongs to the page: page:00000000ea8cccfb refcount:1 mapcount:0 mapping:0000000000000000 index:0xffff88804d8abb40 pfn:0x4d8ab flags: 0xfffe0000000200(slab) raw: 00fffe0000000200 ffffea0002237ec8 ffffea00029b3388 ffff88821bb66800 raw: ffff88804d8abb40 ffff88804d8ab000 000000010000000b 0000000000000000 page dumped because: kasan: bad access detected Fixes: ce57785bf91b ("net: qrtr: fix len of skb_put_padto in qrtr_node_enqueue") Signed-off-by: Eric Dumazet Reported-by: syzbot Cc: Carl Huang Cc: Wen Gong Cc: Bjorn Andersson Cc: Manivannan Sadhasivam Acked-by: Manivannan Sadhasivam Reviewed-by: Bjorn Andersson Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/qrtr/qrtr.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 42bd1e74f78c..a05c5cb3429c 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -185,7 +185,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, { struct qrtr_hdr_v1 *hdr; size_t len = skb->len; - int rc = -ENODEV; + int rc; hdr = skb_push(skb, sizeof(*hdr)); hdr->version = cpu_to_le32(QRTR_PROTO_VER_1); @@ -203,15 +203,17 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, hdr->size = cpu_to_le32(len); hdr->confirm_rx = 0; - skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); - - mutex_lock(&node->ep_lock); - if (node->ep) - rc = node->ep->xmit(node->ep, skb); - else - kfree_skb(skb); - mutex_unlock(&node->ep_lock); + rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); + if (!rc) { + mutex_lock(&node->ep_lock); + rc = -ENODEV; + if (node->ep) + rc = node->ep->xmit(node->ep, skb); + else + 
kfree_skb(skb); + mutex_unlock(&node->ep_lock); + } return rc; } -- GitLab From f424617e01dce6a220892fce26afc0abef952e1b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Sep 2020 01:27:40 -0700 Subject: [PATCH 0865/1304] net: add __must_check to skb_put_padto() [ Upstream commit 4a009cb04aeca0de60b73f37b102573354214b52 ] skb_put_padto() and __skb_put_padto() callers must check return values or risk use-after-free. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/linux/skbuff.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 25407c206e73..cbc0294f3989 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3014,8 +3014,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error if @free_on_error is true. */ -static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, - bool free_on_error) +static inline int __must_check __skb_put_padto(struct sk_buff *skb, + unsigned int len, + bool free_on_error) { unsigned int size = skb->len; @@ -3038,7 +3039,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. 
*/ -static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) { return __skb_put_padto(skb, len, true); } -- GitLab From 98776a365da509ad923083ae54b38ee521c52742 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 14 Sep 2020 21:03:54 -0600 Subject: [PATCH 0866/1304] ipv4: Update exception handling for multipath routes via same device [ Upstream commit 2fbc6e89b2f1403189e624cabaf73e189c5e50c6 ] Kfir reported that pmtu exceptions are not created properly for deployments where multipath routes use the same device. After some digging I see 2 compounding problems: 1. ip_route_output_key_hash_rcu is updating the flowi4_oif *after* the route lookup. This is the second use case where this has been a problem (the first is related to use of vti devices with VRF). I can not find any reason for the oif to be changed after the lookup; the code goes back to the start of git. It does not seem logical so remove it. 2. fib_lookups for exceptions do not call fib_select_path to handle multipath route selection based on the hash. The end result is that the fib_lookup used to add the exception always creates it based using the first leg of the route. 
An example topology showing the problem: | host1 +------+ | eth0 | .209 +------+ | +------+ switch | br0 | +------+ | +---------+---------+ | host2 | host3 +------+ +------+ | eth0 | .250 | eth0 | 192.168.252.252 +------+ +------+ +-----+ +-----+ | vti | .2 | vti | 192.168.247.3 +-----+ +-----+ \ / ================================= tunnels 192.168.247.1/24 for h in host1 host2 host3; do ip netns add ${h} ip -netns ${h} link set lo up ip netns exec ${h} sysctl -wq net.ipv4.ip_forward=1 done ip netns add switch ip -netns switch li set lo up ip -netns switch link add br0 type bridge stp 0 ip -netns switch link set br0 up for n in 1 2 3; do ip -netns switch link add eth-sw type veth peer name eth-h${n} ip -netns switch li set eth-h${n} master br0 up ip -netns switch li set eth-sw netns host${n} name eth0 done ip -netns host1 addr add 192.168.252.209/24 dev eth0 ip -netns host1 link set dev eth0 up ip -netns host1 route add 192.168.247.0/24 \ nexthop via 192.168.252.250 dev eth0 nexthop via 192.168.252.252 dev eth0 ip -netns host2 addr add 192.168.252.250/24 dev eth0 ip -netns host2 link set dev eth0 up ip -netns host2 addr add 192.168.252.252/24 dev eth0 ip -netns host3 link set dev eth0 up ip netns add tunnel ip -netns tunnel li set lo up ip -netns tunnel li add br0 type bridge ip -netns tunnel li set br0 up for n in $(seq 11 20); do ip -netns tunnel addr add dev br0 192.168.247.${n}/24 done for n in 2 3 do ip -netns tunnel link add vti${n} type veth peer name eth${n} ip -netns tunnel link set eth${n} mtu 1360 master br0 up ip -netns tunnel link set vti${n} netns host${n} mtu 1360 up ip -netns host${n} addr add dev vti${n} 192.168.247.${n}/24 done ip -netns tunnel ro add default nexthop via 192.168.247.2 nexthop via 192.168.247.3 ip netns exec host1 ping -M do -s 1400 -c3 -I 192.168.252.209 192.168.247.11 ip netns exec host1 ping -M do -s 1400 -c3 -I 192.168.252.209 192.168.247.15 ip -netns host1 ro ls cache Before this patch the cache always shows exceptions against 
the first leg in the multipath route; 192.168.252.250 per this example. Since the hash has an initial random seed, you may need to vary the final octet more than what is listed. In my tests, using addresses between 11 and 19 usually found 1 that used both legs. With this patch, the cache will have exceptions for both legs. Fixes: 4895c771c7f0 ("ipv4: Add FIB nexthop exceptions") Reported-by: Kfir Itzhak Signed-off-by: David Ahern Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/route.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f752d22cc8a5..84de87b7eedc 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -777,8 +777,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow neigh_event_send(n, NULL); } else { if (fib_lookup(net, fl4, &res, 0) == 0) { - struct fib_nh *nh = &FIB_RES_NH(res); + struct fib_nh *nh; + fib_select_path(net, &res, fl4, skb); + nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, new_gw, 0, false, jiffies + ip_rt_gc_timeout); @@ -1004,6 +1006,7 @@ out: kfree_skb(skb); static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) { struct dst_entry *dst = &rt->dst; + struct net *net = dev_net(dst->dev); u32 old_mtu = ipv4_mtu(dst); struct fib_result res; bool lock = false; @@ -1024,9 +1027,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) return; rcu_read_lock(); - if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { - struct fib_nh *nh = &FIB_RES_NH(res); + if (fib_lookup(net, fl4, &res, 0) == 0) { + struct fib_nh *nh; + fib_select_path(net, &res, fl4, NULL); + nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock, jiffies + ip_rt_mtu_expires); } @@ -2536,8 +2541,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, fib_select_path(net, res, fl4, skb); dev_out = FIB_RES_DEV(*res); - 
fl4->flowi4_oif = dev_out->ifindex; - make_route: rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags); -- GitLab From e711de542260e8b2a5158534ded59ab9b1072f41 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 28 Jun 2019 12:07:12 -0700 Subject: [PATCH 0867/1304] MAINTAINERS: add CLANG/LLVM BUILD SUPPORT info commit 8708e13c6a0600625eea3aebd027c0715a5d2bb2 upstream. Add keyword support so that our mailing list gets cc'ed for clang/llvm patches. We're pretty active on our mailing list so far as code review. There are numerous Googlers like myself that are paid to support building the Linux kernel with Clang and LLVM. Link: http://lkml.kernel.org/r/20190620001907.255803-1-ndesaulniers@google.com Signed-off-by: Nick Desaulniers Reviewed-by: Nathan Chancellor Cc: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index b9f9da0b886f..9388564b7ab5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3613,6 +3613,14 @@ M: Miguel Ojeda S: Maintained F: .clang-format +CLANG/LLVM BUILD SUPPORT +L: clang-built-linux@googlegroups.com +W: https://clangbuiltlinux.github.io/ +B: https://github.com/ClangBuiltLinux/linux/issues +C: irc://chat.freenode.net/clangbuiltlinux +S: Supported +K: \b(?i:clang|llvm)\b + CLEANCACHE API M: Konrad Rzeszutek Wilk L: linux-kernel@vger.kernel.org -- GitLab From 31030d63d5b6253aa15b31b7240e035fcd2704e0 Mon Sep 17 00:00:00 2001 From: Vasily Gorbik Date: Mon, 21 Jan 2019 13:54:39 +0100 Subject: [PATCH 0868/1304] kbuild: add OBJSIZE variable for the size tool commit 7bac98707f65b93bf994ef4e99b1eb9e7dbb9c32 upstream. Define and export OBJSIZE variable for "size" tool from binutils to be used in architecture specific Makefiles (naming the variable just "SIZE" would be too risky). 
In particular this tool is useful to perform checks that early boot code is not using bss section (which might have not been zeroed yet or intersects with initrd or other files boot loader might have put right after the linux kernel). Link: http://lkml.kernel.org/r/patch-1.thread-2257a1.git-188f5a3d81d5.your-ad-here.call-01565088755-ext-5120@work.hours Acked-by: Masahiro Yamada Signed-off-by: Vasily Gorbik [nd: conflict in exported vars list from not backporting commit e83b9f55448a ("kbuild: add ability to generate BTF type info for vmlinux")] Signed-off-by: Greg Kroah-Hartman --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ee648a902ce3..bfd14c5463da 100644 --- a/Makefile +++ b/Makefile @@ -377,6 +377,7 @@ NM = $(CROSS_COMPILE)nm STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump +OBJSIZE = $(CROSS_COMPILE)size LEX = flex YACC = bison AWK = awk @@ -433,7 +434,7 @@ GCC_PLUGINS_CFLAGS := CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS -- GitLab From 948f0c02039b0ed9a2da283893e96e41c18b07fc Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Wed, 26 Feb 2020 15:23:36 -0800 Subject: [PATCH 0869/1304] Documentation/llvm: add documentation on building w/ Clang/LLVM commit fcf1b6a35c16ac500fa908a4022238e5d666eabf upstream. added to kbuild documentation. Provides more official info on building kernels with Clang and LLVM than our wiki. 
Suggested-by: Kees Cook Reviewed-by: Kees Cook Reviewed-by: Nathan Chancellor Reviewed-by: Sedat Dilek Signed-off-by: Nick Desaulniers Signed-off-by: Masahiro Yamada [nd: hunk against Documentation/kbuild/index.rst dropped due to not backporting commit cd238effefa2 ("docs: kbuild: convert docs to ReST and rename to *.rst")] Signed-off-by: Greg Kroah-Hartman --- Documentation/kbuild/llvm.rst | 80 +++++++++++++++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 81 insertions(+) create mode 100644 Documentation/kbuild/llvm.rst diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst new file mode 100644 index 000000000000..d6c79eb4e23e --- /dev/null +++ b/Documentation/kbuild/llvm.rst @@ -0,0 +1,80 @@ +============================== +Building Linux with Clang/LLVM +============================== + +This document covers how to build the Linux kernel with Clang and LLVM +utilities. + +About +----- + +The Linux kernel has always traditionally been compiled with GNU toolchains +such as GCC and binutils. Ongoing work has allowed for `Clang +`_ and `LLVM `_ utilities to be +used as viable substitutes. Distributions such as `Android +`_, `ChromeOS +`_, and `OpenMandriva +`_ use Clang built kernels. `LLVM is a +collection of toolchain components implemented in terms of C++ objects +`_. Clang is a front-end to LLVM that +supports C and the GNU C extensions required by the kernel, and is pronounced +"klang," not "see-lang." + +Clang +----- + +The compiler used can be swapped out via `CC=` command line argument to `make`. +`CC=` should be set when selecting a config and during a build. + + make CC=clang defconfig + + make CC=clang + +Cross Compiling +--------------- + +A single Clang compiler binary will typically contain all supported backends, +which can help simplify cross compiling. 
+ + ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang + +`CROSS_COMPILE` is not used to prefix the Clang compiler binary, instead +`CROSS_COMPILE` is used to set a command line flag: `--target `. For +example: + + clang --target aarch64-linux-gnu foo.c + +LLVM Utilities +-------------- + +LLVM has substitutes for GNU binutils utilities. These can be invoked as +additional parameters to `make`. + + make CC=clang AS=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\ + OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-objsize \\ + READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\ + HOSTLD=ld.lld + +Getting Help +------------ + +- `Website `_ +- `Mailing List `_: +- `Issue Tracker `_ +- IRC: #clangbuiltlinux on chat.freenode.net +- `Telegram `_: @ClangBuiltLinux +- `Wiki `_ +- `Beginner Bugs `_ + +Getting LLVM +------------- + +- http://releases.llvm.org/download.html +- https://github.com/llvm/llvm-project +- https://llvm.org/docs/GettingStarted.html +- https://llvm.org/docs/CMake.html +- https://apt.llvm.org/ +- https://www.archlinux.org/packages/extra/x86_64/llvm/ +- https://github.com/ClangBuiltLinux/tc-build +- https://github.com/ClangBuiltLinux/linux/wiki/Building-Clang-from-source +- https://android.googlesource.com/platform/prebuilts/clang/host/linux-x86/ diff --git a/MAINTAINERS b/MAINTAINERS index 9388564b7ab5..1061db6fbc32 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3620,6 +3620,7 @@ B: https://github.com/ClangBuiltLinux/linux/issues C: irc://chat.freenode.net/clangbuiltlinux S: Supported K: \b(?i:clang|llvm)\b +F: Documentation/kbuild/llvm.rst CLEANCACHE API M: Konrad Rzeszutek Wilk -- GitLab From 98aeb8d9716fba2fcc64c9c18d915f749e4d69f0 Mon Sep 17 00:00:00 2001 From: Fangrui Song Date: Thu, 2 Apr 2020 10:38:42 -0700 Subject: [PATCH 0870/1304] Documentation/llvm: fix the name of llvm-size commit 0f44fbc162b737ff6251ae248184390ae2279fee upstream. The tool is called llvm-size, not llvm-objsize. 
Fixes: fcf1b6a35c16 ("Documentation/llvm: add documentation on building w/ Clang/LLVM") Signed-off-by: Fangrui Song Reviewed-by: Nick Desaulniers Reviewed-by: Nathan Chancellor Signed-off-by: Masahiro Yamada Signed-off-by: Nick Desaulniers Signed-off-by: Greg Kroah-Hartman --- Documentation/kbuild/llvm.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst index d6c79eb4e23e..eefbdfa3e4d9 100644 --- a/Documentation/kbuild/llvm.rst +++ b/Documentation/kbuild/llvm.rst @@ -51,7 +51,7 @@ LLVM has substitutes for GNU binutils utilities. These can be invoked as additional parameters to `make`. make CC=clang AS=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\ - OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-objsize \\ + OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\ READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\ HOSTLD=ld.lld -- GitLab From fb181ac6fe194b4eb20007f65504c61ea22826fd Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 14:57:15 +0900 Subject: [PATCH 0871/1304] net: wan: wanxl: use allow to pass CROSS_COMPILE_M68k for rebuilding firmware commit 63b903dfebdea92aa92ad337d8451a6fbfeabf9d upstream. As far as I understood from the Kconfig help text, this build rule is used to rebuild the driver firmware, which runs on an old m68k-based chip. So, you need m68k tools for the firmware rebuild. wanxl.c is a PCI driver, but CONFIG_M68K does not select CONFIG_HAVE_PCI. So, you cannot enable CONFIG_WANXL_BUILD_FIRMWARE for ARCH=m68k. In other words, ifeq ($(ARCH),m68k) is false here. I am keeping the dead code for now, but rebuilding the firmware requires 'as68k' and 'ld68k', which I do not have in hand. Instead, the kernel.org m68k GCC [1] successfully built it. Allowing a user to pass in CROSS_COMPILE_M68K= is handier. 
[1] https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/9.2.0/x86_64-gcc-9.2.0-nolibc-m68k-linux.tar.xz Suggested-by: Geert Uytterhoeven Signed-off-by: Masahiro Yamada Signed-off-by: Nick Desaulniers Signed-off-by: Greg Kroah-Hartman --- drivers/net/wan/Kconfig | 2 +- drivers/net/wan/Makefile | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 4e9fe75d7067..21190dfbabb1 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -199,7 +199,7 @@ config WANXL_BUILD_FIRMWARE depends on WANXL && !PREVENT_FIRMWARE_BUILD help Allows you to rebuild firmware run by the QUICC processor. - It requires as68k, ld68k and hexdump programs. + It requires m68k toolchains and hexdump programs. You should never need this option, say N. diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 9532e69fda87..c21b7345b50b 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -41,17 +41,17 @@ $(obj)/wanxl.o: $(obj)/wanxlfw.inc ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) ifeq ($(ARCH),m68k) - AS68K = $(AS) - LD68K = $(LD) + M68KAS = $(AS) + M68KLD = $(LD) else - AS68K = as68k - LD68K = ld68k + M68KAS = $(CROSS_COMPILE_M68K)as + M68KLD = $(CROSS_COMPILE_M68K)ld endif quiet_cmd_build_wanxlfw = BLD FW $@ cmd_build_wanxlfw = \ - $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \ - $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ + $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(M68KAS) -m68360 -o $(obj)/wanxlfw.o; \ + $(M68KLD) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \ rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o -- GitLab From a1c015990071258650b3ec45dfa182bd20378de5 
Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 14:57:16 +0900 Subject: [PATCH 0872/1304] net: wan: wanxl: use $(M68KCC) instead of $(M68KAS) for rebuilding firmware commit 734f3719d3438f9cc181d674c33ca9762e9148a1 upstream. The firmware source, wanxlfw.S, is currently compiled by the combo of $(CPP) and $(M68KAS). This is not what we usually do for compiling *.S files. In fact, this Makefile is the only user of $(AS) in the kernel build. Instead of combining $(CPP) and (AS) from different tool sets, using $(M68KCC) as an assembler driver is simpler, and saner. Signed-off-by: Masahiro Yamada Signed-off-by: Nick Desaulniers Signed-off-by: Greg Kroah-Hartman --- drivers/net/wan/Makefile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index c21b7345b50b..0500282e176e 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -41,16 +41,16 @@ $(obj)/wanxl.o: $(obj)/wanxlfw.inc ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y) ifeq ($(ARCH),m68k) - M68KAS = $(AS) + M68KCC = $(CC) M68KLD = $(LD) else - M68KAS = $(CROSS_COMPILE_M68K)as + M68KCC = $(CROSS_COMPILE_M68K)gcc M68KLD = $(CROSS_COMPILE_M68K)ld endif quiet_cmd_build_wanxlfw = BLD FW $@ cmd_build_wanxlfw = \ - $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(M68KAS) -m68360 -o $(obj)/wanxlfw.o; \ + $(M68KCC) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi -c -o $(obj)/wanxlfw.o $<; \ $(M68KLD) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \ rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o -- GitLab From 621150689b0992bc02389b2351c2cbf0bc5bd700 Mon Sep 17 00:00:00 2001 From: Dmitry Golovin Date: Thu, 5 Dec 2019 00:54:41 +0200 Subject: [PATCH 0873/1304] x86/boot: kbuild: allow readelf executable to be specified commit 
eefb8c124fd969e9a174ff2bedff86aa305a7438 upstream. Introduce a new READELF variable to top-level Makefile, so the name of readelf binary can be specified. Before this change the name of the binary was hardcoded to "$(CROSS_COMPILE)readelf" which might not be present for every toolchain. This allows to build with LLVM Object Reader by using make parameter READELF=llvm-readelf. Link: https://github.com/ClangBuiltLinux/linux/issues/771 Signed-off-by: Dmitry Golovin Reviewed-by: Nick Desaulniers Signed-off-by: Masahiro Yamada Signed-off-by: Nick Desaulniers [nd: conflict in exported vars list from not backporting commit e83b9f55448a ("kbuild: add ability to generate BTF type info for vmlinux")] Signed-off-by: Greg Kroah-Hartman --- Makefile | 3 ++- arch/x86/boot/compressed/Makefile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index bfd14c5463da..4a6c155b23b8 100644 --- a/Makefile +++ b/Makefile @@ -378,6 +378,7 @@ STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump OBJSIZE = $(CROSS_COMPILE)size +READELF = $(CROSS_COMPILE)readelf LEX = flex YACC = bison AWK = awk @@ -434,7 +435,7 @@ GCC_PLUGINS_CFLAGS := CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index b337a0cd58ba..5642f025b397 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -102,7 +102,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o quiet_cmd_check_data_rel = DATAREL $@ define cmd_check_data_rel for obj in 
$(filter %.o,$^); do \ - ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \ + $(READELF) -S $$obj | grep -qF .rel.local && { \ echo "error: $$obj has data relocations!" >&2; \ exit 1; \ } || true; \ -- GitLab From 0fbcb1294d3b3c80110575a82b3fa2ab812719d6 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 14:57:18 +0900 Subject: [PATCH 0874/1304] kbuild: remove AS variable commit aa824e0c962b532d5073cbb41b2efcd6f5e72bae upstream. As commit 5ef872636ca7 ("kbuild: get rid of misleading $(AS) from documents") noted, we rarely use $(AS) directly in the kernel build. Now that the only/last user of $(AS) in drivers/net/wan/Makefile was converted to $(CC), $(AS) is no longer used in the build process. You can still pass in AS=clang, which is just a switch to turn on the LLVM integrated assembler. Signed-off-by: Masahiro Yamada Reviewed-by: Nick Desaulniers Tested-by: Nick Desaulniers Reviewed-by: Nathan Chancellor Signed-off-by: Nick Desaulniers [nd: conflict in exported vars list from not backporting commit e83b9f55448a ("kbuild: add ability to generate BTF type info for vmlinux")] Signed-off-by: Greg Kroah-Hartman --- Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 4a6c155b23b8..1e4ec28404da 100644 --- a/Makefile +++ b/Makefile @@ -368,7 +368,6 @@ KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS) KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS) # Make variables (CC, etc...) 
-AS = $(CROSS_COMPILE)as LD = $(CROSS_COMPILE)ld CC = $(CROSS_COMPILE)gcc CPP = $(CC) -E @@ -434,7 +433,7 @@ KBUILD_LDFLAGS := GCC_PLUGINS_CFLAGS := CLANG_FLAGS := -export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC +export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS -- GitLab From 459c7a844fcba41ab70f6247a1c2b4304939c221 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 8 Apr 2020 10:36:22 +0900 Subject: [PATCH 0875/1304] kbuild: replace AS=clang with LLVM_IAS=1 commit 7e20e47c70f810d678d02941fa3c671209c4ca97 upstream. The 'AS' variable is unused for building the kernel. Only the remaining usage is to turn on the integrated assembler. A boolean flag is a better fit for this purpose. AS=clang was added for experts. So, I replaced it with LLVM_IAS=1, breaking the backward compatibility. Suggested-by: Nick Desaulniers Signed-off-by: Masahiro Yamada Reviewed-by: Nathan Chancellor Reviewed-by: Nick Desaulniers Signed-off-by: Nick Desaulniers Signed-off-by: Greg Kroah-Hartman --- Documentation/kbuild/llvm.rst | 5 ++++- Makefile | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst index eefbdfa3e4d9..450708534860 100644 --- a/Documentation/kbuild/llvm.rst +++ b/Documentation/kbuild/llvm.rst @@ -50,11 +50,14 @@ LLVM Utilities LLVM has substitutes for GNU binutils utilities. These can be invoked as additional parameters to `make`. 
- make CC=clang AS=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\ + make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\ OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\ READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\ HOSTLD=ld.lld +Currently, the integrated assembler is disabled by default. You can pass +`LLVM_IAS=1` to enable it. + Getting Help ------------ diff --git a/Makefile b/Makefile index 1e4ec28404da..33154c74ec33 100644 --- a/Makefile +++ b/Makefile @@ -492,7 +492,9 @@ endif ifneq ($(GCC_TOOLCHAIN),) CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN) endif +ifneq ($(LLVM_IAS),1) CLANG_FLAGS += -no-integrated-as +endif CLANG_FLAGS += -Werror=unknown-warning-option KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_AFLAGS += $(CLANG_FLAGS) -- GitLab From 7aaf09fd5c63ee9dc86325896abdfa47c54d39a9 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 8 Apr 2020 10:36:23 +0900 Subject: [PATCH 0876/1304] kbuild: support LLVM=1 to switch the default tools to Clang/LLVM commit a0d1c951ef08ed24f35129267e3595d86f57f5d3 upstream. As Documentation/kbuild/llvm.rst implies, building the kernel with a full set of LLVM tools gets very verbose and unwieldy. Provide a single switch LLVM=1 to use Clang and LLVM tools instead of GCC and Binutils. You can pass it from the command line or as an environment variable. Please note LLVM=1 does not turn on the integrated assembler. You need to pass LLVM_IAS=1 to use it. When the upstream kernel is ready for the integrated assembler, I think we can make it default. We discussed what we need, and we agreed to go with a simple boolean flag that switches both target and host tools: https://lkml.org/lkml/2020/3/28/494 https://lkml.org/lkml/2020/4/3/43 Some items discussed, but not adopted: - LLVM_DIR When multiple versions of LLVM are installed, I just thought supporting LLVM_DIR=/path/to/my/llvm/bin/ might be useful. CC = $(LLVM_DIR)clang LD = $(LLVM_DIR)ld.lld ... 
However, we can handle this by modifying PATH. So, we decided to not do this. - LLVM_SUFFIX Some distributions (e.g. Debian) package specific versions of LLVM with naming conventions that use the version as a suffix. CC = clang$(LLVM_SUFFIX) LD = ld.lld(LLVM_SUFFIX) ... will allow a user to pass LLVM_SUFFIX=-11 to use clang-11 etc., but the suffixed versions in /usr/bin/ are symlinks to binaries in /usr/lib/llvm-#/bin/, so this can also be handled by PATH. Signed-off-by: Masahiro Yamada Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor # build Tested-by: Nick Desaulniers Reviewed-by: Nick Desaulniers Signed-off-by: Nick Desaulniers [nd: conflict in exported vars list from not backporting commit e83b9f55448a ("kbuild: add ability to generate BTF type info for vmlinux")] [nd: hunk against Documentation/kbuild/kbuild.rst dropped due to not backporting commit cd238effefa2 ("docs: kbuild: convert docs to ReST and rename to *.rst")] Signed-off-by: Greg Kroah-Hartman --- Documentation/kbuild/llvm.rst | 8 ++++++-- Makefile | 29 +++++++++++++++++++++++------ tools/objtool/Makefile | 6 ++++++ 3 files changed, 35 insertions(+), 8 deletions(-) diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst index 450708534860..c776b6eee969 100644 --- a/Documentation/kbuild/llvm.rst +++ b/Documentation/kbuild/llvm.rst @@ -47,8 +47,12 @@ example: LLVM Utilities -------------- -LLVM has substitutes for GNU binutils utilities. These can be invoked as -additional parameters to `make`. +LLVM has substitutes for GNU binutils utilities. Kbuild supports `LLVM=1` +to enable them. + + make LLVM=1 + +They can be enabled individually. 
The full list of the parameters: make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\ OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\ diff --git a/Makefile b/Makefile index 33154c74ec33..4f52bb29126b 100644 --- a/Makefile +++ b/Makefile @@ -358,8 +358,13 @@ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null) HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null) HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null) -HOSTCC = gcc -HOSTCXX = g++ +ifneq ($(LLVM),) +HOSTCC = clang +HOSTCXX = clang++ +else +HOSTCC = gcc +HOSTCXX = g++ +endif KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \ -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \ $(HOSTCFLAGS) @@ -368,16 +373,28 @@ KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS) KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS) # Make variables (CC, etc...) -LD = $(CROSS_COMPILE)ld -CC = $(CROSS_COMPILE)gcc CPP = $(CC) -E +ifneq ($(LLVM),) +CC = clang +LD = ld.lld +AR = llvm-ar +NM = llvm-nm +OBJCOPY = llvm-objcopy +OBJDUMP = llvm-objdump +READELF = llvm-readelf +OBJSIZE = llvm-size +STRIP = llvm-strip +else +CC = $(CROSS_COMPILE)gcc +LD = $(CROSS_COMPILE)ld AR = $(CROSS_COMPILE)ar NM = $(CROSS_COMPILE)nm -STRIP = $(CROSS_COMPILE)strip OBJCOPY = $(CROSS_COMPILE)objcopy OBJDUMP = $(CROSS_COMPILE)objdump -OBJSIZE = $(CROSS_COMPILE)size READELF = $(CROSS_COMPILE)readelf +OBJSIZE = $(CROSS_COMPILE)size +STRIP = $(CROSS_COMPILE)strip +endif LEX = flex YACC = bison AWK = awk diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 20f67fcf378d..baa92279c137 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -7,9 +7,15 @@ ARCH := x86 endif # always use the host compiler +ifneq ($(LLVM),) +HOSTAR ?= llvm-ar +HOSTCC ?= clang +HOSTLD ?= ld.lld +else HOSTAR ?= ar HOSTCC ?= gcc HOSTLD ?= ld +endif AR = $(HOSTAR) CC = $(HOSTCC) LD = $(HOSTLD) -- GitLab From 1aa7a9e5eebc5c40f0a5ea4e4cb8e8bd0267aea1 Mon Sep 17 00:00:00 2001 From: Xunlei 
Pang Date: Fri, 4 Sep 2020 16:35:27 -0700 Subject: [PATCH 0877/1304] mm: memcg: fix memcg reclaim soft lockup commit e3336cab2579012b1e72b5265adf98e2d6e244ad upstream. We've met softlockup with "CONFIG_PREEMPT_NONE=y", when the target memcg doesn't have any reclaimable memory. It can be easily reproduced as below: watchdog: BUG: soft lockup - CPU#0 stuck for 111s![memcg_test:2204] CPU: 0 PID: 2204 Comm: memcg_test Not tainted 5.9.0-rc2+ #12 Call Trace: shrink_lruvec+0x49f/0x640 shrink_node+0x2a6/0x6f0 do_try_to_free_pages+0xe9/0x3e0 try_to_free_mem_cgroup_pages+0xef/0x1f0 try_charge+0x2c1/0x750 mem_cgroup_charge+0xd7/0x240 __add_to_page_cache_locked+0x2fd/0x370 add_to_page_cache_lru+0x4a/0xc0 pagecache_get_page+0x10b/0x2f0 filemap_fault+0x661/0xad0 ext4_filemap_fault+0x2c/0x40 __do_fault+0x4d/0xf9 handle_mm_fault+0x1080/0x1790 It only happens on our 1-vcpu instances, because there's no chance for oom reaper to run to reclaim the to-be-killed process. Add a cond_resched() at the upper shrink_node_memcgs() to solve this issue, this will mean that we will get a scheduling point for each memcg in the reclaimed hierarchy without any dependency on the reclaimable memory in that memcg thus making it more predictable. 
Suggested-by: Michal Hocko Signed-off-by: Xunlei Pang Signed-off-by: Andrew Morton Acked-by: Chris Down Acked-by: Michal Hocko Acked-by: Johannes Weiner Link: http://lkml.kernel.org/r/1598495549-67324-1-git-send-email-xlpang@linux.alibaba.com Signed-off-by: Linus Torvalds Signed-off-by: Julius Hemanth Pitti Signed-off-by: Greg Kroah-Hartman --- mm/vmscan.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mm/vmscan.c b/mm/vmscan.c index bc2ecd43251a..b93dc8fc6007 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2708,6 +2708,14 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) unsigned long reclaimed; unsigned long scanned; + /* + * This loop can become CPU-bound when target memcgs + * aren't eligible for reclaim - either because they + * don't have any reclaimable pages, or because their + * memory is explicitly protected. Avoid soft lockups. + */ + cond_resched(); + switch (mem_cgroup_protected(root, memcg)) { case MEMCG_PROT_MIN: /* -- GitLab From a56eb38acc700684f365740993ce3ddedcbb9152 Mon Sep 17 00:00:00 2001 From: Priyaranjan Jha Date: Wed, 23 Jan 2019 12:04:53 -0800 Subject: [PATCH 0878/1304] tcp_bbr: refactor bbr_target_cwnd() for general inflight provisioning commit 232aa8ec3ed979d4716891540c03a806ecab0c37 upstream. Because bbr_target_cwnd() is really a general-purpose BBR helper for computing some volume of inflight data as a function of the estimated BDP, refactor it into following helper functions: - bbr_bdp() - bbr_quantization_budget() - bbr_inflight() Signed-off-by: Priyaranjan Jha Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_bbr.c | 60 ++++++++++++++++++++++++++++++---------------- 1 file changed, 39 insertions(+), 21 deletions(-) diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index b371e66502c3..4ee6cf1235f7 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -315,30 +315,19 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) } } -/* Find target cwnd. Right-size the cwnd based on min RTT and the - * estimated bottleneck bandwidth: +/* Calculate bdp based on min RTT and the estimated bottleneck bandwidth: * - * cwnd = bw * min_rtt * gain = BDP * gain + * bdp = bw * min_rtt * gain * * The key factor, gain, controls the amount of queue. While a small gain * builds a smaller queue, it becomes more vulnerable to noise in RTT * measurements (e.g., delayed ACKs or other ACK compression effects). This * noise may cause BBR to under-estimate the rate. - * - * To achieve full performance in high-speed paths, we budget enough cwnd to - * fit full-sized skbs in-flight on both end hosts to fully utilize the path: - * - one skb in sending host Qdisc, - * - one skb in sending host TSO/GSO engine - * - one skb being received by receiver host LRO/GRO/delayed-ACK engine - * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because - * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets, - * which allows 2 outstanding 2-packet sequences, to try to keep pipe - * full even with ACK-every-other-packet delayed ACKs. */ -static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) +static u32 bbr_bdp(struct sock *sk, u32 bw, int gain) { struct bbr *bbr = inet_csk_ca(sk); - u32 cwnd; + u32 bdp; u64 w; /* If we've never had a valid RTT sample, cap cwnd at the initial @@ -353,7 +342,24 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) w = (u64)bw * bbr->min_rtt_us; /* Apply a gain to the given value, then remove the BW_SCALE shift. 
*/ - cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT; + bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT; + + return bdp; +} + +/* To achieve full performance in high-speed paths, we budget enough cwnd to + * fit full-sized skbs in-flight on both end hosts to fully utilize the path: + * - one skb in sending host Qdisc, + * - one skb in sending host TSO/GSO engine + * - one skb being received by receiver host LRO/GRO/delayed-ACK engine + * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because + * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets, + * which allows 2 outstanding 2-packet sequences, to try to keep pipe + * full even with ACK-every-other-packet delayed ACKs. + */ +static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain) +{ + struct bbr *bbr = inet_csk_ca(sk); /* Allow enough full-sized skbs in flight to utilize end systems. */ cwnd += 3 * bbr_tso_segs_goal(sk); @@ -368,6 +374,17 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) return cwnd; } +/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */ +static u32 bbr_inflight(struct sock *sk, u32 bw, int gain) +{ + u32 inflight; + + inflight = bbr_bdp(sk, bw, gain); + inflight = bbr_quantization_budget(sk, inflight, gain); + + return inflight; +} + /* An optimization in BBR to reduce losses: On the first round of recovery, we * follow the packet conservation principle: send P packets per P packets acked. * After that, we slow-start and send at most 2*P packets per P packets acked. @@ -429,7 +446,8 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs, goto done; /* If we're below target cwnd, slow start cwnd toward target cwnd. 
*/ - target_cwnd = bbr_target_cwnd(sk, bw, gain); + target_cwnd = bbr_bdp(sk, bw, gain); + target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain); if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */ cwnd = min(cwnd + acked, target_cwnd); else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND) @@ -470,14 +488,14 @@ static bool bbr_is_next_cycle_phase(struct sock *sk, if (bbr->pacing_gain > BBR_UNIT) return is_full_length && (rs->losses || /* perhaps pacing_gain*BDP won't fit */ - inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain)); + inflight >= bbr_inflight(sk, bw, bbr->pacing_gain)); /* A pacing_gain < 1.0 tries to drain extra queue we added if bw * probing didn't find more bw. If inflight falls to match BDP then we * estimate queue is drained; persisting would underutilize the pipe. */ return is_full_length || - inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT); + inflight <= bbr_inflight(sk, bw, BBR_UNIT); } static void bbr_advance_cycle_phase(struct sock *sk) @@ -736,11 +754,11 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs) bbr->pacing_gain = bbr_drain_gain; /* pace slow to drain */ bbr->cwnd_gain = bbr_high_gain; /* maintain cwnd */ tcp_sk(sk)->snd_ssthresh = - bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT); + bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT); } /* fall through to check if in-flight is already small: */ if (bbr->mode == BBR_DRAIN && tcp_packets_in_flight(tcp_sk(sk)) <= - bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT)) + bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT)) bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */ } -- GitLab From 610058f519b579e38f9be0715ec9f73697e5d40d Mon Sep 17 00:00:00 2001 From: Priyaranjan Jha Date: Wed, 23 Jan 2019 12:04:54 -0800 Subject: [PATCH 0879/1304] tcp_bbr: adapt cwnd based on ack aggregation estimation commit 78dc70ebaa38aa303274e333be6c98eef87619e2 upstream. 
Aggregation effects are extremely common with wifi, cellular, and cable modem link technologies, ACK decimation in middleboxes, and LRO and GRO in receiving hosts. The aggregation can happen in either direction, data or ACKs, but in either case the aggregation effect is visible to the sender in the ACK stream. Previously BBR's sending was often limited by cwnd under severe ACK aggregation/decimation because BBR sized the cwnd at 2*BDP. If packets were acked in bursts after long delays (e.g. one ACK acking 5*BDP after 5*RTT), BBR's sending was halted after sending 2*BDP over 2*RTT, leaving the bottleneck idle for potentially long periods. Note that loss-based congestion control does not have this issue because when facing aggregation it continues increasing cwnd after bursts of ACKs, growing cwnd until the buffer is full. To achieve good throughput in the presence of aggregation effects, this algorithm allows the BBR sender to put extra data in flight to keep the bottleneck utilized during silences in the ACK stream that it has evidence to suggest were caused by aggregation. A summary of the algorithm: when a burst of packets are acked by a stretched ACK or a burst of ACKs or both, BBR first estimates the expected amount of data that should have been acked, based on its estimated bandwidth. Then the surplus ("extra_acked") is recorded in a windowed-max filter to estimate the recent level of observed ACK aggregation. Then cwnd is increased by the ACK aggregation estimate. The larger cwnd avoids BBR being cwnd-limited in the face of ACK silences that recent history suggests were caused by aggregation. As a sanity check, the ACK aggregation degree is upper-bounded by the cwnd (at the time of measurement) and a global max of BW * 100ms. 
The algorithm is further described by the following presentation: https://datatracker.ietf.org/meeting/101/materials/slides-101-iccrg-an-update-on-bbr-work-at-google-00 In our internal testing, we observed a significant increase in BBR throughput (measured using netperf), in a basic wifi setup. - Host1 (sender on ethernet) -> AP -> Host2 (receiver on wifi) - 2.4 GHz -> BBR before: ~73 Mbps; BBR after: ~102 Mbps; CUBIC: ~100 Mbps - 5.0 GHz -> BBR before: ~362 Mbps; BBR after: ~593 Mbps; CUBIC: ~601 Mbps Also, this code is running globally on YouTube TCP connections and produced significant bandwidth increases for YouTube traffic. This is based on Ian Swett's max_ack_height_ algorithm from the QUIC BBR implementation. Signed-off-by: Priyaranjan Jha Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- include/net/inet_connection_sock.h | 4 +- net/ipv4/tcp_bbr.c | 122 ++++++++++++++++++++++++++++- 2 files changed, 123 insertions(+), 3 deletions(-) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 2d5220ab0600..fc9d6e37552d 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -139,8 +139,8 @@ struct inet_connection_sock { } icsk_mtup; u32 icsk_user_timeout; - u64 icsk_ca_priv[88 / sizeof(u64)]; -#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64)) + u64 icsk_ca_priv[104 / sizeof(u64)]; +#define ICSK_CA_PRIV_SIZE (13 * sizeof(u64)) }; #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 4ee6cf1235f7..93f176336297 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -115,6 +115,14 @@ struct bbr { unused_b:5; u32 prior_cwnd; /* prior cwnd upon entering loss recovery */ u32 full_bw; /* recent bw, to estimate if pipe is full */ + + /* For tracking ACK aggregation: */ + u64 ack_epoch_mstamp; /* start of ACK sampling epoch */ + u16 extra_acked[2]; /* max excess data 
ACKed in epoch */ + u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */ + extra_acked_win_rtts:5, /* age of extra_acked, in round trips */ + extra_acked_win_idx:1, /* current index in extra_acked array */ + unused_c:6; }; #define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */ @@ -174,6 +182,15 @@ static const u32 bbr_lt_bw_diff = 4000 / 8; /* If we estimate we're policed, use lt_bw for this many round trips: */ static const u32 bbr_lt_bw_max_rtts = 48; +/* Gain factor for adding extra_acked to target cwnd: */ +static const int bbr_extra_acked_gain = BBR_UNIT; +/* Window length of extra_acked window. */ +static const u32 bbr_extra_acked_win_rtts = 5; +/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */ +static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20; +/* Time period for clamping cwnd increment due to ack aggregation */ +static const u32 bbr_extra_acked_max_us = 100 * 1000; + static void bbr_check_probe_rtt_done(struct sock *sk); /* Do we estimate that STARTUP filled the pipe? */ @@ -200,6 +217,16 @@ static u32 bbr_bw(const struct sock *sk) return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk); } +/* Return maximum extra acked in past k-2k round trips, + * where k = bbr_extra_acked_win_rtts. + */ +static u16 bbr_extra_acked(const struct sock *sk) +{ + struct bbr *bbr = inet_csk_ca(sk); + + return max(bbr->extra_acked[0], bbr->extra_acked[1]); +} + /* Return rate in bytes per second, optionally with a gain. * The order here is chosen carefully to avoid overflow of u64. This should * work for input rates of up to 2.9Tbit/sec and gain of 2.89x. @@ -305,6 +332,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) if (event == CA_EVENT_TX_START && tp->app_limited) { bbr->idle_restart = 1; + bbr->ack_epoch_mstamp = tp->tcp_mstamp; + bbr->ack_epoch_acked = 0; /* Avoid pointless buffer overflows: pace at est. bw if we don't * need more speed (we're restarting from idle and app-limited). 
*/ @@ -385,6 +414,22 @@ static u32 bbr_inflight(struct sock *sk, u32 bw, int gain) return inflight; } +/* Find the cwnd increment based on estimate of ack aggregation */ +static u32 bbr_ack_aggregation_cwnd(struct sock *sk) +{ + u32 max_aggr_cwnd, aggr_cwnd = 0; + + if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) { + max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us) + / BW_UNIT; + aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk)) + >> BBR_SCALE; + aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd); + } + + return aggr_cwnd; +} + /* An optimization in BBR to reduce losses: On the first round of recovery, we * follow the packet conservation principle: send P packets per P packets acked. * After that, we slow-start and send at most 2*P packets per P packets acked. @@ -445,9 +490,15 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs, if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) goto done; - /* If we're below target cwnd, slow start cwnd toward target cwnd. */ target_cwnd = bbr_bdp(sk, bw, gain); + + /* Increment the cwnd to account for excess ACKed data that seems + * due to aggregation (of data and/or ACKs) visible in the ACK stream. + */ + target_cwnd += bbr_ack_aggregation_cwnd(sk); target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain); + + /* If we're below target cwnd, slow start cwnd toward target cwnd. */ if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */ cwnd = min(cwnd + acked, target_cwnd); else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND) @@ -717,6 +768,67 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs) } } +/* Estimates the windowed max degree of ack aggregation. + * This is used to provision extra in-flight data to keep sending during + * inter-ACK silences. + * + * Degree of ack aggregation is estimated as extra data acked beyond expected. 
+ * + * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval" + * cwnd += max_extra_acked + * + * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms). + * Max filter is an approximate sliding window of 5-10 (packet timed) round + * trips. + */ +static void bbr_update_ack_aggregation(struct sock *sk, + const struct rate_sample *rs) +{ + u32 epoch_us, expected_acked, extra_acked; + struct bbr *bbr = inet_csk_ca(sk); + struct tcp_sock *tp = tcp_sk(sk); + + if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 || + rs->delivered < 0 || rs->interval_us <= 0) + return; + + if (bbr->round_start) { + bbr->extra_acked_win_rtts = min(0x1F, + bbr->extra_acked_win_rtts + 1); + if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) { + bbr->extra_acked_win_rtts = 0; + bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ? + 0 : 1; + bbr->extra_acked[bbr->extra_acked_win_idx] = 0; + } + } + + /* Compute how many packets we expected to be delivered over epoch. */ + epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp, + bbr->ack_epoch_mstamp); + expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT; + + /* Reset the aggregation epoch if ACK rate is below expected rate or + * significantly large no. of ack received since epoch (potentially + * quite old epoch). + */ + if (bbr->ack_epoch_acked <= expected_acked || + (bbr->ack_epoch_acked + rs->acked_sacked >= + bbr_ack_epoch_acked_reset_thresh)) { + bbr->ack_epoch_acked = 0; + bbr->ack_epoch_mstamp = tp->delivered_mstamp; + expected_acked = 0; + } + + /* Compute excess data delivered, beyond what was expected. 
*/ + bbr->ack_epoch_acked = min_t(u32, 0xFFFFF, + bbr->ack_epoch_acked + rs->acked_sacked); + extra_acked = bbr->ack_epoch_acked - expected_acked; + extra_acked = min(extra_acked, tp->snd_cwnd); + if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx]) + bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked; +} + /* Estimate when the pipe is full, using the change in delivery rate: BBR * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited @@ -846,6 +958,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) static void bbr_update_model(struct sock *sk, const struct rate_sample *rs) { bbr_update_bw(sk, rs); + bbr_update_ack_aggregation(sk, rs); bbr_update_cycle_phase(sk, rs); bbr_check_full_bw_reached(sk, rs); bbr_check_drain(sk, rs); @@ -896,6 +1009,13 @@ static void bbr_init(struct sock *sk) bbr_reset_lt_bw_sampling(sk); bbr_reset_startup_mode(sk); + bbr->ack_epoch_mstamp = tp->tcp_mstamp; + bbr->ack_epoch_acked = 0; + bbr->extra_acked_win_rtts = 0; + bbr->extra_acked_win_idx = 0; + bbr->extra_acked[0] = 0; + bbr->extra_acked[1] = 0; + cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED); } -- GitLab From 8b4846ac1af4b0c99817aee7304e9f5dd6ffcb56 Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Tue, 12 May 2020 14:40:01 +0200 Subject: [PATCH 0880/1304] serial: 8250: Avoid error message on reprobe commit e0a851fe6b9b619527bd928aa93caaddd003f70c upstream. If the call to uart_add_one_port() in serial8250_register_8250_port() fails, a half-initialized entry in the serial_8250ports[] array is left behind. A subsequent reprobe of the same serial port causes that entry to be reused. 
Because uart->port.dev is set, uart_remove_one_port() is called for the half-initialized entry and bails out with an error message: bcm2835-aux-uart 3f215040.serial: Removing wrong port: (null) != (ptrval) The same happens on failure of mctrl_gpio_init() since commit 4a96895f74c9 ("tty/serial/8250: use mctrl_gpio helpers"). Fix by zeroing the uart->port.dev pointer in the probe error path. The bug was introduced in v2.6.10 by historical commit befff6f5bf5f ("[SERIAL] Add new port registration/unregistration functions."): https://git.kernel.org/tglx/history/c/befff6f5bf5f The commit added an unconditional call to uart_remove_one_port() in serial8250_register_port(). In v3.7, commit 835d844d1a28 ("8250_pnp: do pnp probe before legacy probe") made that call conditional on uart->port.dev which allows me to fix the issue by zeroing that pointer in the error path. Thus, the present commit will fix the problem as far back as v3.7 whereas still older versions need to also cherry-pick 835d844d1a28. 
Fixes: 835d844d1a28 ("8250_pnp: do pnp probe before legacy probe") Signed-off-by: Lukas Wunner Cc: stable@vger.kernel.org # v2.6.10 Cc: stable@vger.kernel.org # v2.6.10: 835d844d1a28: 8250_pnp: do pnp probe before legacy Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/b4a072013ee1a1d13ee06b4325afb19bda57ca1b.1589285873.git.lukas@wunner.de [iwamatsu: Backported to 4.14, 4.19: adjust context] Signed-off-by: Nobuhiro Iwamatsu (CIP) Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_core.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index e1a5887b6d91..d2df7d71d666 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -1062,8 +1062,10 @@ int serial8250_register_8250_port(struct uart_8250_port *up) serial8250_apply_quirks(uart); ret = uart_add_one_port(&serial8250_reg, &uart->port); - if (ret == 0) - ret = uart->port.line; + if (ret) + goto err; + + ret = uart->port.line; } else { dev_info(uart->port.dev, "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n", @@ -1088,6 +1090,11 @@ int serial8250_register_8250_port(struct uart_8250_port *up) mutex_unlock(&serial_mutex); return ret; + +err: + uart->port.dev = NULL; + mutex_unlock(&serial_mutex); + return ret; } EXPORT_SYMBOL(serial8250_register_8250_port); -- GitLab From 10ad6cfd57360760116cde00a8ef756e121367a9 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 26 Sep 2020 18:01:33 +0200 Subject: [PATCH 0881/1304] Linux 4.19.148 Tested-by: Jon Hunter Tested-by: Shuah Khan Tested-by: Linux Kernel Functional Testing Tested-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20200925124720.972208530@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4f52bb29126b..3ffd5b03e6dd 100644 --- a/Makefile +++ 
b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 147 +SUBLEVEL = 148 EXTRAVERSION = NAME = "People's Front" -- GitLab From aed88142899ca843de982267d9f0297f83f09225 Mon Sep 17 00:00:00 2001 From: Matthias Maennich Date: Mon, 28 Sep 2020 14:42:26 +0100 Subject: [PATCH 0882/1304] ANDROID: Refresh ABI.xmls with libabigail 1.8.0-98bbf30d This adds missing anonymous to union types, creating some one-time churn. Bug: 167563393 Change-Id: I08f55d48793870c6a82ff2c76d3460733ffdbe41 Signed-off-by: Matthias Maennich --- android/abi_gki_aarch64.xml | 1341 ++++++++++++++++++----------------- 1 file changed, 706 insertions(+), 635 deletions(-) diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml index b80128fc7bb9..b81396432ec1 100644 --- a/android/abi_gki_aarch64.xml +++ b/android/abi_gki_aarch64.xml @@ -3429,26 +3429,19 @@ - + - - - - - - - - - - - + + + + @@ -3458,6 +3451,14 @@ + + + + + + + + @@ -3892,7 +3893,7 @@ - + @@ -3979,7 +3980,7 @@ - + @@ -4138,7 +4139,7 @@ - + @@ -5162,7 +5163,7 @@ - + @@ -5236,7 +5237,7 @@ - + @@ -5256,7 +5257,7 @@ - + @@ -5304,7 +5305,7 @@ - + @@ -5395,7 +5396,7 @@ - + @@ -5481,7 +5482,7 @@ - + @@ -5489,7 +5490,7 @@ - + @@ -5513,7 +5514,7 @@ - + @@ -5524,7 +5525,7 @@ - + @@ -5542,7 +5543,7 @@ - + @@ -5561,7 +5562,7 @@ - + @@ -5572,7 +5573,7 @@ - + @@ -5580,7 +5581,7 @@ - + @@ -5588,7 +5589,7 @@ - + @@ -5626,7 +5627,7 @@ - + @@ -5648,7 +5649,7 @@ - + @@ -5660,92 +5661,30 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + - + @@ -5759,6 +5698,20 @@ + + + + + + + + + + + + + + @@ -5767,7 +5720,7 @@ - + @@ -5778,7 +5731,7 @@ - + @@ -5789,13 +5742,66 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + @@ -5817,22 +5823,15 @@ - - - - - - - - - - - + + + + - + @@ -5841,6 +5840,14 @@ + + + 
+ + + + + @@ -5861,7 +5868,7 @@ - + @@ -5936,7 +5943,7 @@ - + @@ -5985,7 +5992,7 @@ - + @@ -6039,13 +6046,13 @@ - + - + @@ -6054,7 +6061,7 @@ - + @@ -7123,6 +7130,19 @@ + + + + + + + + + + + + + @@ -11128,7 +11148,7 @@ - + @@ -11145,33 +11165,22 @@ - - - - - - - - - - - - - - - - - - + + + + + + + @@ -11187,7 +11196,15 @@ - + + + + + + + + + @@ -11195,12 +11212,17 @@ - + - + + + + + + @@ -12123,7 +12145,7 @@ - + @@ -12183,7 +12205,7 @@ - + @@ -12412,7 +12434,7 @@ - + @@ -13195,7 +13217,7 @@ - + @@ -13473,7 +13495,7 @@ - + @@ -16151,135 +16173,14 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -32393,6 +32294,23 @@ + + + + + + + + + + + + + + + + + @@ -42046,7 +41964,7 @@ - + @@ -42342,7 +42260,7 @@ - + @@ -42387,11 +42305,11 @@ - + - + @@ -42438,7 +42356,7 @@ - + @@ -44618,129 +44536,27 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + @@ -44750,6 +44566,9 @@ + + + @@ -44792,6 +44611,20 @@ + + + + + + + + + + + + + + @@ -44800,7 +44633,70 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -44826,6 +44722,17 @@ + + + + + + + + + + + @@ -44840,7 +44747,7 @@ - + @@ -44851,7 +44758,7 @@ - + @@ -44871,7 +44778,7 @@ - + @@ -44879,7 +44786,24 @@ - + + + + + + + + + + + + + + + + + + @@ -45411,7 +45335,7 @@ - + @@ -45447,7 +45371,7 @@ - + @@ -46535,28 +46459,21 @@ - + - - - - - - - - - - - + + + + - + @@ -46564,6 +46481,14 @@ + + + + + + + + @@ -47030,7 +46955,7 @@ - + @@ -47558,7 +47483,7 @@ - + @@ -49767,7 +49692,124 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -49784,6 +49826,10 @@ + + + + @@ -50457,7 +50503,7 @@ - + @@ -54521,8 +54567,8 @@ - - + + @@ -54530,31 +54576,31 @@ - - + + - - - - + + + + - - + + - - - + + + - - + + - - + + @@ -56139,23 +56185,6 @@ - - - - - - - - - - - - - - - - - @@ -56429,7 +56458,7 @@ - + @@ -59136,6 +59165,17 @@ + + + + + + + + + + + @@ -65419,7 +65459,7 @@ - + @@ -65474,11 +65514,11 @@ - + - + @@ -65497,11 +65537,11 @@ - + - + @@ -65853,6 +65893,14 @@ + + + + + + + + @@ -65867,7 +65915,7 @@ - + @@ -66054,7 +66102,7 @@ - + @@ -66068,7 +66116,7 @@ - + @@ -66111,7 +66159,7 @@ - + @@ -66139,7 +66187,7 @@ - + @@ -68244,6 +68292,29 @@ + + + + + + + + + + + + + + + + + + + + + + + @@ -68805,7 +68876,7 @@ - + @@ -68833,7 +68904,6 @@ - @@ -68845,10 +68915,6 @@ - - - - @@ -68866,14 +68932,6 @@ - - - - - - - - @@ -69121,11 +69179,11 @@ - + - + @@ -74357,7 +74415,7 @@ - + @@ -77044,6 +77102,17 @@ + + + + + + + + + + + @@ -78819,13 +78888,13 @@ - + - + @@ -79019,7 +79088,7 @@ - + @@ -79058,65 +79127,27 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + - + @@ -79162,6 +79193,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -79190,6 +79247,23 @@ + + + + + + + + + + + + + + + + + @@ -79415,7 +79489,7 @@ - + @@ -79444,7 +79518,7 @@ - + @@ -79452,7 +79526,7 @@ - + @@ -79649,7 +79723,7 @@ - + @@ -80064,7 +80138,7 @@ - + @@ -80445,7 +80519,7 @@ - + @@ -80497,7 +80571,7 @@ - + @@ -82888,7 +82962,7 @@ - + @@ -85234,7 +85308,7 @@ - + @@ -85556,34 +85630,18 @@ - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + - + @@ -85605,6 +85663,23 @@ + + + + + + + + + + + + + + + + + @@ -85886,7 +85961,7 @@ - + @@ -86161,7 +86236,7 @@ - + @@ -87044,22 +87119,51 @@ - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ 
-87121,8 +87225,8 @@ - - + + @@ -87452,7 +87556,7 @@ - + @@ -87611,11 +87715,11 @@ - - - - - + + + + + @@ -87691,12 +87795,12 @@ - + - + - + @@ -88368,20 +88472,6 @@ - - - - - - - - - - - - - - @@ -88926,21 +89016,21 @@ - + - + - + @@ -89215,11 +89305,11 @@ - + - + @@ -89909,14 +89999,6 @@ - - - - - - - - @@ -90132,7 +90214,7 @@ - + @@ -92091,7 +92173,7 @@ - + @@ -97558,17 +97640,6 @@ - - - - - - - - - - - @@ -101222,6 +101293,6 @@ -- GitLab From fd9a27ace5d9d472a619cd5082f8963297e136e9 Mon Sep 17 00:00:00 2001 From: Giuliano Procida Date: Thu, 3 Sep 2020 08:16:09 +0100 Subject: [PATCH 0883/1304] ANDROID: GKI: prevent removal of monitored symbols For aarch64 GKI builds, enable KMI_SYMBOL_LIST_ADD_ONLY. Bug: 165839948 Change-Id: I133fc4cd72e0f7a835d4be8b512134c420ea6198 Signed-off-by: Giuliano Procida --- build.config.gki.aarch64 | 1 + 1 file changed, 1 insertion(+) diff --git a/build.config.gki.aarch64 b/build.config.gki.aarch64 index b540900d0940..74a6941f1aa7 100644 --- a/build.config.gki.aarch64 +++ b/build.config.gki.aarch64 @@ -9,5 +9,6 @@ android/abi_gki_aarch64_cuttlefish android/abi_gki_aarch64_qcom " TRIM_NONLISTED_KMI=1 +KMI_SYMBOL_LIST_ADD_ONLY=1 KMI_SYMBOL_LIST_STRICT_MODE=1 KMI_ENFORCED=1 -- GitLab From 1904f6dfcbbd78e6f4858945c441838367469902 Mon Sep 17 00:00:00 2001 From: Jonathan Lebon Date: Thu, 12 Sep 2019 09:30:07 -0400 Subject: [PATCH 0884/1304] selinux: allow labeling before policy is loaded [ Upstream commit 3e3e24b42043eceb97ed834102c2d094dfd7aaa6 ] Currently, the SELinux LSM prevents one from setting the `security.selinux` xattr on an inode without a policy first being loaded. However, this restriction is problematic: it makes it impossible to have newly created files with the correct label before actually loading the policy. This is relevant in distributions like Fedora, where the policy is loaded by systemd shortly after pivoting out of the initrd. In such instances, all files created prior to pivoting will be unlabeled. 
One then has to relabel them after pivoting, an operation which inherently races with other processes trying to access those same files. Going further, there are use cases for creating the entire root filesystem on first boot from the initrd (e.g. Container Linux supports this today[1], and we'd like to support it in Fedora CoreOS as well[2]). One can imagine doing this in two ways: at the block device level (e.g. laying down a disk image), or at the filesystem level. In the former, labeling can simply be part of the image. But even in the latter scenario, one still really wants to be able to set the right labels when populating the new filesystem. This patch enables this by changing behaviour in the following two ways: 1. allow `setxattr` if we're not initialized 2. don't try to set the in-core inode SID if we're not initialized; instead leave it as `LABEL_INVALID` so that revalidation may be attempted at a later time Note the first hunk of this patch is mostly the same as a previously discussed one[3], though it was part of a larger series which wasn't accepted. [1] https://coreos.com/os/docs/latest/root-filesystem-placement.html [2] https://github.com/coreos/fedora-coreos-tracker/issues/94 [3] https://www.spinics.net/lists/linux-initramfs/msg04593.html Co-developed-by: Victor Kamensky Signed-off-by: Victor Kamensky Signed-off-by: Jonathan Lebon Signed-off-by: Paul Moore Signed-off-by: Sasha Levin --- security/selinux/hooks.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 452254fd89f8..250b725f5754 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3304,6 +3304,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name, return dentry_has_perm(current_cred(), dentry, FILE__SETATTR); } + if (!selinux_state.initialized) + return (inode_owner_or_capable(inode) ? 
0 : -EPERM); + sbsec = inode->i_sb->s_security; if (!(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; @@ -3387,6 +3390,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name, return; } + if (!selinux_state.initialized) { + /* If we haven't even been initialized, then we can't validate + * against a policy, so leave the label as invalid. It may + * resolve to a valid label on the next revalidation try if + * we've since initialized. + */ + return; + } + rc = security_context_to_sid_force(&selinux_state, value, size, &newsid); if (rc) { -- GitLab From 3e4afbb188576cbf755f2156f8712cc8553b5330 Mon Sep 17 00:00:00 2001 From: zhengbin Date: Sun, 18 Aug 2019 22:51:30 -0300 Subject: [PATCH 0885/1304] media: mc-device.c: fix memleak in media_device_register_entity [ Upstream commit 713f871b30a66dc4daff4d17b760c9916aaaf2e1 ] In media_device_register_entity, if media_graph_walk_init fails, need to free the previously memory. Reported-by: Hulk Robot Signed-off-by: zhengbin Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/media-device.c | 65 ++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index ed518b1f82e4..d04ed438a45d 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -568,6 +568,38 @@ static void media_device_release(struct media_devnode *devnode) dev_dbg(devnode->parent, "Media device released\n"); } +static void __media_device_unregister_entity(struct media_entity *entity) +{ + struct media_device *mdev = entity->graph_obj.mdev; + struct media_link *link, *tmp; + struct media_interface *intf; + unsigned int i; + + ida_free(&mdev->entity_internal_idx, entity->internal_idx); + + /* Remove all interface links pointing to this entity */ + list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { + list_for_each_entry_safe(link, tmp, 
&intf->links, list) { + if (link->entity == entity) + __media_remove_intf_link(link); + } + } + + /* Remove all data links that belong to this entity */ + __media_entity_remove_links(entity); + + /* Remove all pads that belong to this entity */ + for (i = 0; i < entity->num_pads; i++) + media_gobj_destroy(&entity->pads[i].graph_obj); + + /* Remove the entity */ + media_gobj_destroy(&entity->graph_obj); + + /* invoke entity_notify callbacks to handle entity removal?? */ + + entity->graph_obj.mdev = NULL; +} + /** * media_device_register_entity - Register an entity with a media device * @mdev: The media device @@ -625,6 +657,7 @@ int __must_check media_device_register_entity(struct media_device *mdev, */ ret = media_graph_walk_init(&new, mdev); if (ret) { + __media_device_unregister_entity(entity); mutex_unlock(&mdev->graph_mutex); return ret; } @@ -637,38 +670,6 @@ int __must_check media_device_register_entity(struct media_device *mdev, } EXPORT_SYMBOL_GPL(media_device_register_entity); -static void __media_device_unregister_entity(struct media_entity *entity) -{ - struct media_device *mdev = entity->graph_obj.mdev; - struct media_link *link, *tmp; - struct media_interface *intf; - unsigned int i; - - ida_free(&mdev->entity_internal_idx, entity->internal_idx); - - /* Remove all interface links pointing to this entity */ - list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) { - list_for_each_entry_safe(link, tmp, &intf->links, list) { - if (link->entity == entity) - __media_remove_intf_link(link); - } - } - - /* Remove all data links that belong to this entity */ - __media_entity_remove_links(entity); - - /* Remove all pads that belong to this entity */ - for (i = 0; i < entity->num_pads; i++) - media_gobj_destroy(&entity->pads[i].graph_obj); - - /* Remove the entity */ - media_gobj_destroy(&entity->graph_obj); - - /* invoke entity_notify callbacks to handle entity removal?? 
*/ - - entity->graph_obj.mdev = NULL; -} - void media_device_unregister_entity(struct media_entity *entity) { struct media_device *mdev = entity->graph_obj.mdev; -- GitLab From 8e634b7b1953df74784cf7e5f732620f45abfb76 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 4 Oct 2019 11:11:40 +0100 Subject: [PATCH 0886/1304] dma-fence: Serialise signal enabling (dma_fence_enable_sw_signaling) [ Upstream commit 9c98f021e4e717ffd9948fa65340ea3ef12b7935 ] Make dma_fence_enable_sw_signaling() behave like its dma_fence_add_callback() and dma_fence_default_wait() counterparts and perform the test to enable signaling under the fence->lock, along with the action to do so. This ensure that should an implementation be trying to flush the cb_list (by signaling) on retirement before freeing the fence, it can do so in a race-free manner. See also 0fc89b6802ba ("dma-fence: Simply wrap dma_fence_signal_locked with dma_fence_signal"). v2: Refactor all 3 enable_signaling paths to use a common function. v3: Don't argue, just keep the tracepoint in the existing spot. 
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20191004101140.32713-1-chris@chris-wilson.co.uk Signed-off-by: Sasha Levin --- drivers/dma-buf/dma-fence.c | 78 +++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 43 deletions(-) diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 1551ca7df394..8586cc05def1 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -244,6 +244,30 @@ void dma_fence_free(struct dma_fence *fence) } EXPORT_SYMBOL(dma_fence_free); +static bool __dma_fence_enable_signaling(struct dma_fence *fence) +{ + bool was_set; + + lockdep_assert_held(fence->lock); + + was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &fence->flags); + + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return false; + + if (!was_set && fence->ops->enable_signaling) { + trace_dma_fence_enable_signal(fence); + + if (!fence->ops->enable_signaling(fence)) { + dma_fence_signal_locked(fence); + return false; + } + } + + return true; +} + /** * dma_fence_enable_sw_signaling - enable signaling on fence * @fence: the fence to enable @@ -256,19 +280,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags; - if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags) && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && - fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - spin_lock_irqsave(fence->lock, flags); - - if (!fence->ops->enable_signaling(fence)) - dma_fence_signal_locked(fence); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return; - spin_unlock_irqrestore(fence->lock, flags); - } + spin_lock_irqsave(fence->lock, flags); + __dma_fence_enable_signaling(fence); + spin_unlock_irqrestore(fence->lock, flags); } EXPORT_SYMBOL(dma_fence_enable_sw_signaling); @@ -302,7 +319,6 @@ int dma_fence_add_callback(struct 
dma_fence *fence, struct dma_fence_cb *cb, { unsigned long flags; int ret = 0; - bool was_set; if (WARN_ON(!fence || !func)) return -EINVAL; @@ -314,25 +330,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, spin_lock_irqsave(fence->lock, flags); - was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) - ret = -ENOENT; - else if (!was_set && fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - dma_fence_signal_locked(fence); - ret = -ENOENT; - } - } - - if (!ret) { + if (__dma_fence_enable_signaling(fence)) { cb->func = func; list_add_tail(&cb->node, &fence->cb_list); - } else + } else { INIT_LIST_HEAD(&cb->node); + ret = -ENOENT; + } + spin_unlock_irqrestore(fence->lock, flags); return ret; @@ -432,7 +437,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) struct default_wait_cb cb; unsigned long flags; signed long ret = timeout ? 
timeout : 1; - bool was_set; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return ret; @@ -444,21 +448,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) goto out; } - was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &fence->flags); - - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + if (!__dma_fence_enable_signaling(fence)) goto out; - if (!was_set && fence->ops->enable_signaling) { - trace_dma_fence_enable_signal(fence); - - if (!fence->ops->enable_signaling(fence)) { - dma_fence_signal_locked(fence); - goto out; - } - } - if (!timeout) { ret = 0; goto out; -- GitLab From ffca49e2925c709372727022273c66a49865009d Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Wed, 9 Oct 2019 16:18:08 +0800 Subject: [PATCH 0887/1304] ath10k: fix array out-of-bounds access [ Upstream commit c5329b2d5b8b4e41be14d31ee8505b4f5607bf9b ] If firmware reports rate_max > WMI_TPC_RATE_MAX(WMI_TPC_FINAL_RATE_MAX) or num_tx_chain > WMI_TPC_TX_N_CHAIN, it will cause array out-of-bounds access, so print a warning and reset to avoid memory corruption. Tested HW: QCA9984 Tested FW: 10.4-3.9.0.2-00035 Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ath10k/debug.c | 2 +- drivers/net/wireless/ath/ath10k/wmi.c | 49 ++++++++++++++++--------- 2 files changed, 32 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 0baaad90b8d1..aa333110eaba 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1521,7 +1521,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, *len += scnprintf(buf + *len, buf_len - *len, "No. 
Preamble Rate_code "); - for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++) + for (i = 0; i < tpc_stats->num_tx_chain; i++) *len += scnprintf(buf + *len, buf_len - *len, "tpc_value%d ", i); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 3372dfa0decc..3f3fbee631c3 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4550,16 +4550,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar, } pream_idx = 0; - for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + for (i = 0; i < tpc_stats->rate_max; i++) { memset(tpc_value, 0, sizeof(tpc_value)); memset(buff, 0, sizeof(buff)); if (i == pream_table[pream_idx]) pream_idx++; - for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { - if (j >= __le32_to_cpu(ev->num_tx_chain)) - break; - + for (j = 0; j < tpc_stats->num_tx_chain; j++) { tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1, rate_code[i], type); @@ -4672,7 +4669,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table, void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) { - u32 num_tx_chain; + u32 num_tx_chain, rate_max; u8 rate_code[WMI_TPC_RATE_MAX]; u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; struct wmi_pdev_tpc_config_event *ev; @@ -4688,6 +4685,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) return; } + rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n", + rate_max, WMI_TPC_RATE_MAX); + rate_max = WMI_TPC_RATE_MAX; + } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); if (!tpc_stats) return; @@ -4704,8 +4708,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->twice_antenna_reduction); tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); - tpc_stats->num_tx_chain = 
__le32_to_cpu(ev->num_tx_chain); - tpc_stats->rate_max = __le32_to_cpu(ev->rate_max); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max; ath10k_tpc_config_disp_tables(ar, ev, tpc_stats, rate_code, pream_table, @@ -4900,16 +4904,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, } pream_idx = 0; - for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) { + for (i = 0; i < tpc_stats->rate_max; i++) { memset(tpc_value, 0, sizeof(tpc_value)); memset(buff, 0, sizeof(buff)); if (i == pream_table[pream_idx]) pream_idx++; - for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) { - if (j >= __le32_to_cpu(ev->num_tx_chain)) - break; - + for (j = 0; j < tpc_stats->num_tx_chain; j++) { tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1, rate_code[i], type, pream_idx); @@ -4925,7 +4926,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) { - u32 num_tx_chain; + u32 num_tx_chain, rate_max; u8 rate_code[WMI_TPC_FINAL_RATE_MAX]; u16 pream_table[WMI_TPC_PREAM_TABLE_MAX]; struct wmi_pdev_tpc_final_table_event *ev; @@ -4933,12 +4934,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) ev = (struct wmi_pdev_tpc_final_table_event *)skb->data; + num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { + ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n", + num_tx_chain, WMI_TPC_TX_N_CHAIN); + return; + } + + rate_max = __le32_to_cpu(ev->rate_max); + if (rate_max > WMI_TPC_FINAL_RATE_MAX) { + ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n", + rate_max, WMI_TPC_FINAL_RATE_MAX); + rate_max = WMI_TPC_FINAL_RATE_MAX; + } + tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC); if (!tpc_stats) return; - num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, num_tx_chain); @@ -4951,8 +4964,8 @@ 
void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb) __le32_to_cpu(ev->twice_antenna_reduction); tpc_stats->power_limit = __le32_to_cpu(ev->power_limit); tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power); - tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain); - tpc_stats->rate_max = __le32_to_cpu(ev->rate_max); + tpc_stats->num_tx_chain = num_tx_chain; + tpc_stats->rate_max = rate_max; ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats, rate_code, pream_table, -- GitLab From c22a24ad5f16a88695f6403d98ae265cf0013424 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Wed, 9 Oct 2019 16:18:09 +0800 Subject: [PATCH 0888/1304] ath10k: fix memory leak for tpc_stats_final [ Upstream commit 486a8849843455298d49e694cca9968336ce2327 ] The memory of ar->debug.tpc_stats_final is reallocated every debugfs reading, it should be freed in ath10k_debug_destroy() for the last allocation. Tested HW: QCA9984 Tested FW: 10.4-3.9.0.2-00035 Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ath10k/debug.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index aa333110eaba..4e980e78ba95 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -2365,6 +2365,7 @@ void ath10k_debug_destroy(struct ath10k *ar) ath10k_debug_fw_stats_reset(ar); kfree(ar->debug.tpc_stats); + kfree(ar->debug.tpc_stats_final); } int ath10k_debug_register(struct ath10k *ar) -- GitLab From 8579a0440381353e0a71dd6a4d4371be8457eac4 Mon Sep 17 00:00:00 2001 From: Jia He Date: Fri, 11 Oct 2019 22:09:39 +0800 Subject: [PATCH 0889/1304] mm: fix double page fault on arm64 if PTE_AF is cleared [ Upstream commit 83d116c53058d505ddef051e90ab27f57015b025 ] When we tested pmdk unit test [1] vmmalloc_fork TEST3 on arm64 guest, there will be a double page fault in __copy_from_user_inatomic of 
cow_user_page. To reproduce the bug, the cmd is as follows after you deployed everything: make -C src/test/vmmalloc_fork/ TEST_TIME=60m check Below call trace is from arm64 do_page_fault for debugging purpose: [ 110.016195] Call trace: [ 110.016826] do_page_fault+0x5a4/0x690 [ 110.017812] do_mem_abort+0x50/0xb0 [ 110.018726] el1_da+0x20/0xc4 [ 110.019492] __arch_copy_from_user+0x180/0x280 [ 110.020646] do_wp_page+0xb0/0x860 [ 110.021517] __handle_mm_fault+0x994/0x1338 [ 110.022606] handle_mm_fault+0xe8/0x180 [ 110.023584] do_page_fault+0x240/0x690 [ 110.024535] do_mem_abort+0x50/0xb0 [ 110.025423] el0_da+0x20/0x24 The pte info before __copy_from_user_inatomic is (PTE_AF is cleared): [ffff9b007000] pgd=000000023d4f8003, pud=000000023da9b003, pmd=000000023d4b3003, pte=360000298607bd3 As told by Catalin: "On arm64 without hardware Access Flag, copying from user will fail because the pte is old and cannot be marked young. So we always end up with zeroed page after fork() + CoW for pfn mappings. we don't always have a hardware-managed access flag on arm64." This patch fixes it by calling pte_mkyoung. Also, the parameter is changed because vmf should be passed to cow_user_page() Add a WARN_ON_ONCE when __copy_from_user_inatomic() returns error in case there can be some obscure use-case (by Kirill). [1] https://github.com/pmem/pmdk/tree/master/src/test/vmmalloc_fork Signed-off-by: Jia He Reported-by: Yibo Cai Reviewed-by: Catalin Marinas Acked-by: Kirill A. 
Shutemov Signed-off-by: Catalin Marinas Signed-off-by: Sasha Levin --- mm/memory.c | 104 ++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 89 insertions(+), 15 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index bbf0cc4066c8..fcad8a0d943d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -116,6 +116,18 @@ int randomize_va_space __read_mostly = 2; #endif +#ifndef arch_faults_on_old_pte +static inline bool arch_faults_on_old_pte(void) +{ + /* + * Those arches which don't have hw access flag feature need to + * implement their own helper. By default, "true" means pagefault + * will be hit on old pte. + */ + return true; +} +#endif + static int __init disable_randmaps(char *s) { randomize_va_space = 0; @@ -2335,32 +2347,82 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, return same; } -static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) +static inline bool cow_user_page(struct page *dst, struct page *src, + struct vm_fault *vmf) { + bool ret; + void *kaddr; + void __user *uaddr; + bool force_mkyoung; + struct vm_area_struct *vma = vmf->vma; + struct mm_struct *mm = vma->vm_mm; + unsigned long addr = vmf->address; + debug_dma_assert_idle(src); + if (likely(src)) { + copy_user_highpage(dst, src, addr, vma); + return true; + } + /* * If the source page was a PFN mapping, we don't have * a "struct page" for it. We do a best-effort copy by * just copying from the original user address. If that * fails, we just zero-fill it. Live with it. */ - if (unlikely(!src)) { - void *kaddr = kmap_atomic(dst); - void __user *uaddr = (void __user *)(va & PAGE_MASK); + kaddr = kmap_atomic(dst); + uaddr = (void __user *)(addr & PAGE_MASK); + + /* + * On architectures with software "accessed" bits, we would + * take a double page fault, so mark it accessed here. 
+ */ + force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte); + if (force_mkyoung) { + pte_t entry; + + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + /* + * Other thread has already handled the fault + * and we don't need to do anything. If it's + * not the case, the fault will be triggered + * again on the same address. + */ + ret = false; + goto pte_unlock; + } + entry = pte_mkyoung(vmf->orig_pte); + if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) + update_mmu_cache(vma, addr, vmf->pte); + } + + /* + * This really shouldn't fail, because the page is there + * in the page tables. But it might just be unreadable, + * in which case we just give up and fill the result with + * zeroes. + */ + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { /* - * This really shouldn't fail, because the page is there - * in the page tables. But it might just be unreadable, - * in which case we just give up and fill the result with - * zeroes. + * Give a warn in case there can be some obscure + * use-case */ - if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) - clear_page(kaddr); - kunmap_atomic(kaddr); - flush_dcache_page(dst); - } else - copy_user_highpage(dst, src, va, vma); + WARN_ON_ONCE(1); + clear_page(kaddr); + } + + ret = true; + +pte_unlock: + if (force_mkyoung) + pte_unmap_unlock(vmf->pte, vmf->ptl); + kunmap_atomic(kaddr); + flush_dcache_page(dst); + + return ret; } static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) @@ -2514,7 +2576,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) vmf->address); if (!new_page) goto oom; - cow_user_page(new_page, old_page, vmf->address, vma); + + if (!cow_user_page(new_page, old_page, vmf)) { + /* + * COW failed, if the fault was solved by other, + * it's fine. If not, userspace would re-fault on + * the same address and we will handle the fault + * from the second attempt. 
+ */ + put_page(new_page); + if (old_page) + put_page(old_page); + return 0; + } } if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false)) -- GitLab From 0cbdeff7874420a9da774bc9d5bb84cf532b2df2 Mon Sep 17 00:00:00 2001 From: Balsundar P Date: Tue, 15 Oct 2019 11:51:58 +0530 Subject: [PATCH 0890/1304] scsi: aacraid: fix illegal IO beyond last LBA [ Upstream commit c86fbe484c10b2cd1e770770db2d6b2c88801c1d ] The driver fails to handle data when read or written beyond device reported LBA, which triggers kernel panic Link: https://lore.kernel.org/r/1571120524-6037-2-git-send-email-balsundar.p@microsemi.com Signed-off-by: Balsundar P Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/aacraid/aachba.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 6e356325d8d9..54717fb84a54 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -2481,13 +2481,13 @@ static int aac_read(struct scsi_cmnd * scsicmd) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); scsicmd->scsi_done(scsicmd); - return 1; + return 0; } dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", @@ -2573,13 +2573,13 @@ static int aac_write(struct scsi_cmnd * scsicmd) scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; set_sense(&dev->fsa_dev[cid].sense_data, - HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, + ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE, ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, 
sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE)); scsicmd->scsi_done(scsicmd); - return 1; + return 0; } dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", -- GitLab From 448221087d7793c2959bb10916e0ce78fe045a50 Mon Sep 17 00:00:00 2001 From: Fuqian Huang Date: Fri, 27 Sep 2019 20:15:44 +0800 Subject: [PATCH 0891/1304] m68k: q40: Fix info-leak in rtc_ioctl [ Upstream commit 7cf78b6b12fd5550545e4b73b35dca18bd46b44c ] When the option is RTC_PLL_GET, pll will be copied to userland via copy_to_user. pll is initialized using mach_get_rtc_pll indirect call and mach_get_rtc_pll is only assigned with function q40_get_rtc_pll in arch/m68k/q40/config.c. In function q40_get_rtc_pll, the field pll_ctrl is not initialized. This will leak uninitialized stack content to userland. Fix this by zeroing the uninitialized field. Signed-off-by: Fuqian Huang Link: https://lore.kernel.org/r/20190927121544.7650-1-huangfq.daxian@gmail.com Signed-off-by: Geert Uytterhoeven Signed-off-by: Sasha Levin --- arch/m68k/q40/config.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c index 96810d91da2b..4a25ce6a1823 100644 --- a/arch/m68k/q40/config.c +++ b/arch/m68k/q40/config.c @@ -273,6 +273,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll) { int tmp = Q40_RTC_CTRL; + pll->pll_ctrl = 0; pll->pll_value = tmp & Q40_RTC_PLL_MASK; if (tmp & Q40_RTC_PLL_SIGN) pll->pll_value = -pll->pll_value; -- GitLab From ef488886e34a65aac17385835572eb5d69c45682 Mon Sep 17 00:00:00 2001 From: Kangjie Lu Date: Thu, 17 Oct 2019 23:29:53 -0500 Subject: [PATCH 0892/1304] gma/gma500: fix a memory disclosure bug due to uninitialized bytes [ Upstream commit 57a25a5f754ce27da2cfa6f413cfd366f878db76 ] `best_clock` is an object that may be sent out. Object `clock` contains uninitialized bytes that are copied to `best_clock`, which leads to memory disclosure and information leak. 
Signed-off-by: Kangjie Lu Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20191018042953.31099-1-kjlu@umn.edu Signed-off-by: Sasha Levin --- drivers/gpu/drm/gma500/cdv_intel_display.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 17db4b4749d5..2e8479744ca4 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -415,6 +415,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct gma_clock_t clock; + memset(&clock, 0, sizeof(clock)); + switch (refclk) { case 27000: if (target < 200000) { -- GitLab From 7f775c06ee25163a189413b2e5d8a9b3fe2883aa Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 23 Oct 2019 16:46:59 +0100 Subject: [PATCH 0893/1304] ASoC: kirkwood: fix IRQ error handling [ Upstream commit 175fc928198236037174e5c5c066fe3c4691903e ] Propagate the error code from request_irq(), rather than returning -EBUSY. Signed-off-by: Russell King Link: https://lore.kernel.org/r/E1iNIqh-0000tW-EZ@rmk-PC.armlinux.org.uk Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/kirkwood/kirkwood-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c index c6a58520d377..255cc45905b8 100644 --- a/sound/soc/kirkwood/kirkwood-dma.c +++ b/sound/soc/kirkwood/kirkwood-dma.c @@ -136,7 +136,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream) err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED, "kirkwood-i2s", priv); if (err) - return -EBUSY; + return err; /* * Enable Error interrupts. 
We're only ack'ing them but -- GitLab From a5a35a815c8aba3f7e3c92757f299e2ea85503da Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Mon, 23 Sep 2019 11:25:42 -0300 Subject: [PATCH 0894/1304] media: smiapp: Fix error handling at NVM reading [ Upstream commit a5b1d5413534607b05fb34470ff62bf395f5c8d0 ] If NVM reading failed, the device was left powered on. Fix that. Signed-off-by: Sakari Ailus Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/i2c/smiapp/smiapp-core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c index 4731e1c72f96..0a434bdce3b3 100644 --- a/drivers/media/i2c/smiapp/smiapp-core.c +++ b/drivers/media/i2c/smiapp/smiapp-core.c @@ -2337,11 +2337,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr, if (rval < 0) { if (rval != -EBUSY && rval != -EAGAIN) pm_runtime_set_active(&client->dev); - pm_runtime_put(&client->dev); + pm_runtime_put_noidle(&client->dev); return -ENODEV; } if (smiapp_read_nvm(sensor, sensor->nvm)) { + pm_runtime_put(&client->dev); dev_err(&client->dev, "nvm read failed\n"); return -ENODEV; } -- GitLab From a88cda149cd19e97e01f76d3d4fec149ba989cf9 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 25 Sep 2020 21:19:24 -0700 Subject: [PATCH 0895/1304] arch/x86/lib/usercopy_64.c: fix __copy_user_flushcache() cache writeback commit a1cd6c2ae47ee10ff21e62475685d5b399e2ed4a upstream. If we copy less than 8 bytes and if the destination crosses a cache line, __copy_user_flushcache would invalidate only the first cache line. This patch makes it invalidate the second cache line as well. Fixes: 0aed55af88345b ("x86, uaccess: introduce copy_from_iter_flushcache for pmem / cache-bypass operations") Signed-off-by: Mikulas Patocka Signed-off-by: Andrew Morton Reviewed-by: Dan Williams Cc: Jan Kara Cc: Jeff Moyer Cc: Ingo Molnar Cc: Christoph Hellwig Cc: Toshi Kani Cc: "H. 
Peter Anvin" Cc: Al Viro Cc: Thomas Gleixner Cc: Matthew Wilcox Cc: Ross Zwisler Cc: Ingo Molnar Cc: Link: https://lkml.kernel.org/r/alpine.LRH.2.02.2009161451140.21915@file01.intranet.prod.int.rdu2.redhat.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/usercopy_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 7077b3e28241..40dbbd8f1fe4 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -139,7 +139,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size) */ if (size < 8) { if (!IS_ALIGNED(dest, 4) || size != 4) - clean_cache_range(dst, 1); + clean_cache_range(dst, size); } else { if (!IS_ALIGNED(dest, 8)) { dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); -- GitLab From 9fddc16ff603e1cd311f5ae15131daf1fff506bc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 23 Sep 2020 17:46:20 +0200 Subject: [PATCH 0896/1304] x86/ioapic: Unbreak check_timer() commit 86a82ae0b5095ea24c55898a3f025791e7958b21 upstream. Several people reported in the kernel bugzilla that between v4.12 and v4.13 the magic which works around broken hardware and BIOSes to find the proper timer interrupt delivery mode stopped working for some older affected platforms which need to fall back to ExtINT delivery mode. The reason is that the core code changed to keep track of the masked and disabled state of an interrupt line more accurately to avoid the expensive hardware operations. That broke an assumption in i8259_make_irq() which invokes disable_irq_nosync(); irq_set_chip_and_handler(); enable_irq(); Up to v4.12 this worked because enable_irq() unconditionally unmasked the interrupt line, but after the state tracking improvements this is not longer the case because the IO/APIC uses lazy disabling. So the line state is unmasked which means that enable_irq() does not call into the new irq chip to unmask it. 
In principle this is a shortcoming of the core code, but it's more than unclear whether the core code should try to reset state. At least this cannot be done unconditionally as that would break other existing use cases where the chip type is changed, e.g. when changing the trigger type, but the callers expect the state to be preserved. As the way how check_timer() is switching the delivery modes is truly unique, the obvious fix is to simply unmask the i8259 manually after changing the mode to ExtINT delivery and switching the irq chip to the legacy PIC. Note, that the fixes tag is not really precise, but identifies the commit which broke the assumptions in the IO/APIC and i8259 code and that's the kernel version to which this needs to be backported. Fixes: bf22ff45bed6 ("genirq: Avoid unnecessary low level irq function calls") Reported-by: p_c_chan@hotmail.com Reported-by: ecm4@mail.com Reported-by: perdigao1@yahoo.com Reported-by: matzes@users.sourceforge.net Reported-by: rvelascog@gmail.com Signed-off-by: Thomas Gleixner Tested-by: p_c_chan@hotmail.com Tested-by: matzes@users.sourceforge.net Cc: stable@vger.kernel.org Link: https://bugzilla.kernel.org/show_bug.cgi?id=197769 Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/apic/io_apic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 95e21c438012..15234885e60b 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2250,6 +2250,7 @@ static inline void __init check_timer(void) legacy_pic->init(0); legacy_pic->make_irq(0); apic_write(APIC_LVT0, APIC_DM_EXTINT); + legacy_pic->unmask(0); unlock_ExtINT_logic(); -- GitLab From 201c2c320802b16e7ba3a58adf0a4cbf6e380c37 Mon Sep 17 00:00:00 2001 From: Joakim Tjernlund Date: Thu, 10 Sep 2020 10:53:28 +0200 Subject: [PATCH 0897/1304] ALSA: usb-audio: Add delay quirk for H570e USB headsets commit 315c7ad7a701baba28c628c4c5426b3d9617ceed upstream. 
Needs the same delay as H650e Signed-off-by: Joakim Tjernlund Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200910085328.19188-1-joakim.tjernlund@infinera.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/quirks.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 8d9117312e30..e6dea1c7112b 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1338,12 +1338,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) msleep(20); - /* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny - * delay here, otherwise requests like get/set frequency return as - * failed despite actually succeeding. + /* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX + * needs a tiny delay here, otherwise requests like get/set + * frequency return as failed despite actually succeeding. */ if ((chip->usb_id == USB_ID(0x1686, 0x00dd) || chip->usb_id == USB_ID(0x046d, 0x0a46) || + chip->usb_id == USB_ID(0x046d, 0x0a56) || chip->usb_id == USB_ID(0x0b0e, 0x0349) || chip->usb_id == USB_ID(0x0951, 0x16ad)) && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) -- GitLab From 761025297a7984a96e8d0a15506ac1d8ee90277b Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Mon, 14 Sep 2020 14:51:18 +0800 Subject: [PATCH 0898/1304] ALSA: hda/realtek - Couldn't detect Mic if booting with headset plugged commit 3f74249057827c5f6676c41c18f6be12ce1469ce upstream. We found a Mic detection issue on many Lenovo laptops, those laptops belong to differnt models and they have different audio design like internal mic connects to the codec or PCH, they all have this problem, the problem is if plugging a headset before powerup/reboot the machine, after booting up, the headphone could be detected but Mic couldn't. If we plug out and plug in the headset, both headphone and Mic could be detected then. 
Through debugging we found the codec on those laptops are same, it is alc257, and if we don't disable the 3k pulldown in alc256_shutup(), the issue will be fixed. So far there is no pop noise or power consumption regression on those laptops after this change. Cc: Kailang Yang Cc: Signed-off-by: Hui Wang Link: https://lore.kernel.org/r/20200914065118.19238-1-hui.wang@canonical.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 9c5b3d19bfa7..4d923d6633c1 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3290,7 +3290,11 @@ static void alc256_shutup(struct hda_codec *codec) /* 3k pull low control for Headset jack. */ /* NOTE: call this before clearing the pin, otherwise codec stalls */ - alc_update_coef_idx(codec, 0x46, 0, 3 << 12); + /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly + * when booting with headset plugged. So skip setting it for the codec alc257 + */ + if (codec->core.vendor_id != 0x10ec0257) + alc_update_coef_idx(codec, 0x46, 0, 3 << 12); if (!spec->no_shutup_pins) snd_hda_codec_write(codec, hp_pin, 0, -- GitLab From ab61c58cdb29aecb7827fc83a8e83e48d58e130b Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Mon, 14 Sep 2020 15:02:29 +0800 Subject: [PATCH 0899/1304] ALSA: hda/realtek: Enable front panel headset LED on Lenovo ThinkStation P520 commit f73bbf639b32acb6b409e188fdde5644b301978f upstream. On Lenovo P520, the front panel headset LED isn't lit up right now. Realtek states that the LED needs to be enabled by ALC233's GPIO2, so let's do it accordingly to light the LED up. 
Signed-off-by: Kai-Heng Feng Acked-by: Hui Wang Cc: Link: https://lore.kernel.org/r/20200914070231.13192-1-kai.heng.feng@canonical.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 4d923d6633c1..24bc9e446047 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5616,6 +5616,7 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec, #include "hp_x360_helper.c" enum { + ALC269_FIXUP_GPIO2, ALC269_FIXUP_SONY_VAIO, ALC275_FIXUP_SONY_VAIO_GPIO2, ALC269_FIXUP_DELL_M101Z, @@ -5768,6 +5769,10 @@ enum { }; static const struct hda_fixup alc269_fixups[] = { + [ALC269_FIXUP_GPIO2] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_gpio2, + }, [ALC269_FIXUP_SONY_VAIO] = { .type = HDA_FIXUP_PINCTLS, .v.pins = (const struct hda_pintbl[]) { @@ -6563,6 +6568,8 @@ static const struct hda_fixup alc269_fixups[] = { [ALC233_FIXUP_LENOVO_MULTI_CODECS] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_alc662_fixup_lenovo_dual_codecs, + .chained = true, + .chain_id = ALC269_FIXUP_GPIO2 }, [ALC233_FIXUP_ACER_HEADSET_MIC] = { .type = HDA_FIXUP_VERBS, -- GitLab From 7c29fd831799d09474dfdae556207b7102647a45 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 25 Sep 2020 21:19:18 -0700 Subject: [PATCH 0900/1304] lib/string.c: implement stpcpy commit 1e1b6d63d6340764e00356873e5794225a2a03ea upstream. LLVM implemented a recent "libcall optimization" that lowers calls to `sprintf(dest, "%s", str)` where the return value is used to `stpcpy(dest, str) - dest`. This generally avoids the machinery involved in parsing format strings. `stpcpy` is just like `strcpy` except it returns the pointer to the new tail of `dest`. This optimization was introduced into clang-12. Implement this so that we don't observe linkage failures due to missing symbol definitions for `stpcpy`. 
Similar to last year's fire drill with: commit 5f074f3e192f ("lib/string.c: implement a basic bcmp") The kernel is somewhere between a "freestanding" environment (no full libc) and "hosted" environment (many symbols from libc exist with the same type, function signature, and semantics). As Peter Anvin notes, there's not really a great way to inform the compiler that you're targeting a freestanding environment but would like to opt-in to some libcall optimizations (see pr/47280 below), rather than opt-out. Arvind notes, -fno-builtin-* behaves slightly differently between GCC and Clang, and Clang is missing many __builtin_* definitions, which I consider a bug in Clang and am working on fixing. Masahiro summarizes the subtle distinction between compilers justly: To prevent transformation from foo() into bar(), there are two ways in Clang to do that; -fno-builtin-foo, and -fno-builtin-bar. There is only one in GCC; -fno-buitin-foo. (Any difference in that behavior in Clang is likely a bug from a missing __builtin_* definition.) Masahiro also notes: We want to disable optimization from foo() to bar(), but we may still benefit from the optimization from foo() into something else. If GCC implements the same transform, we would run into a problem because it is not -fno-builtin-bar, but -fno-builtin-foo that disables that optimization. In this regard, -fno-builtin-foo would be more future-proof than -fno-built-bar, but -fno-builtin-foo is still potentially overkill. We may want to prevent calls from foo() being optimized into calls to bar(), but we still may want other optimization on calls to foo(). It seems that compilers today don't quite provide the fine grain control over which libcall optimizations pseudo-freestanding environments would prefer. Finally, Kees notes that this interface is unsafe, so we should not encourage its use. As such, I've removed the declaration from any header, but it still needs to be exported to avoid linkage errors in modules. 
Reported-by: Sami Tolvanen Suggested-by: Andy Lavr Suggested-by: Arvind Sankar Suggested-by: Joe Perches Suggested-by: Kees Cook Suggested-by: Masahiro Yamada Suggested-by: Rasmus Villemoes Signed-off-by: Nick Desaulniers Signed-off-by: Andrew Morton Tested-by: Nathan Chancellor Cc: Link: https://lkml.kernel.org/r/20200914161643.938408-1-ndesaulniers@google.com Link: https://bugs.llvm.org/show_bug.cgi?id=47162 Link: https://bugs.llvm.org/show_bug.cgi?id=47280 Link: https://github.com/ClangBuiltLinux/linux/issues/1126 Link: https://man7.org/linux/man-pages/man3/stpcpy.3.html Link: https://pubs.opengroup.org/onlinepubs/9699919799/functions/stpcpy.html Link: https://reviews.llvm.org/D85963 Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- lib/string.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/string.c b/lib/string.c index 72125fd5b4a6..edf4907ec946 100644 --- a/lib/string.c +++ b/lib/string.c @@ -236,6 +236,30 @@ ssize_t strscpy(char *dest, const char *src, size_t count) EXPORT_SYMBOL(strscpy); #endif +/** + * stpcpy - copy a string from src to dest returning a pointer to the new end + * of dest, including src's %NUL-terminator. May overrun dest. + * @dest: pointer to end of string being copied into. Must be large enough + * to receive copy. + * @src: pointer to the beginning of string being copied from. Must not overlap + * dest. + * + * stpcpy differs from strcpy in a key way: the return value is a pointer + * to the new %NUL-terminating character in @dest. (For strcpy, the return + * value is a pointer to the start of @dest). This interface is considered + * unsafe as it doesn't perform bounds checking of the inputs. As such it's + * not recommended for usage. Instead, its definition is provided in case + * the compiler lowers other libcalls to stpcpy. 
+ */ +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src); +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) +{ + while ((*dest++ = *src++) != '\0') + /* nothing */; + return --dest; +} +EXPORT_SYMBOL(stpcpy); + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another -- GitLab From 07dde782177400db461d4f90b759504e4e234605 Mon Sep 17 00:00:00 2001 From: Oleh Kravchenko Date: Wed, 16 Oct 2019 10:24:30 +0300 Subject: [PATCH 0901/1304] leds: mlxreg: Fix possible buffer overflow [ Upstream commit 7c6082b903ac28dc3f383fba57c6f9e7e2594178 ] Error was detected by PVS-Studio: V512 A call of the 'sprintf' function will lead to overflow of the buffer 'led_data->led_cdev_name'. Acked-by: Jacek Anaszewski Acked-by: Pavel Machek Signed-off-by: Oleh Kravchenko Signed-off-by: Pavel Machek Signed-off-by: Sasha Levin --- drivers/leds/leds-mlxreg.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c index 1ee48cb21df9..022e973dc7c3 100644 --- a/drivers/leds/leds-mlxreg.c +++ b/drivers/leds/leds-mlxreg.c @@ -209,8 +209,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv) brightness = LED_OFF; led_data->base_color = MLXREG_LED_GREEN_SOLID; } - sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg", - data->label); + snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name), + "mlxreg:%s", data->label); led_cdev->name = led_data->led_cdev_name; led_cdev->brightness = brightness; led_cdev->max_brightness = LED_ON; -- GitLab From ea678da9965512669a6f54524ee4f6a42d62ab40 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Tue, 5 Nov 2019 00:56:03 +0300 Subject: [PATCH 0902/1304] PM / devfreq: tegra30: Fix integer overflow on CPU's freq max out [ Upstream commit 53b4b2aeee26f42cde5ff2a16dd0d8590c51a55a ] There is another kHz-conversion bug in the code, resulting in integer overflow. 
Although, this time the resulting value is 4294966296 and it's close to ULONG_MAX, which is okay in this case. Reviewed-by: Chanwoo Choi Tested-by: Peter Geis Signed-off-by: Dmitry Osipenko Signed-off-by: Chanwoo Choi Signed-off-by: Sasha Levin --- drivers/devfreq/tegra-devfreq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c index 06768074d2d8..479d9575e124 100644 --- a/drivers/devfreq/tegra-devfreq.c +++ b/drivers/devfreq/tegra-devfreq.c @@ -80,6 +80,8 @@ #define KHZ 1000 +#define KHZ_MAX (ULONG_MAX / KHZ) + /* Assume that the bus is saturated if the utilization is 25% */ #define BUS_SATURATION_RATIO 25 @@ -180,7 +182,7 @@ struct tegra_actmon_emc_ratio { }; static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = { - { 1400000, ULONG_MAX }, + { 1400000, KHZ_MAX }, { 1200000, 750000 }, { 1100000, 600000 }, { 1000000, 500000 }, -- GitLab From 6971ba9e444c569ee93ef30d98afc06d4e8b7d4b Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Mon, 4 Nov 2019 23:26:22 +0800 Subject: [PATCH 0903/1304] scsi: fnic: fix use after free [ Upstream commit ec990306f77fd4c58c3b27cc3b3c53032d6e6670 ] The memory chunk io_req is released by mempool_free. Accessing io_req->start_time will result in a use after free bug. The variable start_time is a backup of the timestamp. So, use start_time here to avoid use after free. Link: https://lore.kernel.org/r/1572881182-37664-1-git-send-email-bianpan2016@163.com Signed-off-by: Pan Bian Reviewed-by: Satish Kharat Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/fnic/fnic_scsi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 73ffc16ec022..b521fc7650cb 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1034,7 +1034,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, atomic64_inc(&fnic_stats->io_stats.io_completions); - io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); + io_duration_time = jiffies_to_msecs(jiffies) - + jiffies_to_msecs(start_time); if(io_duration_time <= 10) atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec); -- GitLab From 91e31be9baa6c526eef764786fab2485f9765c82 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 4 Nov 2019 16:57:00 -0800 Subject: [PATCH 0904/1304] scsi: lpfc: Fix kernel crash at lpfc_nvme_info_show during remote port bounce [ Upstream commit 6c1e803eac846f886cd35131e6516fc51a8414b9 ] When reading sysfs nvme_info file while a remote port leaves and comes back, a NULL pointer is encountered. The issue is due to ndlp list corruption as the the nvme_info_show does not use the same lock as the rest of the code. Correct by removing the rcu_xxx_lock calls and replace by the host_lock and phba->hbaLock spinlocks that are used by the rest of the driver. Given we're called from sysfs, we are safe to use _irq rather than _irqsave. Link: https://lore.kernel.org/r/20191105005708.7399-4-jsmart2021@gmail.com Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/lpfc/lpfc_attr.c | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index fe084d47ed9e..3447d19d4147 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -332,7 +332,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; - rcu_read_lock(); scnprintf(tmp, sizeof(tmp), "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n", phba->brd_no, @@ -341,7 +340,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, phba->sli4_hba.scsi_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -357,7 +356,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done; + + spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { nrport = NULL; @@ -384,39 +385,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, /* Tab in to show lport ownership. 
*/ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; if (phba->brd_no >= 10) { if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", nrport->port_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", nrport->node_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "DID x%06x ", nrport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; /* An NVME rport can have multiple roles. */ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | @@ -424,14 +425,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", nrport->port_role); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "%s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } - rcu_read_unlock(); + spin_unlock_irq(shost->host_lock); if (!lport) goto buffer_done; @@ -491,11 +492,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&lport->cmpl_fcp_err)); strlcat(buf, tmp, PAGE_SIZE); - /* 
RCU is already unlocked. */ + /* host_lock is already unlocked. */ goto buffer_done; - rcu_unlock_buf_done: - rcu_read_unlock(); + unlock_buf_done: + spin_unlock_irq(shost->host_lock); buffer_done: len = strnlen(buf, PAGE_SIZE); -- GitLab From 08f4fc4c8543a5b94299664eec3b6fafadac3ed0 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 6 Nov 2019 10:04:11 -0800 Subject: [PATCH 0905/1304] net: silence data-races on sk_backlog.tail [ Upstream commit 9ed498c6280a2f2b51d02df96df53037272ede49 ] sk->sk_backlog.tail might be read without holding the socket spinlock, we need to add proper READ_ONCE()/WRITE_ONCE() to silence the warnings. KCSAN reported : BUG: KCSAN: data-race in tcp_add_backlog / tcp_recvmsg write to 0xffff8881265109f8 of 8 bytes by interrupt on cpu 1: __sk_add_backlog include/net/sock.h:907 [inline] sk_add_backlog include/net/sock.h:938 [inline] tcp_add_backlog+0x476/0xce0 net/ipv4/tcp_ipv4.c:1759 tcp_v4_rcv+0x1a70/0x1bd0 net/ipv4/tcp_ipv4.c:1947 ip_protocol_deliver_rcu+0x4d/0x420 net/ipv4/ip_input.c:204 ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231 NF_HOOK include/linux/netfilter.h:305 [inline] NF_HOOK include/linux/netfilter.h:299 [inline] ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252 dst_input include/net/dst.h:442 [inline] ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413 NF_HOOK include/linux/netfilter.h:305 [inline] NF_HOOK include/linux/netfilter.h:299 [inline] ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523 __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:4929 __netif_receive_skb+0x37/0xf0 net/core/dev.c:5043 netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5133 napi_skb_finish net/core/dev.c:5596 [inline] napi_gro_receive+0x28f/0x330 net/core/dev.c:5629 receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061 virtnet_receive drivers/net/virtio_net.c:1323 [inline] virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428 napi_poll net/core/dev.c:6311 [inline] net_rx_action+0x3ae/0xa90 net/core/dev.c:6379 
__do_softirq+0x115/0x33f kernel/softirq.c:292 invoke_softirq kernel/softirq.c:373 [inline] irq_exit+0xbb/0xe0 kernel/softirq.c:413 exiting_irq arch/x86/include/asm/apic.h:536 [inline] do_IRQ+0xa6/0x180 arch/x86/kernel/irq.c:263 ret_from_intr+0x0/0x19 native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71 arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571 default_idle_call+0x1e/0x40 kernel/sched/idle.c:94 cpuidle_idle_call kernel/sched/idle.c:154 [inline] do_idle+0x1af/0x280 kernel/sched/idle.c:263 cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355 start_secondary+0x208/0x260 arch/x86/kernel/smpboot.c:264 secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241 read to 0xffff8881265109f8 of 8 bytes by task 8057 on cpu 0: tcp_recvmsg+0x46e/0x1b40 net/ipv4/tcp.c:2050 inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838 sock_recvmsg_nosec net/socket.c:871 [inline] sock_recvmsg net/socket.c:889 [inline] sock_recvmsg+0x92/0xb0 net/socket.c:885 sock_read_iter+0x15f/0x1e0 net/socket.c:967 call_read_iter include/linux/fs.h:1889 [inline] new_sync_read+0x389/0x4f0 fs/read_write.c:414 __vfs_read+0xb1/0xc0 fs/read_write.c:427 vfs_read fs/read_write.c:461 [inline] vfs_read+0x143/0x2c0 fs/read_write.c:446 ksys_read+0xd5/0x1b0 fs/read_write.c:587 __do_sys_read fs/read_write.c:597 [inline] __se_sys_read fs/read_write.c:595 [inline] __x64_sys_read+0x4c/0x60 fs/read_write.c:595 do_syscall_64+0xcc/0x370 arch/x86/entry/common.c:290 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 8057 Comm: syz-fuzzer Not tainted 5.4.0-rc6+ #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/crypto/chelsio/chtls/chtls_io.c | 10 +++++----- include/net/sock.h | 4 ++-- net/ipv4/tcp.c | 2 +- net/llc/af_llc.c | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 1e0cc96306dd..2c1f3ddb0cc7 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -1449,7 +1449,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk); - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { @@ -1482,7 +1482,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, break; } } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); @@ -1627,7 +1627,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg, break; } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { /* Do not sleep, just process backlog. 
*/ release_sock(sk); lock_sock(sk); @@ -1759,7 +1759,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, csk->wr_max_credits)) sk->sk_write_space(sk); - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { @@ -1790,7 +1790,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } } - if (sk->sk_backlog.tail) { + if (READ_ONCE(sk->sk_backlog.tail)) { release_sock(sk); lock_sock(sk); chtls_cleanup_rbuf(sk, copied); diff --git a/include/net/sock.h b/include/net/sock.h index 77f36257cac9..bc752237dff3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -900,11 +900,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) skb_dst_force(skb); if (!sk->sk_backlog.tail) - sk->sk_backlog.head = skb; + WRITE_ONCE(sk->sk_backlog.head, skb); else sk->sk_backlog.tail->next = skb; - sk->sk_backlog.tail = skb; + WRITE_ONCE(sk->sk_backlog.tail, skb); skb->next = NULL; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 616ff2970f4f..4ce3397e6fcf 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2038,7 +2038,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, /* Well, if we have backlog, try to process it now yet. */ - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 6ead3c39f356..bcba579e292f 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -785,7 +785,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, } /* Well, if we have backlog, try to process it now yet. 
*/ - if (copied >= target && !sk->sk_backlog.tail) + if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { -- GitLab From 297717d42418cf9aba7b24e48ca940ef30697469 Mon Sep 17 00:00:00 2001 From: Stephen Kitt Date: Sat, 19 Oct 2019 16:06:34 +0200 Subject: [PATCH 0906/1304] clk/ti/adpll: allocate room for terminating null [ Upstream commit 7f6ac72946b88b89ee44c1c527aa8591ac5ffcbe ] The buffer allocated in ti_adpll_clk_get_name doesn't account for the terminating null. This patch switches to devm_kasprintf to avoid overflowing. Signed-off-by: Stephen Kitt Link: https://lkml.kernel.org/r/20191019140634.15596-1-steve@sk2.org Acked-by: Tony Lindgren Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/ti/adpll.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c index 688e403333b9..14926e07d09a 100644 --- a/drivers/clk/ti/adpll.c +++ b/drivers/clk/ti/adpll.c @@ -193,15 +193,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d, if (err) return NULL; } else { - const char *base_name = "adpll"; - char *buf; - - buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 + - strlen(postfix), GFP_KERNEL); - if (!buf) - return NULL; - sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix); - name = buf; + name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s", + d->pa, postfix); } return name; -- GitLab From 45a65fff68ac457c9e8365edca00e1d9e34490a2 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 7 Nov 2019 09:50:18 -0500 Subject: [PATCH 0907/1304] drm/amdgpu/powerplay: fix AVFS handling with custom powerplay table [ Upstream commit 53dbc27ad5a93932ff1892a8e4ef266827d74a0f ] When a custom powerplay table is provided, we need to update the OD VDDC flag to avoid AVFS being enabled when it shouldn't be. 
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205393 Reviewed-by: Evan Quan Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index ce459ea4ec3a..da9e6923fa65 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3591,6 +3591,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. + */ + if(hwmgr->hardcode_pp_table != NULL) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + vega10_update_avfs(hwmgr); data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; -- GitLab From 6edb956e3c9d7b6936a236b1647b1a1ea993ea5d Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Tue, 8 Oct 2019 10:36:37 +0800 Subject: [PATCH 0908/1304] mtd: cfi_cmdset_0002: don't free cfi->cfiq in error path of cfi_amdstd_setup() [ Upstream commit 03976af89e3bd9489d542582a325892e6a8cacc0 ] Else there may be a double-free problem, because cfi->cfiq will be freed by mtd_do_chip_probe() if both the two invocations of check_cmd_set() return failure. 
Signed-off-by: Hou Tao Reviewed-by: Richard Weinberger Signed-off-by: Vignesh Raghavendra Signed-off-by: Sasha Levin --- drivers/mtd/chips/cfi_cmdset_0002.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 1dbc9554a078..3ab75d3e2ce3 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -727,7 +727,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) kfree(mtd->eraseregions); kfree(mtd); kfree(cfi->cmdset_priv); - kfree(cfi->cfiq); return NULL; } -- GitLab From 66bde9c7f036cf7e6c36ec16f90f354df3e779ab Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 21 Oct 2019 10:16:34 +0100 Subject: [PATCH 0909/1304] mfd: mfd-core: Protect against NULL call-back function pointer [ Upstream commit b195e101580db390f50b0d587b7f66f241d2bc88 ] If a child device calls mfd_cell_{en,dis}able() without an appropriate call-back being set, we are likely to encounter a panic. Avoid this by adding suitable checking. 
Signed-off-by: Lee Jones Reviewed-by: Daniel Thompson Reviewed-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/mfd/mfd-core.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 182973df1aed..77c965c6a65f 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); int err = 0; + if (!cell->enable) { + dev_dbg(&pdev->dev, "No .enable() call-back registered\n"); + return 0; + } + /* only call enable hook if the cell wasn't previously enabled */ if (atomic_inc_return(cell->usage_count) == 1) err = cell->enable(pdev); @@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); int err = 0; + if (!cell->disable) { + dev_dbg(&pdev->dev, "No .disable() call-back registered\n"); + return 0; + } + /* only disable if no other clients are using it */ if (atomic_dec_return(cell->usage_count) == 0) err = cell->disable(pdev); -- GitLab From eeda579a6376f87268159b80813dee70d26a41c3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 8 Nov 2019 11:15:17 -0500 Subject: [PATCH 0910/1304] drm/amdgpu/powerplay/smu7: fix AVFS handling with custom powerplay table [ Upstream commit 901245624c7812b6c95d67177bae850e783b5212 ] When a custom powerplay table is provided, we need to update the OD VDDC flag to avoid AVFS being enabled when it shouldn't be. 
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205393 Reviewed-by: Evan Quan Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 72c0a2ae2dd4..058898b321b8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3970,6 +3970,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. + */ + if (hwmgr->hardcode_pp_table != NULL) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + tmp_result = smu7_update_avfs(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update avfs voltages!", -- GitLab From b55eddf907af045cc3c74b4bb28868bfed62e4d0 Mon Sep 17 00:00:00 2001 From: Ivan Lazeev Date: Wed, 16 Oct 2019 21:28:14 +0300 Subject: [PATCH 0911/1304] tpm_crb: fix fTPM on AMD Zen+ CPUs [ Upstream commit 3ef193822b25e9ee629974f66dc1ff65167f770c ] Bug link: https://bugzilla.kernel.org/show_bug.cgi?id=195657 cmd/rsp buffers are expected to be in the same ACPI region. For Zen+ CPUs BIOS's might report two different regions, some of them also report region sizes inconsistent with values from TPM registers. Memory configuration on ASRock x470 ITX: db0a0000-dc59efff : Reserved dc57e000-dc57efff : MSFT0101:00 dc582000-dc582fff : MSFT0101:00 Work around the issue by storing ACPI regions declared for the device in a fixed array and adding an array for pointers to corresponding possibly allocated resources in crb_map_io function. This data was previously held for a single resource in struct crb_priv (iobase field) and local variable io_res in crb_map_io function. 
ACPI resources array is used to find index of corresponding region for each buffer and make the buffer size consistent with region's length. Array of pointers to allocated resources is used to map the region at most once. Signed-off-by: Ivan Lazeev Tested-by: Jerry Snitselaar Tested-by: Jarkko Sakkinen Reviewed-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen Signed-off-by: Sasha Levin --- drivers/char/tpm/tpm_crb.c | 123 +++++++++++++++++++++++++++---------- 1 file changed, 90 insertions(+), 33 deletions(-) diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 763fc7e6c005..20f27100708b 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -26,6 +26,7 @@ #include "tpm.h" #define ACPI_SIG_TPM2 "TPM2" +#define TPM_CRB_MAX_RESOURCES 3 static const guid_t crb_acpi_start_guid = GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714, @@ -95,7 +96,6 @@ enum crb_status { struct crb_priv { u32 sm; const char *hid; - void __iomem *iobase; struct crb_regs_head __iomem *regs_h; struct crb_regs_tail __iomem *regs_t; u8 __iomem *cmd; @@ -438,21 +438,27 @@ static const struct tpm_class_ops tpm_crb = { static int crb_check_resource(struct acpi_resource *ares, void *data) { - struct resource *io_res = data; + struct resource *iores_array = data; struct resource_win win; struct resource *res = &(win.res); + int i; if (acpi_dev_resource_memory(ares, res) || acpi_dev_resource_address_space(ares, &win)) { - *io_res = *res; - io_res->name = NULL; + for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) { + if (resource_type(iores_array + i) != IORESOURCE_MEM) { + iores_array[i] = *res; + iores_array[i].name = NULL; + break; + } + } } return 1; } -static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, - struct resource *io_res, u64 start, u32 size) +static void __iomem *crb_map_res(struct device *dev, struct resource *iores, + void __iomem **iobase_ptr, u64 start, u32 size) { struct resource new_res = { .start = start, @@ -464,10 +470,16 @@ 
static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv, if (start != new_res.start) return (void __iomem *) ERR_PTR(-EINVAL); - if (!resource_contains(io_res, &new_res)) + if (!iores) return devm_ioremap_resource(dev, &new_res); - return priv->iobase + (new_res.start - io_res->start); + if (!*iobase_ptr) { + *iobase_ptr = devm_ioremap_resource(dev, iores); + if (IS_ERR(*iobase_ptr)) + return *iobase_ptr; + } + + return *iobase_ptr + (new_res.start - iores->start); } /* @@ -494,9 +506,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res, static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, struct acpi_table_tpm2 *buf) { - struct list_head resources; - struct resource io_res; + struct list_head acpi_resource_list; + struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} }; + void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL}; struct device *dev = &device->dev; + struct resource *iores; + void __iomem **iobase_ptr; + int i; u32 pa_high, pa_low; u64 cmd_pa; u32 cmd_size; @@ -505,21 +521,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, u32 rsp_size; int ret; - INIT_LIST_HEAD(&resources); - ret = acpi_dev_get_resources(device, &resources, crb_check_resource, - &io_res); + INIT_LIST_HEAD(&acpi_resource_list); + ret = acpi_dev_get_resources(device, &acpi_resource_list, + crb_check_resource, iores_array); if (ret < 0) return ret; - acpi_dev_free_resource_list(&resources); + acpi_dev_free_resource_list(&acpi_resource_list); - if (resource_type(&io_res) != IORESOURCE_MEM) { + if (resource_type(iores_array) != IORESOURCE_MEM) { dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n"); return -EINVAL; + } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) == + IORESOURCE_MEM) { + dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n"); + memset(iores_array + TPM_CRB_MAX_RESOURCES, + 0, sizeof(*iores_array)); + 
iores_array[TPM_CRB_MAX_RESOURCES].flags = 0; } - priv->iobase = devm_ioremap_resource(dev, &io_res); - if (IS_ERR(priv->iobase)) - return PTR_ERR(priv->iobase); + iores = NULL; + iobase_ptr = NULL; + for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { + if (buf->control_address >= iores_array[i].start && + buf->control_address + sizeof(struct crb_regs_tail) - 1 <= + iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address, + sizeof(struct crb_regs_tail)); + + if (IS_ERR(priv->regs_t)) + return PTR_ERR(priv->regs_t); /* The ACPI IO region starts at the head area and continues to include * the control area, as one nice sane region except for some older @@ -527,9 +563,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, */ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) { - if (buf->control_address == io_res.start + + if (iores && + buf->control_address == iores->start + sizeof(*priv->regs_h)) - priv->regs_h = priv->iobase; + priv->regs_h = *iobase_ptr; else dev_warn(dev, FW_BUG "Bad ACPI memory layout"); } @@ -538,13 +575,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, if (ret) return ret; - priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address, - sizeof(struct crb_regs_tail)); - if (IS_ERR(priv->regs_t)) { - ret = PTR_ERR(priv->regs_t); - goto out_relinquish_locality; - } - /* * PTT HW bug w/a: wake up the device to access * possibly not retained registers. 
@@ -556,13 +586,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high); pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low); cmd_pa = ((u64)pa_high << 32) | pa_low; - cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa, - ioread32(&priv->regs_t->ctrl_cmd_size)); + cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size); + + iores = NULL; + iobase_ptr = NULL; + for (i = 0; iores_array[i].end; ++i) { + if (cmd_pa >= iores_array[i].start && + cmd_pa <= iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + if (iores) + cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size); dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n", pa_high, pa_low, cmd_size); - priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size); + priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size); if (IS_ERR(priv->cmd)) { ret = PTR_ERR(priv->cmd); goto out; @@ -570,11 +613,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8); rsp_pa = le64_to_cpu(__rsp_pa); - rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa, - ioread32(&priv->regs_t->ctrl_rsp_size)); + rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size); + + iores = NULL; + iobase_ptr = NULL; + for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) { + if (rsp_pa >= iores_array[i].start && + rsp_pa <= iores_array[i].end) { + iores = iores_array + i; + iobase_ptr = iobase_array + i; + break; + } + } + + if (iores) + rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size); if (cmd_pa != rsp_pa) { - priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size); + priv->rsp = crb_map_res(dev, iores, iobase_ptr, + rsp_pa, rsp_size); ret = PTR_ERR_OR_ZERO(priv->rsp); goto out; } -- GitLab From cbef6b996e94a30e0c18995cdfea1477a2b257fc Mon Sep 17 00:00:00 2001 From: Divya Indi Date: Wed, 14 Aug 2019 10:55:25 -0700 Subject: 
[PATCH 0912/1304] tracing: Adding NULL checks for trace_array descriptor pointer [ Upstream commit 953ae45a0c25e09428d4a03d7654f97ab8a36647 ] As part of commit f45d1225adb0 ("tracing: Kernel access to Ftrace instances") we exported certain functions. Here, we are adding some additional NULL checks to ensure safe usage by users of these APIs. Link: http://lkml.kernel.org/r/1565805327-579-4-git-send-email-divya.indi@oracle.com Signed-off-by: Divya Indi Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Sasha Levin --- kernel/trace/trace.c | 3 +++ kernel/trace/trace_events.c | 2 ++ 2 files changed, 5 insertions(+) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4966410bb0f4..17505a22d800 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3037,6 +3037,9 @@ int trace_array_printk(struct trace_array *tr, if (!(global_trace.trace_flags & TRACE_ITER_PRINTK)) return 0; + if (!tr) + return -ENOENT; + va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 27726121d332..0fc06a7da87f 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -800,6 +800,8 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) char *event = NULL, *sub = NULL, *match; int ret; + if (!tr) + return -ENOENT; /* * The buf format can be : * *: means any event by that name. -- GitLab From c85bf62a0d6f62d8611be1100ad3a2e39cbe5426 Mon Sep 17 00:00:00 2001 From: Guoju Fang Date: Wed, 13 Nov 2019 16:03:16 +0800 Subject: [PATCH 0913/1304] bcache: fix a lost wake-up problem caused by mca_cannibalize_lock [ Upstream commit 34cf78bf34d48dddddfeeadb44f9841d7864997a ] This patch fix a lost wake-up problem caused by the race between mca_cannibalize_lock and bch_cannibalize_unlock. Consider two processes, A and B. Process A is executing mca_cannibalize_lock, while process B takes c->btree_cache_alloc_lock and is executing bch_cannibalize_unlock. 
The problem happens that after process A executes cmpxchg and will execute prepare_to_wait. In this timeslice process B executes wake_up, but after that process A executes prepare_to_wait and set the state to TASK_INTERRUPTIBLE. Then process A goes to sleep but no one will wake up it. This problem may cause bcache device to dead. Signed-off-by: Guoju Fang Signed-off-by: Coly Li Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- drivers/md/bcache/bcache.h | 1 + drivers/md/bcache/btree.c | 12 ++++++++---- drivers/md/bcache/super.c | 1 + 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 1cc6ae3e058c..6a380ed4919a 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -585,6 +585,7 @@ struct cache_set { */ wait_queue_head_t btree_cache_wait; struct task_struct *btree_cache_alloc_lock; + spinlock_t btree_cannibalize_lock; /* * When we free a btree node, we increment the gen of the bucket the diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index d320574b9a4c..e388e7bb7b5d 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -876,15 +876,17 @@ static struct btree *mca_find(struct cache_set *c, struct bkey *k) static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) { - struct task_struct *old; - - old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); - if (old && old != current) { + spin_lock(&c->btree_cannibalize_lock); + if (likely(c->btree_cache_alloc_lock == NULL)) { + c->btree_cache_alloc_lock = current; + } else if (c->btree_cache_alloc_lock != current) { if (op) prepare_to_wait(&c->btree_cache_wait, &op->wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&c->btree_cannibalize_lock); return -EINTR; } + spin_unlock(&c->btree_cannibalize_lock); return 0; } @@ -919,10 +921,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, */ static void bch_cannibalize_unlock(struct cache_set *c) { 
+ spin_lock(&c->btree_cannibalize_lock); if (c->btree_cache_alloc_lock == current) { c->btree_cache_alloc_lock = NULL; wake_up(&c->btree_cache_wait); } + spin_unlock(&c->btree_cannibalize_lock); } static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 825bfde10c69..7787ec42f81e 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1737,6 +1737,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) sema_init(&c->sb_write_mutex, 1); mutex_init(&c->bucket_lock); init_waitqueue_head(&c->btree_cache_wait); + spin_lock_init(&c->btree_cannibalize_lock); init_waitqueue_head(&c->bucket_wait); init_waitqueue_head(&c->gc_wait); sema_init(&c->uuid_write_mutex, 1); -- GitLab From 515184695862ef58560603e105dc311138bd0e04 Mon Sep 17 00:00:00 2001 From: Satendra Singh Thakur Date: Sat, 9 Nov 2019 17:05:23 +0530 Subject: [PATCH 0914/1304] dmaengine: mediatek: hsdma_probe: fixed a memory leak when devm_request_irq fails [ Upstream commit 1ff95243257fad07290dcbc5f7a6ad79d6e703e2 ] When devm_request_irq fails, currently, the function dma_async_device_unregister gets called. This doesn't free the resources allocated by of_dma_controller_register. Therefore, we have called of_dma_controller_free for this purpose. 
Signed-off-by: Satendra Singh Thakur Link: https://lore.kernel.org/r/20191109113523.6067-1-sst2005@gmail.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/mediatek/mtk-hsdma.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index b7ec56ae02a6..fca232b1d4a6 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "request_irq failed with err %d\n", err); - goto err_unregister; + goto err_free; } platform_set_drvdata(pdev, hsdma); @@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev) return 0; +err_free: + of_dma_controller_free(pdev->dev.of_node); err_unregister: dma_async_device_unregister(dd); -- GitLab From 14d73b6a1183f8573fae64996f3a7ccadcee9a33 Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Wed, 6 Nov 2019 14:23:54 +0800 Subject: [PATCH 0915/1304] RDMA/qedr: Fix potential use after free MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 960657b732e1ce21b07be5ab48a7ad3913d72ba4 ] Move the release operation after error log to avoid possible use after free. 
Link: https://lore.kernel.org/r/1573021434-18768-1-git-send-email-bianpan2016@163.com Signed-off-by: Pan Bian Acked-by: Michal Kalderon  Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/hw/qedr/qedr_iw_cm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c index 256671577367..e908dfbaa137 100644 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c @@ -460,10 +460,10 @@ qedr_addr6_resolve(struct qedr_dev *dev, if ((!dst) || dst->error) { if (dst) { - dst_release(dst); DP_ERR(dev, "ip6_route_output returned dst->error = %d\n", dst->error); + dst_release(dst); } return -EINVAL; } -- GitLab From 1e4922f7e29fcbe46ac0d3e0eeab2ae0ee2e12fd Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Wed, 6 Nov 2019 14:44:11 +0800 Subject: [PATCH 0916/1304] RDMA/i40iw: Fix potential use after free [ Upstream commit da046d5f895fca18d63b15ac8faebd5bf784e23a ] Release variable dst after logging dst->error to avoid possible use after free. 
Link: https://lore.kernel.org/r/1573022651-37171-1-git-send-email-bianpan2016@163.com Signed-off-by: Pan Bian Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/hw/i40iw/i40iw_cm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 4321b9e3dbb4..0273d0404e74 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -2071,9 +2071,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); if (!dst || dst->error) { if (dst) { - dst_release(dst); i40iw_pr_err("ip6_route_output returned dst->error = %d\n", dst->error); + dst_release(dst); } return rc; } -- GitLab From 98e151baabddf8f26212739ad4f194591a1b22f6 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 31 Oct 2019 01:43:31 -0400 Subject: [PATCH 0917/1304] fix dget_parent() fastpath race [ Upstream commit e84009336711d2bba885fc9cea66348ddfce3758 ] We are overoptimistic about taking the fast path there; seeing the same value in ->d_parent after having grabbed a reference to that parent does *not* mean that it has remained our parent all along. That wouldn't be a big deal (in the end it is our parent and we have grabbed the reference we are about to return), but... the situation with barriers is messed up. We might have hit the following sequence: d is a dentry of /tmp/a/b CPU1: CPU2: parent = d->d_parent (i.e. dentry of /tmp/a) rename /tmp/a/b to /tmp/b rmdir /tmp/a, making its dentry negative grab reference to parent, end up with cached parent->d_inode (NULL) mkdir /tmp/a, rename /tmp/b to /tmp/a/b recheck d->d_parent, which is back to original decide that everything's fine and return the reference we'd got. The trouble is, caller (on CPU1) will observe dget_parent() returning an apparently negative dentry. 
It actually is positive, but CPU1 has stale ->d_inode cached. Use d->d_seq to see if it has been moved instead of rechecking ->d_parent. NOTE: we are *NOT* going to retry on any kind of ->d_seq mismatch; we just go into the slow path in such case. We don't wait for ->d_seq to become even either - again, if we are racing with renames, we can bloody well go to slow path anyway. Signed-off-by: Al Viro Signed-off-by: Sasha Levin --- fs/dcache.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/dcache.c b/fs/dcache.c index 6e0022326afe..20370a0997bf 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -864,17 +864,19 @@ struct dentry *dget_parent(struct dentry *dentry) { int gotref; struct dentry *ret; + unsigned seq; /* * Do optimistic parent lookup without any * locking. */ rcu_read_lock(); + seq = raw_seqcount_begin(&dentry->d_seq); ret = READ_ONCE(dentry->d_parent); gotref = lockref_get_not_zero(&ret->d_lockref); rcu_read_unlock(); if (likely(gotref)) { - if (likely(ret == READ_ONCE(dentry->d_parent))) + if (!read_seqcount_retry(&dentry->d_seq, seq)) return ret; dput(ret); } -- GitLab From 03ad258887f66a51ece17e6604905616a5647c55 Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Fri, 15 Nov 2019 21:15:08 -0800 Subject: [PATCH 0918/1304] xfs: fix attr leaf header freemap.size underflow [ Upstream commit 2a2b5932db67586bacc560cc065d62faece5b996 ] The leaf format xattr addition helper xfs_attr3_leaf_add_work() adjusts the block freemap in a couple places. The first update drops the size of the freemap that the caller had already selected to place the xattr name/value data. Before the function returns, it also checks whether the entries array has encroached on a freemap range by virtue of the new entry addition. This is necessary because the entries array grows from the start of the block (but end of the block header) towards the end of the block while the name/value data grows from the end of the block in the opposite direction. 
If the associated freemap is already empty, however, size is zero and the subtraction underflows the field and causes corruption. This is reproduced rarely by generic/070. The observed behavior is that a smaller sized freemap is aligned to the end of the entries list, several subsequent xattr additions land in larger freemaps and the entries list expands into the smaller freemap until it is fully consumed and then underflows. Note that it is not otherwise a corruption for the entries array to consume an empty freemap because the nameval list (i.e. the firstused pointer in the xattr header) starts beyond the end of the corrupted freemap. Update the freemap size modification to account for the fact that the freemap entry can be empty and thus stale. Signed-off-by: Brian Foster Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong Signed-off-by: Sasha Levin --- fs/xfs/libxfs/xfs_attr_leaf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index bd37f4a292c3..efb586ea508b 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -1438,7 +1438,9 @@ xfs_attr3_leaf_add_work( for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) { if (ichdr->freemap[i].base == tmp) { ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t); - ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t); + ichdr->freemap[i].size -= + min_t(uint16_t, ichdr->freemap[i].size, + sizeof(xfs_attr_leaf_entry_t)); } } ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index); -- GitLab From 3be4a89abe31efcd9d53dd5fbfd4c384593c85ec Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 23 Sep 2019 21:07:46 +0200 Subject: [PATCH 0919/1304] RDMA/iw_cgxb4: Fix an error handling path in 'c4iw_connect()' [ Upstream commit 9067f2f0b41d7e817fc8c5259bab1f17512b0147 ] We should jump to fail3 in order to undo the 'xa_insert_irq()' call. 
Link: https://lore.kernel.org/r/20190923190746.10964-1-christophe.jaillet@wanadoo.fr Signed-off-by: Christophe JAILLET Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/hw/cxgb4/cm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 16145b0a1458..3fd3dfa3478b 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -3293,7 +3293,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { err = pick_local_ipaddrs(dev, cm_id); if (err) - goto fail2; + goto fail3; } /* find a route */ @@ -3315,7 +3315,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { err = pick_local_ip6addrs(dev, cm_id); if (err) - goto fail2; + goto fail3; } /* find a route */ -- GitLab From 1ff84e6366b02643667b9a50548e07422afc0757 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Tue, 5 Nov 2019 09:12:51 +0100 Subject: [PATCH 0920/1304] ubi: Fix producing anchor PEBs [ Upstream commit f9c34bb529975fe9f85b870a80c53a83a3c5a182 ] When a new fastmap is about to be written UBI must make sure it has a free block for a fastmap anchor available. For this ubi_update_fastmap() calls ubi_ensure_anchor_pebs(). This stopped working with 2e8f08deabbc ("ubi: Fix races around ubi_refill_pools()"), with this commit the wear leveling code is blocked and can no longer produce free PEBs. UBI then more often than not falls back to write the new fastmap anchor to the same block it was already on which means the same erase block gets erased during each fastmap write and wears out quite fast. As the locking prevents us from producing the anchor PEB when we actually need it, this patch changes the strategy for creating the anchor PEB. 
We no longer create it on demand right before we want to write a fastmap, but instead we create an anchor PEB right after we have written a fastmap. This gives us enough time to produce a new anchor PEB before it is needed. To make sure we have an anchor PEB for the very first fastmap write we call ubi_ensure_anchor_pebs() during initialisation as well. Fixes: 2e8f08deabbc ("ubi: Fix races around ubi_refill_pools()") Signed-off-by: Sascha Hauer Signed-off-by: Richard Weinberger Signed-off-by: Sasha Levin --- drivers/mtd/ubi/fastmap-wl.c | 31 ++++++++++++++++++------------- drivers/mtd/ubi/fastmap.c | 14 +++++--------- drivers/mtd/ubi/ubi.h | 6 ++++-- drivers/mtd/ubi/wl.c | 32 ++++++++++++++------------------ drivers/mtd/ubi/wl.h | 1 - 5 files changed, 41 insertions(+), 43 deletions(-) diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 98f7d6be8d1f..13efebb40022 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -66,18 +66,6 @@ static void return_unused_pool_pebs(struct ubi_device *ubi, } } -static int anchor_pebs_available(struct rb_root *root) -{ - struct rb_node *p; - struct ubi_wl_entry *e; - - ubi_rb_for_each_entry(p, e, root, u.rb) - if (e->pnum < UBI_FM_MAX_START) - return 1; - - return 0; -} - /** * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number. * @ubi: UBI device description object @@ -286,8 +274,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) int ubi_ensure_anchor_pebs(struct ubi_device *ubi) { struct ubi_work *wrk; + struct ubi_wl_entry *anchor; spin_lock(&ubi->wl_lock); + + /* Do we already have an anchor? 
*/ + if (ubi->fm_anchor) { + spin_unlock(&ubi->wl_lock); + return 0; + } + + /* See if we can find an anchor PEB on the list of free PEBs */ + anchor = ubi_wl_get_fm_peb(ubi, 1); + if (anchor) { + ubi->fm_anchor = anchor; + spin_unlock(&ubi->wl_lock); + return 0; + } + + /* No luck, trigger wear leveling to produce a new anchor PEB */ + ubi->fm_do_produce_anchor = 1; if (ubi->wl_scheduled) { spin_unlock(&ubi->wl_lock); return 0; @@ -303,7 +309,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi) return -ENOMEM; } - wrk->anchor = 1; wrk->func = &wear_leveling_worker; __schedule_ubi_work(ubi, wrk); return 0; diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 8e292992f84c..b88ef875236c 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1552,14 +1552,6 @@ int ubi_update_fastmap(struct ubi_device *ubi) return 0; } - ret = ubi_ensure_anchor_pebs(ubi); - if (ret) { - up_write(&ubi->fm_eba_sem); - up_write(&ubi->work_sem); - up_write(&ubi->fm_protect); - return ret; - } - new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL); if (!new_fm) { up_write(&ubi->fm_eba_sem); @@ -1630,7 +1622,8 @@ int ubi_update_fastmap(struct ubi_device *ubi) } spin_lock(&ubi->wl_lock); - tmp_e = ubi_wl_get_fm_peb(ubi, 1); + tmp_e = ubi->fm_anchor; + ubi->fm_anchor = NULL; spin_unlock(&ubi->wl_lock); if (old_fm) { @@ -1682,6 +1675,9 @@ int ubi_update_fastmap(struct ubi_device *ubi) up_write(&ubi->work_sem); up_write(&ubi->fm_protect); kfree(old_fm); + + ubi_ensure_anchor_pebs(ubi); + return ret; err: diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index d47b9e436e67..d248ec371cc1 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -504,6 +504,8 @@ struct ubi_debug_info { * @fm_work: fastmap work queue * @fm_work_scheduled: non-zero if fastmap work was scheduled * @fast_attach: non-zero if UBI was attached by fastmap + * @fm_anchor: The next anchor PEB to use for fastmap + * @fm_do_produce_anchor: If true produce an anchor PEB in wl 
* * @used: RB-tree of used physical eraseblocks * @erroneous: RB-tree of erroneous used physical eraseblocks @@ -612,6 +614,8 @@ struct ubi_device { struct work_struct fm_work; int fm_work_scheduled; int fast_attach; + struct ubi_wl_entry *fm_anchor; + int fm_do_produce_anchor; /* Wear-leveling sub-system's stuff */ struct rb_root used; @@ -802,7 +806,6 @@ struct ubi_attach_info { * @vol_id: the volume ID on which this erasure is being performed * @lnum: the logical eraseblock number * @torture: if the physical eraseblock has to be tortured - * @anchor: produce a anchor PEB to by used by fastmap * * The @func pointer points to the worker function. If the @shutdown argument is * not zero, the worker has to free the resources and exit immediately as the @@ -818,7 +821,6 @@ struct ubi_work { int vol_id; int lnum; int torture; - int anchor; }; #include "debug.h" diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 6f2ac865ff05..80d64d7e7a8b 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -331,13 +331,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, } } - /* If no fastmap has been written and this WL entry can be used - * as anchor PEB, hold it back and return the second best WL entry - * such that fastmap can use the anchor PEB later. 
*/ - if (prev_e && !ubi->fm_disabled && - !ubi->fm && e->pnum < UBI_FM_MAX_START) - return prev_e; - return e; } @@ -648,9 +641,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, { int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; int erase = 0, keep = 0, vol_id = -1, lnum = -1; -#ifdef CONFIG_MTD_UBI_FASTMAP - int anchor = wrk->anchor; -#endif struct ubi_wl_entry *e1, *e2; struct ubi_vid_io_buf *vidb; struct ubi_vid_hdr *vid_hdr; @@ -690,11 +680,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, } #ifdef CONFIG_MTD_UBI_FASTMAP - /* Check whether we need to produce an anchor PEB */ - if (!anchor) - anchor = !anchor_pebs_available(&ubi->free); - - if (anchor) { + if (ubi->fm_do_produce_anchor) { e1 = find_anchor_wl_entry(&ubi->used); if (!e1) goto out_cancel; @@ -705,6 +691,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, self_check_in_wl_tree(ubi, e1, &ubi->used); rb_erase(&e1->u.rb, &ubi->used); dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum); + ubi->fm_do_produce_anchor = 0; } else if (!ubi->scrub.rb_node) { #else if (!ubi->scrub.rb_node) { @@ -1037,7 +1024,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested) goto out_cancel; } - wrk->anchor = 0; wrk->func = &wear_leveling_worker; if (nested) __schedule_ubi_work(ubi, wrk); @@ -1079,8 +1065,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) err = sync_erase(ubi, e, wl_wrk->torture); if (!err) { spin_lock(&ubi->wl_lock); - wl_tree_add(e, &ubi->free); - ubi->free_count++; + + if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) { + ubi->fm_anchor = e; + ubi->fm_do_produce_anchor = 0; + } else { + wl_tree_add(e, &ubi->free); + ubi->free_count++; + } + spin_unlock(&ubi->wl_lock); /* @@ -1724,6 +1717,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (err) goto out_free; +#ifdef CONFIG_MTD_UBI_FASTMAP + 
ubi_ensure_anchor_pebs(ubi); +#endif return 0; out_free: diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h index a9e2d669acd8..c93a53293786 100644 --- a/drivers/mtd/ubi/wl.h +++ b/drivers/mtd/ubi/wl.h @@ -2,7 +2,6 @@ #ifndef UBI_WL_H #define UBI_WL_H #ifdef CONFIG_MTD_UBI_FASTMAP -static int anchor_pebs_available(struct rb_root *root); static void update_fastmap_work_fn(struct work_struct *wrk); static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root); static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi); -- GitLab From 5b686ce8126c971bdab261707296f4d26a71ac4e Mon Sep 17 00:00:00 2001 From: Bradley Bolen Date: Sat, 16 Nov 2019 20:00:45 -0500 Subject: [PATCH 0921/1304] mmc: core: Fix size overflow for mmc partitions [ Upstream commit f3d7c2292d104519195fdb11192daec13229c219 ] With large eMMC cards, it is possible to create general purpose partitions that are bigger than 4GB. The size member of the mmc_part struct is only an unsigned int which overflows for gp partitions larger than 4GB. Change this to a u64 to handle the overflow. 
Signed-off-by: Bradley Bolen Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin --- drivers/mmc/core/mmc.c | 9 ++++----- include/linux/mmc/card.h | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 5ca53e225382..4b18034537f5 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -300,7 +300,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd) } } -static void mmc_part_add(struct mmc_card *card, unsigned int size, +static void mmc_part_add(struct mmc_card *card, u64 size, unsigned int part_cfg, char *name, int idx, bool ro, int area_type) { @@ -316,7 +316,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) { int idx; u8 hc_erase_grp_sz, hc_wp_grp_sz; - unsigned int part_size; + u64 part_size; /* * General purpose partition feature support -- @@ -346,8 +346,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] << 8) + ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; - part_size *= (size_t)(hc_erase_grp_sz * - hc_wp_grp_sz); + part_size *= (hc_erase_grp_sz * hc_wp_grp_sz); mmc_part_add(card, part_size << 19, EXT_CSD_PART_CONFIG_ACC_GP0 + idx, "gp%d", idx, false, @@ -365,7 +364,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd) static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd) { int err = 0, idx; - unsigned int part_size; + u64 part_size; struct device_node *np; bool broken_hpi = false; diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 8ef330027b13..3f8e84a80b4a 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -227,7 +227,7 @@ struct mmc_queue_req; * MMC Physical partitions */ struct mmc_part { - unsigned int size; /* partition size (in bytes) */ + u64 size; /* partition size (in bytes) */ unsigned int part_cfg; /* partition type */ char name[MAX_MMC_PART_NAME_LEN]; bool force_ro; /* to 
make boot parts RO by default */ -- GitLab From 19709adfd7cda040781705ca02a6f7b638d2ac14 Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Tue, 19 Nov 2019 11:40:46 -0500 Subject: [PATCH 0922/1304] gfs2: clean up iopen glock mess in gfs2_create_inode [ Upstream commit 2c47c1be51fbded1f7baa2ceaed90f97932f79be ] Before this patch, gfs2_create_inode had a use-after-free for the iopen glock in some error paths because it did this: gfs2_glock_put(io_gl); fail_gunlock2: if (io_gl) clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); In some cases, the io_gl was used for create and only had one reference, so the glock might be freed before the clear_bit(). This patch tries to straighten it out by only jumping to the error paths where iopen is properly set, and moving the gfs2_glock_put after the clear_bit. Signed-off-by: Bob Peterson Signed-off-by: Andreas Gruenbacher Signed-off-by: Sasha Levin --- fs/gfs2/inode.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index d968b5c5df21..a52b8b0dceeb 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -715,7 +715,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_trans_begin(sdp, blocks, 0); if (error) - goto fail_gunlock2; + goto fail_free_inode; if (blocks > 1) { ip->i_eattr = ip->i_no_addr + 1; @@ -726,7 +726,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) - goto fail_gunlock2; + goto fail_free_inode; BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags)); @@ -735,7 +735,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, goto fail_gunlock2; glock_set_object(ip->i_iopen_gh.gh_gl, ip); - gfs2_glock_put(io_gl); gfs2_set_iop(inode); insert_inode_hash(inode); @@ -768,6 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, mark_inode_dirty(inode); 
d_instantiate(dentry, inode); + /* After instantiate, errors should result in evict which will destroy + * both inode and iopen glocks properly. */ if (file) { file->f_mode |= FMODE_CREATED; error = finish_open(file, dentry, gfs2_open_common); @@ -775,15 +776,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, gfs2_glock_dq_uninit(ghs); gfs2_glock_dq_uninit(ghs + 1); clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + gfs2_glock_put(io_gl); return error; fail_gunlock3: glock_clear_object(io_gl, ip); gfs2_glock_dq_uninit(&ip->i_iopen_gh); - gfs2_glock_put(io_gl); fail_gunlock2: - if (io_gl) - clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); + gfs2_glock_put(io_gl); fail_free_inode: if (ip->i_gl) { glock_clear_object(ip->i_gl, ip); -- GitLab From db3ff4f7d7a83dfd63ec4e1c34e387b4e9aa93a6 Mon Sep 17 00:00:00 2001 From: peter chang Date: Thu, 14 Nov 2019 15:39:06 +0530 Subject: [PATCH 0923/1304] scsi: pm80xx: Cleanup command when a reset times out [ Upstream commit 51c1c5f6ed64c2b65a8cf89dac136273d25ca540 ] Added the fix so the if driver properly sent the abort it tries to remove it from the firmware's list of outstanding commands regardless of the abort status. This means that the task gets freed 'now' rather than possibly getting freed later when the scsi layer thinks it's leaked but still valid. Link: https://lore.kernel.org/r/20191114100910.6153-10-deepak.ukey@microchip.com Acked-by: Jack Wang Signed-off-by: peter chang Signed-off-by: Deepak Ukey Signed-off-by: Viswas G Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/pm8001/pm8001_sas.c | 50 +++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index ba79b37d8cf7..5becdde3ea32 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -1184,8 +1184,8 @@ int pm8001_abort_task(struct sas_task *task) pm8001_ha = pm8001_find_ha_by_dev(dev); device_id = pm8001_dev->device_id; phy_id = pm8001_dev->attached_phy; - rc = pm8001_find_tag(task, &tag); - if (rc == 0) { + ret = pm8001_find_tag(task, &tag); + if (ret == 0) { pm8001_printk("no tag for task:%p\n", task); return TMF_RESP_FUNC_FAILED; } @@ -1223,26 +1223,50 @@ int pm8001_abort_task(struct sas_task *task) /* 2. Send Phy Control Hard Reset */ reinit_completion(&completion); + phy->port_reset_status = PORT_RESET_TMO; phy->reset_success = false; phy->enable_completion = &completion; phy->reset_completion = &completion_reset; ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_HARD_RESET); - if (ret) - goto out; - PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("Waiting for local phy ctl\n")); - wait_for_completion(&completion); - if (!phy->reset_success) + if (ret) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; goto out; + } - /* 3. Wait for Port Reset complete / Port reset TMO */ + /* In the case of the reset timeout/fail we still + * abort the command at the firmware. The assumption + * here is that the drive is off doing something so + * that it's not processing requests, and we want to + * avoid getting a completion for this and either + * leaking the task in libsas or losing the race and + * getting a double free. 
+ */ PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Waiting for local phy ctl\n")); + ret = wait_for_completion_timeout(&completion, + PM8001_TASK_TIMEOUT * HZ); + if (!ret || !phy->reset_success) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; + } else { + /* 3. Wait for Port Reset complete or + * Port reset TMO + */ + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Waiting for Port reset\n")); - wait_for_completion(&completion_reset); - if (phy->port_reset_status) { - pm8001_dev_gone_notify(dev); - goto out; + ret = wait_for_completion_timeout( + &completion_reset, + PM8001_TASK_TIMEOUT * HZ); + if (!ret) + phy->reset_completion = NULL; + WARN_ON(phy->port_reset_status == + PORT_RESET_TMO); + if (phy->port_reset_status == PORT_RESET_TMO) { + pm8001_dev_gone_notify(dev); + goto out; + } } /* -- GitLab From 421ac08d3d5df1eb7a3a6481986714ca2131ff44 Mon Sep 17 00:00:00 2001 From: Kusanagi Kouichi Date: Thu, 21 Nov 2019 19:20:21 +0900 Subject: [PATCH 0924/1304] debugfs: Fix !DEBUG_FS debugfs_create_automount [ Upstream commit 4250b047039d324e0ff65267c8beb5bad5052a86 ] If DEBUG_FS=n, compile fails with the following error: kernel/trace/trace.c: In function 'tracing_init_dentry': kernel/trace/trace.c:8658:9: error: passing argument 3 of 'debugfs_create_automount' from incompatible pointer type [-Werror=incompatible-pointer-types] 8658 | trace_automount, NULL); | ^~~~~~~~~~~~~~~ | | | struct vfsmount * (*)(struct dentry *, void *) In file included from kernel/trace/trace.c:24: ./include/linux/debugfs.h:206:25: note: expected 'struct vfsmount * (*)(void *)' but argument is of type 'struct vfsmount * (*)(struct dentry *, void *)' 206 | struct vfsmount *(*f)(void *), | ~~~~~~~~~~~~~~~~~~~^~~~~~~~~~ Signed-off-by: Kusanagi Kouichi Link: https://lore.kernel.org/r/20191121102021787.MLMY.25002.ppp.dion.ne.jp@dmta0003.auone-net.jp Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- include/linux/debugfs.h | 5 +++-- 1 file changed, 3 insertions(+), 2 
deletions(-) diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 3b0ba54cc4d5..3bc1034c57e6 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -54,6 +54,8 @@ static const struct file_operations __fops = { \ .llseek = no_llseek, \ } +typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); + #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_lookup(const char *name, struct dentry *parent); @@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *dest); -typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, debugfs_automount_t f, @@ -204,7 +205,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name, static inline struct dentry *debugfs_create_automount(const char *name, struct dentry *parent, - struct vfsmount *(*f)(void *), + debugfs_automount_t f, void *data) { return ERR_PTR(-ENODEV); -- GitLab From 18201a7938dc7dbd9ebee7e02d50357657643b22 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Tue, 29 Oct 2019 16:51:19 -0700 Subject: [PATCH 0925/1304] CIFS: Properly process SMB3 lease breaks [ Upstream commit 9bd4540836684013aaad6070a65d6fcdd9006625 ] Currenly we doesn't assume that a server may break a lease from RWH to RW which causes us setting a wrong lease state on a file and thus mistakenly flushing data and byte-range locks and purging cached data on the client. This leads to performance degradation because subsequent IOs go directly to the server. Fix this by propagating new lease state and epoch values to the oplock break handler through cifsFileInfo structure and removing the use of cifsInodeInfo flags for that. It allows to avoid some races of several lease/oplock breaks using those flags in parallel. 
Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/cifs/cifsglob.h | 9 ++++++--- fs/cifs/file.c | 10 +++++++--- fs/cifs/misc.c | 17 +++-------------- fs/cifs/smb1ops.c | 8 +++----- fs/cifs/smb2misc.c | 32 +++++++------------------------- fs/cifs/smb2ops.c | 44 ++++++++++++++++++++++++++++++-------------- fs/cifs/smb2pdu.h | 2 +- 7 files changed, 57 insertions(+), 65 deletions(-) diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 71c2dd0c7f03..2c632793c88c 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -259,8 +259,9 @@ struct smb_version_operations { int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); bool (*is_oplock_break)(char *, struct TCP_Server_Info *); int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *); - void (*downgrade_oplock)(struct TCP_Server_Info *, - struct cifsInodeInfo *, bool); + void (*downgrade_oplock)(struct TCP_Server_Info *server, + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache); /* process transaction2 response */ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, char *, int); @@ -1160,6 +1161,8 @@ struct cifsFileInfo { unsigned int f_flags; bool invalidHandle:1; /* file closed via session abend */ bool oplock_break_cancelled:1; + unsigned int oplock_epoch; /* epoch from the lease break */ + __u32 oplock_level; /* oplock/lease level from the lease break */ int count; spinlock_t file_info_lock; /* protects four flag/count fields above */ struct mutex fh_mutex; /* prevents reopen race after dead ses*/ @@ -1300,7 +1303,7 @@ struct cifsInodeInfo { unsigned int epoch; /* used to track lease state changes */ #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ -#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */ +#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ #define CIFS_INO_DELETE_PENDING 
(3) /* delete pending on server */ #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 128cbd69911b..e78b52c582f1 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4185,12 +4185,13 @@ void cifs_oplock_break(struct work_struct *work) struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; int rc = 0; + bool purge_cache = false; wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); - server->ops->downgrade_oplock(server, cinode, - test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags)); + server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, + cfile->oplock_epoch, &purge_cache); if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) && cifs_has_mand_locks(cinode)) { @@ -4205,18 +4206,21 @@ void cifs_oplock_break(struct work_struct *work) else break_lease(inode, O_WRONLY); rc = filemap_fdatawrite(inode->i_mapping); - if (!CIFS_CACHE_READ(cinode)) { + if (!CIFS_CACHE_READ(cinode) || purge_cache) { rc = filemap_fdatawait(inode->i_mapping); mapping_set_error(inode->i_mapping, rc); cifs_zap_mapping(inode); } cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc); + if (CIFS_CACHE_WRITE(cinode)) + goto oplock_break_ack; } rc = cifs_push_locks(cfile); if (rc) cifs_dbg(VFS, "Push locks rc = %d\n", rc); +oplock_break_ack: /* * releasing stale oplock after recent reconnect of smb session using * a now incorrect file handle is not a data integrity issue but do diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index e45f8e321371..dd67f56ea61e 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -477,21 +477,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &pCifsInode->flags); - /* - * Set flag if the server downgrades the oplock - * to L2 else clear. 
- */ - if (pSMB->OplockLevel) - set_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &pCifsInode->flags); - else - clear_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &pCifsInode->flags); - - cifs_queue_oplock_break(netfile); + netfile->oplock_epoch = 0; + netfile->oplock_level = pSMB->OplockLevel; netfile->oplock_break_cancelled = false; + cifs_queue_oplock_break(netfile); spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index c7f0c8566442..0b7f92451284 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -381,12 +381,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) static void cifs_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - if (set_level2) - cifs_set_oplock_level(cinode, OPLOCK_READ); - else - cifs_set_oplock_level(cinode, 0); + cifs_set_oplock_level(cinode, oplock); } static bool diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 2fc96f7923ee..7d875a47d022 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -550,7 +550,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp) cifs_dbg(FYI, "found in the open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", - le32_to_cpu(rsp->NewLeaseState)); + lease_state); if (ack_req) cfile->oplock_break_cancelled = false; @@ -559,17 +559,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp) set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); - /* - * Set or clear flags depending on the lease state being READ. - * HANDLE caching flag should be added when the client starts - * to defer closing remote file handles with HANDLE leases. 
- */ - if (lease_state & SMB2_LEASE_READ_CACHING_HE) - set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); - else - clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); + cfile->oplock_epoch = le16_to_cpu(rsp->Epoch); + cfile->oplock_level = lease_state; cifs_queue_oplock_break(cfile); return true; @@ -599,7 +590,7 @@ smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon, cifs_dbg(FYI, "found in the pending open list\n"); cifs_dbg(FYI, "lease key match, lease break 0x%x\n", - le32_to_cpu(rsp->NewLeaseState)); + lease_state); open->oplock = lease_state; } @@ -732,18 +723,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags); - /* - * Set flag if the server downgrades the oplock - * to L2 else clear. - */ - if (rsp->OplockLevel) - set_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); - else - clear_bit( - CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, - &cinode->flags); + cfile->oplock_epoch = 0; + cfile->oplock_level = rsp->OplockLevel; + spin_unlock(&cfile->file_info_lock); cifs_queue_oplock_break(cfile); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 2a523139a05f..947a40069d24 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -2358,22 +2358,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode, static void smb2_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - if (set_level2) - server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II, - 0, NULL); - else - server->ops->set_oplock_level(cinode, 0, 0, NULL); + server->ops->set_oplock_level(cinode, oplock, 0, NULL); } static void -smb21_downgrade_oplock(struct TCP_Server_Info *server, - struct cifsInodeInfo *cinode, bool set_level2) +smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool 
*purge_cache); + +static void +smb3_downgrade_oplock(struct TCP_Server_Info *server, + struct cifsInodeInfo *cinode, __u32 oplock, + unsigned int epoch, bool *purge_cache) { - server->ops->set_oplock_level(cinode, - set_level2 ? SMB2_LEASE_READ_CACHING_HE : - 0, 0, NULL); + unsigned int old_state = cinode->oplock; + unsigned int old_epoch = cinode->epoch; + unsigned int new_state; + + if (epoch > old_epoch) { + smb21_set_oplock_level(cinode, oplock, 0, NULL); + cinode->epoch = epoch; + } + + new_state = cinode->oplock; + *purge_cache = false; + + if ((old_state & CIFS_CACHE_READ_FLG) != 0 && + (new_state & CIFS_CACHE_READ_FLG) == 0) + *purge_cache = true; + else if (old_state == new_state && (epoch - old_epoch > 1)) + *purge_cache = true; } static void @@ -3449,7 +3465,7 @@ struct smb_version_operations smb21_operations = { .print_stats = smb2_print_stats, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb2_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, @@ -3546,7 +3562,7 @@ struct smb_version_operations smb30_operations = { .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, @@ -3651,7 +3667,7 @@ struct smb_version_operations smb311_operations = { .dump_share_caps = smb2_dump_share_caps, .is_oplock_break = smb2_is_valid_oplock_break, .handle_cancelled_mid = smb2_handle_cancelled_mid, - .downgrade_oplock = smb21_downgrade_oplock, + .downgrade_oplock = smb3_downgrade_oplock, .need_neg = smb2_need_neg, .negotiate = smb2_negotiate, .negotiate_wsize = smb2_negotiate_wsize, diff --git a/fs/cifs/smb2pdu.h 
b/fs/cifs/smb2pdu.h index 308c682fa4d3..44501f8cbd75 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -1209,7 +1209,7 @@ struct smb2_oplock_break { struct smb2_lease_break { struct smb2_sync_hdr sync_hdr; __le16 StructureSize; /* Must be 44 */ - __le16 Reserved; + __le16 Epoch; __le32 Flags; __u8 LeaseKey[16]; __le32 CurrentLeaseState; -- GitLab From 6ce5dd69dc104b426c76664035b24987b74ae431 Mon Sep 17 00:00:00 2001 From: Tzung-Bi Shih Date: Fri, 22 Nov 2019 15:31:12 +0800 Subject: [PATCH 0926/1304] ASoC: max98090: remove msleep in PLL unlocked workaround [ Upstream commit acb874a7c049ec49d8fc66c893170fb42c01bdf7 ] It was observed Baytrail-based chromebooks could cause continuous PLL unlocked when using playback stream and capture stream simultaneously. Specifically, starting a capture stream after started a playback stream. As a result, the audio data could corrupt or turn completely silent. As the datasheet suggested, the maximum PLL lock time should be 7 msec. The workaround resets the codec softly by toggling SHDN off and on if PLL failed to lock for 10 msec. Notably, there is no suggested hold time for SHDN off. On Baytrail-based chromebooks, it would easily happen continuous PLL unlocked if there is a 10 msec delay between SHDN off and on. Removes the msleep(). Signed-off-by: Tzung-Bi Shih Link: https://lore.kernel.org/r/20191122073114.219945-2-tzungbi@google.com Reviewed-by: Pierre-Louis Bossart Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/codecs/max98090.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index 89b6e187ac23..a5b0c40ee545 100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c @@ -2130,10 +2130,16 @@ static void max98090_pll_work(struct max98090_priv *max98090) dev_info_ratelimited(component->dev, "PLL unlocked\n"); + /* + * As the datasheet suggested, the maximum PLL lock time should be + * 7 msec. 
The workaround resets the codec softly by toggling SHDN + * off and on if PLL failed to lock for 10 msec. Notably, there is + * no suggested hold time for SHDN off. + */ + /* Toggle shutdown OFF then ON */ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, 0); - msleep(10); snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN, M98090_SHDNN_MASK, M98090_SHDNN_MASK); -- GitLab From 489cee9124d53ce20fb3f989449efa2aa51a4b18 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 4 Dec 2019 16:50:53 -0800 Subject: [PATCH 0927/1304] kernel/sys.c: avoid copying possible padding bytes in copy_to_user [ Upstream commit 5e1aada08cd19ea652b2d32a250501d09b02ff2e ] Initialization is not guaranteed to zero padding bytes so use an explicit memset instead to avoid leaking any kernel content in any possible padding bytes. Link: http://lkml.kernel.org/r/dfa331c00881d61c8ee51577a082d8bebd61805c.camel@perches.com Signed-off-by: Joe Perches Cc: Dan Carpenter Cc: Julia Lawall Cc: Thomas Gleixner Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- kernel/sys.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/sys.c b/kernel/sys.c index 096932a45046..baf60a3aa34b 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1275,11 +1275,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) { - struct oldold_utsname tmp = {}; + struct oldold_utsname tmp; if (!name) return -EFAULT; + memset(&tmp, 0, sizeof(tmp)); + down_read(&uts_sem); memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); -- GitLab From 8b3a8c6a73c589006eb5d29748f0b8a6c81f3fd6 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Thu, 28 Nov 2019 14:38:48 +0800 Subject: [PATCH 0928/1304] KVM: arm/arm64: vgic: Fix potential double free dist->spis in __kvm_vgic_destroy() [ Upstream 
commit 0bda9498dd45280e334bfe88b815ebf519602cc3 ] In kvm_vgic_dist_init() called from kvm_vgic_map_resources(), if dist->vgic_model is invalid, dist->spis will be freed without set dist->spis = NULL. And in vgicv2 resources clean up path, __kvm_vgic_destroy() will be called to free allocated resources. And dist->spis will be freed again in clean up chain because we forget to set dist->spis = NULL in kvm_vgic_dist_init() failed path. So double free would happen. Signed-off-by: Miaohe Lin Signed-off-by: Marc Zyngier Reviewed-by: Eric Auger Link: https://lore.kernel.org/r/1574923128-19956-1-git-send-email-linmiaohe@huawei.com Signed-off-by: Sasha Levin --- virt/kvm/arm/vgic/vgic-init.c | 1 + 1 file changed, 1 insertion(+) diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index cd75df25fe14..2fc1777da50d 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -187,6 +187,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) break; default: kfree(dist->spis); + dist->spis = NULL; return -EINVAL; } } -- GitLab From 576f57da9107056935364824ecd2d78a07d542e6 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Wed, 11 Dec 2019 13:19:07 -0800 Subject: [PATCH 0929/1304] xfs: fix log reservation overflows when allocating large rt extents [ Upstream commit b1de6fc7520fe12949c070af0e8c0e4044cd3420 ] Omar Sandoval reported that a 4G fallocate on the realtime device causes filesystem shutdowns due to a log reservation overflow that happens when we log the rtbitmap updates. Factor rtbitmap/rtsummary updates into the the tr_write and tr_itruncate log reservation calculation. "The following reproducer results in a transaction log overrun warning for me: mkfs.xfs -f -r rtdev=/dev/vdc -d rtinherit=1 -m reflink=0 /dev/vdb mount -o rtdev=/dev/vdc /dev/vdb /mnt fallocate -l 4G /mnt/foo Reported-by: Omar Sandoval Tested-by: Omar Sandoval Signed-off-by: Darrick J. 
Wong Reviewed-by: Brian Foster Signed-off-by: Sasha Levin --- fs/xfs/libxfs/xfs_trans_resv.c | 96 +++++++++++++++++++++++++++------- 1 file changed, 77 insertions(+), 19 deletions(-) diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c index f99a7aefe418..2b3cc5a8ced1 100644 --- a/fs/xfs/libxfs/xfs_trans_resv.c +++ b/fs/xfs/libxfs/xfs_trans_resv.c @@ -197,6 +197,24 @@ xfs_calc_inode_chunk_res( return res; } +/* + * Per-extent log reservation for the btree changes involved in freeing or + * allocating a realtime extent. We have to be able to log as many rtbitmap + * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents, + * as well as the realtime summary block. + */ +unsigned int +xfs_rtalloc_log_count( + struct xfs_mount *mp, + unsigned int num_ops) +{ + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + unsigned int rtbmp_bytes; + + rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY; + return (howmany(rtbmp_bytes, blksz) + 1) * num_ops; +} + /* * Various log reservation values. * @@ -219,13 +237,21 @@ xfs_calc_inode_chunk_res( /* * In a write transaction we can allocate a maximum of 2 - * extents. This gives: + * extents. 
This gives (t1): * the inode getting the new extents: inode size * the inode's bmap btree: max depth * block size * the agfs of the ags from which the extents are allocated: 2 * sector * the superblock free block counter: sector size * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size - * And the bmap_finish transaction can free bmap blocks in a join: + * Or, if we're writing to a realtime file (t2): + * the inode getting the new extents: inode size + * the inode's bmap btree: max depth * block size + * the agfs of the ags from which the extents are allocated: 2 * sector + * the superblock free block counter: sector size + * the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 1 block + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size + * And the bmap_finish transaction can free bmap blocks in a join (t3): * the agfs of the ags containing the blocks: 2 * sector size * the agfls of the ags containing the blocks: 2 * sector size * the super block free block counter: sector size @@ -235,40 +261,72 @@ STATIC uint xfs_calc_write_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) + + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t2 = xfs_calc_inode_res(mp, 1) + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), - XFS_FSB_TO_B(mp, 1)) + + blksz) + xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), - XFS_FSB_TO_B(mp, 1)))); + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) + + 
xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz); + } else { + t2 = 0; + } + + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); } /* - * In truncating a file we free up to two extents at once. We can modify: + * In truncating a file we free up to two extents at once. We can modify (t1): * the inode being truncated: inode size * the inode's bmap btree: (max depth + 1) * block size - * And the bmap_finish transaction can free the blocks and bmap blocks: + * And the bmap_finish transaction can free the blocks and bmap blocks (t2): * the agf for each of the ags: 4 * sector size * the agfl for each of the ags: 4 * sector size * the super block to reflect the freed blocks: sector size * worst case split in allocation btrees per extent assuming 4 extents: * 4 exts * 2 trees * (2 * max depth - 1) * block size + * Or, if it's a realtime file (t3): + * the agf for each of the ags: 2 * sector size + * the agfl for each of the ags: 2 * sector size + * the super block to reflect the freed blocks: sector size + * the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes + * the realtime summary: 2 exts * 1 block + * worst case split in allocation btrees per extent assuming 2 extents: + * 2 exts * 2 trees * (2 * max depth - 1) * block size */ STATIC uint xfs_calc_itruncate_reservation( struct xfs_mount *mp) { - return XFS_DQUOT_LOGRES(mp) + - max((xfs_calc_inode_res(mp, 1) + - xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, - XFS_FSB_TO_B(mp, 1))), - (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), - XFS_FSB_TO_B(mp, 1)))); + unsigned int t1, t2, t3; + unsigned int blksz = XFS_FSB_TO_B(mp, 1); + + t1 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz); + + t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 
4), blksz); + + if (xfs_sb_version_hasrealtime(&mp->m_sb)) { + t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) + + xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz); + } else { + t3 = 0; + } + + return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3); } /* -- GitLab From f2cd82a26fcf477d593c546d8a84fc7881ceaf27 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 23 Jan 2020 10:11:28 +0300 Subject: [PATCH 0930/1304] neigh_stat_seq_next() should increase position index [ Upstream commit 1e3f9f073c47bee7c23e77316b07bc12338c5bba ] if seq_file .next fuction does not change position index, read after some lseek can generate unexpected output. https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/core/neighbour.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index bf738ec68cb5..6e890f51b7d8 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2844,6 +2844,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu+1; return per_cpu_ptr(tbl->stats, cpu); } + (*pos)++; return NULL; } -- GitLab From e10f2f29ba39f70d657f563d332405ea8f0cb349 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 23 Jan 2020 10:11:35 +0300 Subject: [PATCH 0931/1304] rt_cpu_seq_next should increase position index [ Upstream commit a3ea86739f1bc7e121d921842f0f4a8ab1af94d9 ] if seq_file .next fuction does not change position index, read after some lseek can generate unexpected output. https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- net/ipv4/route.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 84de87b7eedc..3db428242b22 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -274,6 +274,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } + (*pos)++; return NULL; } -- GitLab From 24cb471708d57962df50de38547dac45f0879f76 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 23 Jan 2020 10:12:06 +0300 Subject: [PATCH 0932/1304] ipv6_route_seq_next should increase position index [ Upstream commit 4fc427e0515811250647d44de38d87d7b0e0790f ] if seq_file .next fuction does not change position index, read after some lseek can generate unexpected output. https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/ipv6/ip6_fib.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 05a206202e23..b924941b96a3 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -2377,14 +2377,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) struct net *net = seq_file_net(seq); struct ipv6_route_iter *iter = seq->private; + ++(*pos); if (!v) goto iter_table; n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next); - if (n) { - ++*pos; + if (n) return n; - } iter_table: ipv6_route_check_sernum(iter); @@ -2392,8 +2391,6 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) r = fib6_walk_continue(&iter->w); spin_unlock_bh(&iter->tbl->tb6_lock); if (r > 0) { - if (v) - ++*pos; return iter->w.leaf; } else if (r < 0) { fib6_walker_unlink(net, &iter->w); -- GitLab From 0b4ba98354cc0e8f5a800bc19a1f576e61f49a2a Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Thu, 14 Nov 2019 19:03:00 +0100 Subject: [PATCH 0933/1304] seqlock: Require WRITE_ONCE 
surrounding raw_seqcount_barrier [ Upstream commit bf07132f96d426bcbf2098227fb680915cf44498 ] This patch proposes to require marked atomic accesses surrounding raw_write_seqcount_barrier. We reason that otherwise there is no way to guarantee propagation nor atomicity of writes before/after the barrier [1]. For example, consider the compiler tears stores either before or after the barrier; in this case, readers may observe a partial value, and because readers are unaware that writes are going on (writes are not in a seq-writer critical section), will complete the seq-reader critical section while having observed some partial state. [1] https://lwn.net/Articles/793253/ This came up when designing and implementing KCSAN, because KCSAN would flag these accesses as data-races. After careful analysis, our reasoning as above led us to conclude that the best thing to do is to propose an amendment to the raw_seqcount_barrier usage. Signed-off-by: Marco Elver Acked-by: Paul E. McKenney Signed-off-by: Paul E. McKenney Signed-off-by: Sasha Levin --- include/linux/seqlock.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index bcf4cf26b8c8..a42a29952889 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * usual consistency guarantee. It is one wmb cheaper, because we can * collapse the two back-to-back wmb()s. * + * Note that, writes surrounding the barrier should be declared atomic (e.g. + * via WRITE_ONCE): a) to ensure the writes become visible to other threads + * atomically, avoiding compiler optimizations; b) to document which writes are + * meant to propagate to the reader critical section. This is necessary because + * neither writes before and after the barrier are enclosed in a seq-writer + * critical section that would ensure readers are aware of ongoing writes. 
+ * * seqcount_t seq; * bool X = true, Y = false; * @@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * * void write(void) * { - * Y = true; + * WRITE_ONCE(Y, true); * * raw_write_seqcount_barrier(seq); * - * X = false; + * WRITE_ONCE(X, false); * } */ static inline void raw_write_seqcount_barrier(seqcount_t *s) -- GitLab From 4f3de4dd102e51461e7e6a03789824b52ff92c8c Mon Sep 17 00:00:00 2001 From: Nikhil Devshatwar Date: Tue, 12 Nov 2019 15:53:33 +0100 Subject: [PATCH 0934/1304] media: ti-vpe: cal: Restrict DMA to avoid memory corruption [ Upstream commit 6e72eab2e7b7a157d554b8f9faed7676047be7c1 ] When setting DMA for video capture from CSI channel, if the DMA size is not given, it ends up writing as much data as sent by the camera. This may lead to overwriting the buffers causing memory corruption. Observed green lines on the default framebuffer. Restrict the DMA to maximum height as specified in the S_FMT ioctl. Signed-off-by: Nikhil Devshatwar Signed-off-by: Benoit Parrot Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/platform/ti-vpe/cal.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c index be3155275a6b..d945323fc437 100644 --- a/drivers/media/platform/ti-vpe/cal.c +++ b/drivers/media/platform/ti-vpe/cal.c @@ -684,12 +684,13 @@ static void pix_proc_config(struct cal_ctx *ctx) } static void cal_wr_dma_config(struct cal_ctx *ctx, - unsigned int width) + unsigned int width, unsigned int height) { u32 val; val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)); set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK); + set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK); set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, CAL_WR_DMA_CTRL_DTAG_MASK); set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, @@ -1315,7 +1316,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned 
int count) csi2_lane_config(ctx); csi2_ctx_config(ctx); pix_proc_config(ctx); - cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline); + cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline, + ctx->v_fmt.fmt.pix.height); cal_wr_dma_addr(ctx, addr); csi2_ppi_enable(ctx); -- GitLab From 479468bef2fa4845cd894ad352181b619195fe70 Mon Sep 17 00:00:00 2001 From: Kevin Kou Date: Thu, 26 Dec 2019 12:29:17 +0000 Subject: [PATCH 0935/1304] sctp: move trace_sctp_probe_path into sctp_outq_sack [ Upstream commit f643ee295c1c63bc117fb052d4da681354d6f732 ] The original patch bringed in the "SCTP ACK tracking trace event" feature was committed at Dec.20, 2017, it replaced jprobe usage with trace events, and bringed in two trace events, one is TRACE_EVENT(sctp_probe), another one is TRACE_EVENT(sctp_probe_path). The original patch intended to trigger the trace_sctp_probe_path in TRACE_EVENT(sctp_probe) as below code, +TRACE_EVENT(sctp_probe, + + TP_PROTO(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk), + + TP_ARGS(ep, asoc, chunk), + + TP_STRUCT__entry( + __field(__u64, asoc) + __field(__u32, mark) + __field(__u16, bind_port) + __field(__u16, peer_port) + __field(__u32, pathmtu) + __field(__u32, rwnd) + __field(__u16, unack_data) + ), + + TP_fast_assign( + struct sk_buff *skb = chunk->skb; + + __entry->asoc = (unsigned long)asoc; + __entry->mark = skb->mark; + __entry->bind_port = ep->base.bind_addr.port; + __entry->peer_port = asoc->peer.port; + __entry->pathmtu = asoc->pathmtu; + __entry->rwnd = asoc->peer.rwnd; + __entry->unack_data = asoc->unack_data; + + if (trace_sctp_probe_path_enabled()) { + struct sctp_transport *sp; + + list_for_each_entry(sp, &asoc->peer.transport_addr_list, + transports) { + trace_sctp_probe_path(sp, asoc); + } + } + ), But I found it did not work when I did testing, and trace_sctp_probe_path had no output, I finally found that there is trace buffer lock operation(trace_event_buffer_reserve) in 
include/trace/trace_events.h: static notrace void \ trace_event_raw_event_##call(void *__data, proto) \ { \ struct trace_event_file *trace_file = __data; \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ struct trace_event_buffer fbuffer; \ struct trace_event_raw_##call *entry; \ int __data_size; \ \ if (trace_trigger_soft_disabled(trace_file)) \ return; \ \ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \ \ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ sizeof(*entry) + __data_size); \ \ if (!entry) \ return; \ \ tstruct \ \ { assign; } \ \ trace_event_buffer_commit(&fbuffer); \ } The reason caused no output of trace_sctp_probe_path is that trace_sctp_probe_path written in TP_fast_assign part of TRACE_EVENT(sctp_probe), and it will be placed( { assign; } ) after the trace_event_buffer_reserve() when compiler expands Macro, entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ sizeof(*entry) + __data_size); \ \ if (!entry) \ return; \ \ tstruct \ \ { assign; } \ so trace_sctp_probe_path finally can not acquire trace_event_buffer and return no output, that is to say the nest of tracepoint entry function is not allowed. The function call flow is: trace_sctp_probe() -> trace_event_raw_event_sctp_probe() -> lock buffer -> trace_sctp_probe_path() -> trace_event_raw_event_sctp_probe_path() --nested -> buffer has been locked and return no output. This patch is to remove trace_sctp_probe_path from the TP_fast_assign part of TRACE_EVENT(sctp_probe) to avoid the nest of entry function, and trigger sctp_probe_path_trace in sctp_outq_sack. After this patch, you can enable both events individually, # cd /sys/kernel/debug/tracing # echo 1 > events/sctp/sctp_probe/enable # echo 1 > events/sctp/sctp_probe_path/enable Or, you can enable all the events under sctp. # echo 1 > events/sctp/enable Signed-off-by: Kevin Kou Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- include/trace/events/sctp.h | 9 --------- net/sctp/outqueue.c | 6 ++++++ 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h index 7475c7be165a..d4aac3436595 100644 --- a/include/trace/events/sctp.h +++ b/include/trace/events/sctp.h @@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe, __entry->pathmtu = asoc->pathmtu; __entry->rwnd = asoc->peer.rwnd; __entry->unack_data = asoc->unack_data; - - if (trace_sctp_probe_path_enabled()) { - struct sctp_transport *sp; - - list_for_each_entry(sp, &asoc->peer.transport_addr_list, - transports) { - trace_sctp_probe_path(sp, asoc); - } - } ), TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d " diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 7bb8e5603298..d6e83a37a1ad 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -51,6 +51,7 @@ #include #include #include +#include /* Declare internal functions here. */ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); @@ -1257,6 +1258,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) /* Grab the association's destination address list. */ transport_list = &asoc->peer.transport_addr_list; + /* SCTP path tracepoint for congestion control debugging. */ + list_for_each_entry(transport, transport_list, transports) { + trace_sctp_probe_path(transport, asoc); + } + sack_ctsn = ntohl(sack->cum_tsn_ack); gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); asoc->stats.gapcnt += gap_ack_blocks; -- GitLab From 4913d773d113b1f61620baeadaa9d8ef3a4400c1 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Fri, 27 Dec 2019 11:04:21 +0100 Subject: [PATCH 0936/1304] ACPI: EC: Reference count query handlers under lock [ Upstream commit 3df663a147fe077a6ee8444ec626738946e65547 ] There is a race condition in acpi_ec_get_query_handler() theoretically allowing query handlers to go away before refernce counting them. 
In order to avoid it, call kref_get() on query handlers under ec->mutex. Also simplify the code a bit while at it. Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin --- drivers/acpi/ec.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 49e16f009095..9415a0041aaf 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1080,29 +1080,21 @@ void acpi_ec_dispatch_gpe(void) /* -------------------------------------------------------------------------- Event Management -------------------------------------------------------------------------- */ -static struct acpi_ec_query_handler * -acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler) -{ - if (handler) - kref_get(&handler->kref); - return handler; -} - static struct acpi_ec_query_handler * acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value) { struct acpi_ec_query_handler *handler; - bool found = false; mutex_lock(&ec->mutex); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { - found = true; - break; + kref_get(&handler->kref); + mutex_unlock(&ec->mutex); + return handler; } } mutex_unlock(&ec->mutex); - return found ? acpi_ec_get_query_handler(handler) : NULL; + return NULL; } static void acpi_ec_query_handler_release(struct kref *kref) -- GitLab From 2f26915042bd914cf9e7d3fbf68bca6daa99ec6d Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 24 Dec 2019 14:02:44 -0800 Subject: [PATCH 0937/1304] scsi: ufs: Make ufshcd_add_command_trace() easier to read [ Upstream commit e4d2add7fd5bc64ee3e388eabe6b9e081cb42e11 ] Since the lrbp->cmd expression occurs multiple times, introduce a new local variable to hold that pointer. This patch does not change any functionality. 
Cc: Bean Huo Cc: Can Guo Cc: Avri Altman Cc: Stanley Chu Cc: Tomas Winkler Link: https://lore.kernel.org/r/20191224220248.30138-3-bvanassche@acm.org Reviewed-by: Stanley Chu Reviewed-by: Can Guo Reviewed-by: Alim Akhtar Signed-off-by: Bart Van Assche Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/ufs/ufshcd.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index eb10a5cacd90..faf195998178 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -353,27 +353,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, u8 opcode = 0; u32 intr, doorbell; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + struct scsi_cmnd *cmd = lrbp->cmd; int transfer_len = -1; if (!trace_ufshcd_command_enabled()) { /* trace UPIU W/O tracing command */ - if (lrbp->cmd) + if (cmd) ufshcd_add_cmd_upiu_trace(hba, tag, str); return; } - if (lrbp->cmd) { /* data phase exists */ + if (cmd) { /* data phase exists */ /* trace UPIU also */ ufshcd_add_cmd_upiu_trace(hba, tag, str); - opcode = (u8)(*lrbp->cmd->cmnd); + opcode = cmd->cmnd[0]; if ((opcode == READ_10) || (opcode == WRITE_10)) { /* * Currently we only fully trace read(10) and write(10) * commands */ - if (lrbp->cmd->request && lrbp->cmd->request->bio) - lba = - lrbp->cmd->request->bio->bi_iter.bi_sector; + if (cmd->request && cmd->request->bio) + lba = cmd->request->bio->bi_iter.bi_sector; transfer_len = be32_to_cpu( lrbp->ucd_req_ptr->sc.exp_data_transfer_len); } -- GitLab From 714ddb5ec8af1bb2c24878bf61ea2a1b7cb23735 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 24 Dec 2019 14:02:46 -0800 Subject: [PATCH 0938/1304] scsi: ufs: Fix a race condition in the tracing code [ Upstream commit eacf36f5bebde5089dddb3d5bfcbeab530b01f8a ] Starting execution of a command before tracing a command may cause the completion handler to free data while it is being traced. 
Fix this race by tracing a command before it is submitted. Cc: Bean Huo Cc: Can Guo Cc: Avri Altman Cc: Stanley Chu Cc: Tomas Winkler Link: https://lore.kernel.org/r/20191224220248.30138-5-bvanassche@acm.org Reviewed-by: Alim Akhtar Signed-off-by: Bart Van Assche Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/ufs/ufshcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index faf195998178..b2cbdd01ab10 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -1910,12 +1910,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { hba->lrb[task_tag].issue_time_stamp = ktime_get(); hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); + ufshcd_add_command_trace(hba, task_tag, "send"); ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); - ufshcd_add_command_trace(hba, task_tag, "send"); } /** -- GitLab From 2708ed6048bc8439f289e7b6a33c8a5a660e446b Mon Sep 17 00:00:00 2001 From: Matthias Fend Date: Wed, 15 Jan 2020 11:22:49 +0100 Subject: [PATCH 0939/1304] dmaengine: zynqmp_dma: fix burst length configuration [ Upstream commit cc88525ebffc757e00cc5a5d61da6271646c7f5f ] Since the dma engine expects the burst length register content as power of 2 value, the burst length needs to be converted first. Additionally add a burst length range check to avoid corrupting unrelated register bits. 
Signed-off-by: Matthias Fend Link: https://lore.kernel.org/r/20200115102249.24398-1-matthias.fend@wolfvision.net Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/xilinx/zynqmp_dma.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 73de6a6179fc..e002ff8413e2 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -127,10 +127,12 @@ /* Max transfer size per descriptor */ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 +/* Max burst lengths */ +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U + /* Reset values for data attributes */ #define ZYNQMP_DMA_AXCACHE_VAL 0xF -#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF -#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F @@ -536,17 +538,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) { - u32 val; + u32 val, burst_val; val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val |= ZYNQMP_DMA_POINT_TYPE_SG; writel(val, chan->regs + ZYNQMP_DMA_CTRL0); val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + burst_val = __ilog2_u32(chan->src_burst_len); val = (val & ~ZYNQMP_DMA_ARLEN) | - (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); + burst_val = __ilog2_u32(chan->dst_burst_len); val = (val & ~ZYNQMP_DMA_AWLEN) | - (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); } @@ -562,8 +566,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, { struct zynqmp_dma_chan *chan = to_chan(dchan); - chan->src_burst_len = config->src_maxburst; - chan->dst_burst_len = config->dst_maxburst; + chan->src_burst_len = clamp(config->src_maxburst, 1U, + 
ZYNQMP_DMA_MAX_SRC_BURST_LEN); + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, + ZYNQMP_DMA_MAX_DST_BURST_LEN); return 0; } @@ -884,8 +890,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, return PTR_ERR(chan->regs); chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; - chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; - chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); if (err < 0) { dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); -- GitLab From a356441de6d18b1960652b221e014253e439d45d Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 19 Dec 2019 14:56:13 +0100 Subject: [PATCH 0940/1304] s390/cpum_sf: Use kzalloc and minor changes [ Upstream commit 32dab6828c42f087439d3e2617dc7283546bd8f7 ] Use kzalloc() to allocate auxiliary buffer structure initialized with all zeroes to avoid random value in trace output. Avoid double access to SBD hardware flags. 
Signed-off-by: Thomas Richter Signed-off-by: Vasily Gorbik Signed-off-by: Sasha Levin --- arch/s390/kernel/perf_cpum_sf.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 74a296cea21c..0e6d01225a67 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1377,8 +1377,8 @@ static int aux_output_begin(struct perf_output_handle *handle, idx = aux->empty_mark + 1; for (i = 0; i < range_scan; i++, idx++) { te = aux_sdb_trailer(aux, idx); - te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK; - te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK; + te->flags &= ~(SDB_TE_BUFFER_FULL_MASK | + SDB_TE_ALERT_REQ_MASK); te->overflow = 0; } /* Save the position of empty SDBs */ @@ -1425,8 +1425,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index, te = aux_sdb_trailer(aux, alert_index); do { orig_flags = te->flags; - orig_overflow = te->overflow; - *overflow = orig_overflow; + *overflow = orig_overflow = te->overflow; if (orig_flags & SDB_TE_BUFFER_FULL_MASK) { /* * SDB is already set by hardware. @@ -1660,7 +1659,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages, } /* Allocate aux_buffer struct for the event */ - aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL); + aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL); if (!aux) goto no_aux; sfb = &aux->sfb; -- GitLab From da67f7ae534fbb84d1cf9bba074ab7ef027ec26c Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Wed, 16 Oct 2019 12:25:36 +1100 Subject: [PATCH 0941/1304] powerpc/eeh: Only dump stack once if an MMIO loop is detected [ Upstream commit 4e0942c0302b5ad76b228b1a7b8c09f658a1d58a ] Many drivers don't check for errors when they get a 0xFFs response from an MMIO load. As a result after an EEH event occurs a driver can get stuck in a polling loop unless it some kind of internal timeout logic. 
Currently EEH tries to detect and report stuck drivers by dumping a stack trace after eeh_dev_check_failure() is called EEH_MAX_FAILS times on an already frozen PE. The value of EEH_MAX_FAILS was chosen so that a dump would occur every few seconds if the driver was spinning in a loop. This results in a lot of spurious stack traces in the kernel log. Fix this by limiting it to printing one stack trace for each PE freeze. If the driver is truely stuck the kernel's hung task detector is better suited to reporting the probelm anyway. Signed-off-by: Oliver O'Halloran Reviewed-by: Sam Bobroff Tested-by: Sam Bobroff Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20191016012536.22588-1-oohall@gmail.com Signed-off-by: Sasha Levin --- arch/powerpc/kernel/eeh.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index fe3c6f3bd3b6..d123cba0992d 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -502,7 +502,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) rc = 1; if (pe->state & EEH_PE_ISOLATED) { pe->check_count++; - if (pe->check_count % EEH_MAX_FAILS == 0) { + if (pe->check_count == EEH_MAX_FAILS) { dn = pci_device_to_OF_node(dev); if (dn) location = of_get_property(dn, "ibm,loc-code", -- GitLab From 4d04a4da5977084ff271e5565bf753c59c22f1ee Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Fri, 24 Jan 2020 19:15:35 +0200 Subject: [PATCH 0942/1304] Bluetooth: btrtl: Use kvmalloc for FW allocations [ Upstream commit 268d3636dfb22254324774de1f8875174b3be064 ] Currently, kmemdup is applied to the firmware data, and it invokes kmalloc under the hood. 
The firmware size and patch_length are big (more than PAGE_SIZE), and on some low-end systems (like ASUS E202SA) kmalloc may fail to allocate a contiguous chunk under high memory usage and fragmentation: Bluetooth: hci0: RTL: examining hci_ver=06 hci_rev=000a lmp_ver=06 lmp_subver=8821 Bluetooth: hci0: RTL: rom_version status=0 version=1 Bluetooth: hci0: RTL: loading rtl_bt/rtl8821a_fw.bin kworker/u9:2: page allocation failure: order:4, mode:0x40cc0(GFP_KERNEL|__GFP_COMP), nodemask=(null),cpuset=/,mems_allowed=0 As firmware load happens on each resume, Bluetooth will stop working after several iterations, when the kernel fails to allocate an order-4 page. This patch replaces kmemdup with kvmalloc+memcpy. It's not required to have a contiguous chunk here, because it's not mapped to the device directly. Signed-off-by: Maxim Mikityanskiy Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin --- drivers/bluetooth/btrtl.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 8d1cd2479e36..cc51395d8b0e 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -343,11 +343,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, * the end. 
*/ len = patch_length; - buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length, - GFP_KERNEL); + buf = kvmalloc(patch_length, GFP_KERNEL); if (!buf) return -ENOMEM; + memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4); memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); *_buf = buf; @@ -415,8 +415,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) if (ret < 0) return ret; ret = fw->size; - *buff = kmemdup(fw->data, ret, GFP_KERNEL); - if (!*buff) + *buff = kvmalloc(fw->size, GFP_KERNEL); + if (*buff) + memcpy(*buff, fw->data, ret); + else ret = -ENOMEM; release_firmware(fw); @@ -454,14 +456,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, goto out; if (btrtl_dev->cfg_len > 0) { - tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); + tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); if (!tbuff) { ret = -ENOMEM; goto out; } memcpy(tbuff, fw_data, ret); - kfree(fw_data); + kvfree(fw_data); memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len); ret += btrtl_dev->cfg_len; @@ -474,7 +476,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, ret = rtl_download_firmware(hdev, fw_data, ret); out: - kfree(fw_data); + kvfree(fw_data); return ret; } @@ -501,8 +503,8 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) void btrtl_free(struct btrtl_device_info *btrtl_dev) { - kfree(btrtl_dev->fw_data); - kfree(btrtl_dev->cfg_data); + kvfree(btrtl_dev->fw_data); + kvfree(btrtl_dev->cfg_data); kfree(btrtl_dev); } EXPORT_SYMBOL_GPL(btrtl_free); -- GitLab From 72913876dc5fe6ae97963b5674c1797b19f6efcd Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 24 Sep 2014 16:14:12 -0400 Subject: [PATCH 0943/1304] tracing: Set kernel_stack's caller size properly [ Upstream commit cbc3b92ce037f5e7536f6db157d185cd8b8f615c ] I noticed when trying to use the trace-cmd python interface that reading the raw buffer wasn't working for kernel_stack events. 
This is because it uses a stubbed version of __dynamic_array that doesn't do the __data_loc trick and encode the length of the array into the field. Instead it just shows up as a size of 0. So change this to __array and set the len to FTRACE_STACK_ENTRIES since this is what we actually do in practice and matches how user_stack_trace works. Link: http://lkml.kernel.org/r/1411589652-1318-1-git-send-email-jbacik@fb.com Signed-off-by: Josef Bacik [ Pulled from the archeological digging of my INBOX ] Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Sasha Levin --- kernel/trace/trace_entries.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index 06bb2fd9a56c..a97aad105d36 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -179,7 +179,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry, F_STRUCT( __field( int, size ) - __dynamic_array(unsigned long, caller ) + __array( unsigned long, caller, FTRACE_STACK_ENTRIES ) ), F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n" -- GitLab From cd0c2804720e2ffb0360ca3a2c7b442ed009ea4b Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Mon, 16 Dec 2019 11:48:28 +0100 Subject: [PATCH 0944/1304] ARM: 8948/1: Prevent OOB access in stacktrace [ Upstream commit 40ff1ddb5570284e039e0ff14d7a859a73dc3673 ] The stacktrace code can read beyond the stack size, when it attempts to read pt_regs from exception frames. This can happen on normal, non-corrupt stacks. Since the unwind information in the extable is not correct for function prologues, the unwinding code can return data from the stack which is not actually the caller function address, and if in_entry_text() happens to succeed on this value, we can end up reading data from outside the task's stack when attempting to read pt_regs, since there is no bounds check. 
Example: [<8010e729>] (unwind_backtrace) from [<8010a9c9>] (show_stack+0x11/0x14) [<8010a9c9>] (show_stack) from [<8057d8d7>] (dump_stack+0x87/0xac) [<8057d8d7>] (dump_stack) from [<8012271d>] (tasklet_action_common.constprop.4+0xa5/0xa8) [<8012271d>] (tasklet_action_common.constprop.4) from [<80102333>] (__do_softirq+0x11b/0x31c) [<80102333>] (__do_softirq) from [<80122485>] (irq_exit+0xad/0xd8) [<80122485>] (irq_exit) from [<8015f3d7>] (__handle_domain_irq+0x47/0x84) [<8015f3d7>] (__handle_domain_irq) from [<8036a523>] (gic_handle_irq+0x43/0x78) [<8036a523>] (gic_handle_irq) from [<80101a49>] (__irq_svc+0x69/0xb4) Exception stack(0xeb491f58 to 0xeb491fa0) 1f40: 7eb14794 00000000 1f60: ffffffff 008dd32c 008dd324 ffffffff 008dd314 0000002a 801011e4 eb490000 1f80: 0000002a 7eb1478c 50c5387d eb491fa8 80101001 8023d09c 40080033 ffffffff [<80101a49>] (__irq_svc) from [<8023d09c>] (do_pipe2+0x0/0xac) [<8023d09c>] (do_pipe2) from [] (0xffffffff) Exception stack(0xeb491fc8 to 0xeb492010) 1fc0: 008dd314 0000002a 00511ad8 008de4c8 7eb14790 7eb1478c 1fe0: 00511e34 7eb14774 004c8557 76f44098 60080030 7eb14794 00000000 00000000 2000: 00000001 00000000 ea846c00 ea847cc0 In this example, the stack limit is 0xeb492000, but 16 bytes outside the stack have been read. Fix it by adding bounds checks. 
Signed-off-by: Vincent Whitchurch Signed-off-by: Russell King Signed-off-by: Sasha Levin --- arch/arm/kernel/stacktrace.c | 2 ++ arch/arm/kernel/traps.c | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index a4d4a28fe07d..d23ab9ec130a 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -115,6 +115,8 @@ static int save_trace(struct stackframe *frame, void *d) return 0; regs = (struct pt_regs *)frame->sp; + if ((unsigned long)®s[1] > ALIGN(frame->sp, THREAD_SIZE)) + return 0; trace->entries[trace->nr_entries++] = regs->ARM_pc; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index badf02ca3693..aec533168f04 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -67,14 +67,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long); void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) { + unsigned long end = frame + 4 + sizeof(struct pt_regs); + #ifdef CONFIG_KALLSYMS printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); #else printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); #endif - if (in_entry_text(from)) - dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); + if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE)) + dump_mem("", "Exception stack", frame + 4, end); } void dump_backtrace_stm(u32 *stack, u32 instruction) -- GitLab From a567bac12f116deb32f1a5b78346872ce1256e3d Mon Sep 17 00:00:00 2001 From: Mert Dirik Date: Thu, 16 Jan 2020 14:11:25 +0300 Subject: [PATCH 0945/1304] ar5523: Add USB ID of SMCWUSBT-G2 wireless adapter [ Upstream commit 5b362498a79631f283578b64bf6f4d15ed4cc19a ] Add the required USB ID for running SMCWUSBT-G2 wireless adapter (SMC "EZ Connect g"). This device uses ar5523 chipset and requires firmware to be loaded. 
Even though pid of the device is 4507, this patch adds it as 4506 so that AR5523_DEVICE_UG macro can set the AR5523_FLAG_PRE_FIRMWARE flag for pid 4507. Signed-off-by: Mert Dirik Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ar5523/ar5523.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index da2d179430ca..4c57e79e5779 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = { AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */ + AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect + SMCWUSBT-G2 */ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */ -- GitLab From 4d9cf934d67f32712e444587022aef17bba85276 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 5 Dec 2019 08:41:25 -0500 Subject: [PATCH 0946/1304] ceph: ensure we have a new cap before continuing in fill_inode [ Upstream commit 9a6bed4fe0c8bf57785cbc4db9f86086cb9b193d ] If the caller passes in a NULL cap_reservation, and we can't allocate one then ensure that we fail gracefully. 
Signed-off-by: Jeff Layton Signed-off-by: Ilya Dryomov Signed-off-by: Sasha Levin --- fs/ceph/inode.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 1e438e0faf77..3c24fb77ef32 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -764,8 +764,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page, info_caps = le32_to_cpu(info->cap.caps); /* prealloc new cap struct */ - if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) + if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) { new_cap = ceph_get_cap(mdsc, caps_reservation); + if (!new_cap) + return -ENOMEM; + } /* * prealloc xattr data, if it looks like we'll need it. only -- GitLab From b3c930914ea92761dc2982f0195d20809ce2d518 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Tue, 28 Jan 2020 09:30:29 +0100 Subject: [PATCH 0947/1304] selftests/ftrace: fix glob selftest [ Upstream commit af4ddd607dff7aabd466a4a878e01b9f592a75ab ] test.d/ftrace/func-filter-glob.tc is failing on s390 because it has ARCH_INLINE_SPIN_LOCK and friends set to 'y'. So the usual __raw_spin_lock symbol isn't in the ftrace function list. Change '*aw*lock' to '*spin*lock' which would hopefully match some of the locking functions on all platforms. 
Reviewed-by: Steven Rostedt (VMware) Signed-off-by: Sven Schnelle Signed-off-by: Shuah Khan Signed-off-by: Sasha Levin --- .../testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc index 27a54a17da65..f4e92afab14b 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc @@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$' ftrace_filter_check 'schedule*' '^schedule.*$' # filter by *mid*end -ftrace_filter_check '*aw*lock' '.*aw.*lock$' +ftrace_filter_check '*pin*lock' '.*pin.*lock$' # filter by start*mid* ftrace_filter_check 'mutex*try*' '^mutex.*try.*' -- GitLab From fd0956234c72ce13a765ea814942ed11654d3b6e Mon Sep 17 00:00:00 2001 From: Doug Smythies Date: Mon, 27 Jan 2020 19:59:56 -0800 Subject: [PATCH 0948/1304] tools/power/x86/intel_pstate_tracer: changes for python 3 compatibility [ Upstream commit e749e09db30c38f1a275945814b0109e530a07b0 ] Some syntax needs to be more rigorous for python 3. Backwards compatibility tested with python 2.7 Signed-off-by: Doug Smythies Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin --- .../intel_pstate_tracer.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py index 2fa3c5757bcb..dbed3d213bf1 100755 --- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py +++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py @@ -10,11 +10,11 @@ then this utility enables and collects trace data for a user specified interval and generates performance plots. 
Prerequisites: - Python version 2.7.x + Python version 2.7.x or higher gnuplot 5.0 or higher - gnuplot-py 1.8 + gnuplot-py 1.8 or higher (Most of the distributions have these required packages. They may be called - gnuplot-py, phython-gnuplot. ) + gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... ) HWP (Hardware P-States are disabled) Kernel config for Linux trace is enabled @@ -180,7 +180,7 @@ def plot_pstate_cpu_with_sample(): g_plot('set xlabel "Samples"') g_plot('set ylabel "P-State"') g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -197,7 +197,7 @@ def plot_pstate_cpu(): # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file. 
# plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s' # - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -211,7 +211,7 @@ def plot_load_cpu(): g_plot('set ylabel "CPU load (percent)"') g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -225,7 +225,7 @@ def plot_frequency_cpu(): g_plot('set ylabel "CPU Frequency (GHz)"') g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -240,7 +240,7 @@ def plot_duration_cpu(): g_plot('set ylabel "Timer Duration (MilliSeconds)"') g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list 
= subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -254,7 +254,7 @@ def plot_scaled_cpu(): g_plot('set ylabel "Scaled Busy (Unitless)"') g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -268,7 +268,7 @@ def plot_boost_cpu(): g_plot('set ylabel "CPU IO Boost (percent)"') g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST) g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) @@ -282,7 +282,7 @@ def plot_ghz_cpu(): g_plot('set ylabel "TSC Frequency (GHz)"') g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now())) - title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ') + title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ') plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ) 
g_plot('title_list = "{}"'.format(title_list)) g_plot(plot_str) -- GitLab From 3b73af6cb9550fda83ee09b8cf0a5992967eeae5 Mon Sep 17 00:00:00 2001 From: Manish Mandlik Date: Tue, 28 Jan 2020 10:54:14 -0800 Subject: [PATCH 0949/1304] Bluetooth: Fix refcount use-after-free issue [ Upstream commit 6c08fc896b60893c5d673764b0668015d76df462 ] There is no lock preventing both l2cap_sock_release() and chan->ops->close() from running at the same time. If we consider Thread A running l2cap_chan_timeout() and Thread B running l2cap_sock_release(), expected behavior is: A::l2cap_chan_timeout()->l2cap_chan_close()->l2cap_sock_teardown_cb() A::l2cap_chan_timeout()->l2cap_sock_close_cb()->l2cap_sock_kill() B::l2cap_sock_release()->sock_orphan() B::l2cap_sock_release()->l2cap_sock_kill() where, sock_orphan() clears "sk->sk_socket" and l2cap_sock_teardown_cb() marks socket as SOCK_ZAPPED. In l2cap_sock_kill(), there is an "if-statement" that checks if both sock_orphan() and sock_teardown() has been run i.e. sk->sk_socket is NULL and socket is marked as SOCK_ZAPPED. Socket is killed if the condition is satisfied. In the race condition, following occurs: A::l2cap_chan_timeout()->l2cap_chan_close()->l2cap_sock_teardown_cb() B::l2cap_sock_release()->sock_orphan() B::l2cap_sock_release()->l2cap_sock_kill() A::l2cap_chan_timeout()->l2cap_sock_close_cb()->l2cap_sock_kill() In this scenario, "if-statement" is true in both B::l2cap_sock_kill() and A::l2cap_sock_kill() and we hit "refcount: underflow; use-after-free" bug. 
Similar condition occurs at other places where teardown/sock_kill is happening: l2cap_disconnect_rsp()->l2cap_chan_del()->l2cap_sock_teardown_cb() l2cap_disconnect_rsp()->l2cap_sock_close_cb()->l2cap_sock_kill() l2cap_conn_del()->l2cap_chan_del()->l2cap_sock_teardown_cb() l2cap_conn_del()->l2cap_sock_close_cb()->l2cap_sock_kill() l2cap_disconnect_req()->l2cap_chan_del()->l2cap_sock_teardown_cb() l2cap_disconnect_req()->l2cap_sock_close_cb()->l2cap_sock_kill() l2cap_sock_cleanup_listen()->l2cap_chan_close()->l2cap_sock_teardown_cb() l2cap_sock_cleanup_listen()->l2cap_sock_kill() Protect teardown/sock_kill and orphan/sock_kill by adding hold_lock on l2cap channel to ensure that the socket is killed only after marked as zapped and orphan. Signed-off-by: Manish Mandlik Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin --- net/bluetooth/l2cap_core.c | 26 +++++++++++++++----------- net/bluetooth/l2cap_sock.c | 16 +++++++++++++--- 2 files changed, 28 insertions(+), 14 deletions(-) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 0d84d1f820d4..b1f51cb007ea 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -414,6 +414,9 @@ static void l2cap_chan_timeout(struct work_struct *work) BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); mutex_lock(&conn->chan_lock); + /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling + * this work. No need to call l2cap_chan_hold(chan) here again. 
+ */ l2cap_chan_lock(chan); if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) @@ -426,12 +429,12 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_close(chan, reason); - l2cap_chan_unlock(chan); - chan->ops->close(chan); - mutex_unlock(&conn->chan_lock); + l2cap_chan_unlock(chan); l2cap_chan_put(chan); + + mutex_unlock(&conn->chan_lock); } struct l2cap_chan *l2cap_chan_create(void) @@ -1725,9 +1728,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_chan_del(chan, err); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); } @@ -4337,6 +4340,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, return 0; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); rsp.dcid = cpu_to_le16(chan->scid); @@ -4345,12 +4349,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, chan->ops->set_shutdown(chan); - l2cap_chan_hold(chan); l2cap_chan_del(chan, ECONNRESET); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -4382,20 +4385,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, return 0; } + l2cap_chan_hold(chan); l2cap_chan_lock(chan); if (chan->state != BT_DISCONN) { l2cap_chan_unlock(chan); + l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); return 0; } - l2cap_chan_hold(chan); l2cap_chan_del(chan, 0); - l2cap_chan_unlock(chan); - chan->ops->close(chan); + + l2cap_chan_unlock(chan); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a3a2cd55e23a..d128750e4730 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1039,7 +1039,7 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, } /* Kill socket (only if zapped and orphan) - * Must be called on unlocked socket. 
+ * Must be called on unlocked socket, with l2cap channel lock. */ static void l2cap_sock_kill(struct sock *sk) { @@ -1200,8 +1200,15 @@ static int l2cap_sock_release(struct socket *sock) err = l2cap_sock_shutdown(sock, 2); + l2cap_chan_hold(l2cap_pi(sk)->chan); + l2cap_chan_lock(l2cap_pi(sk)->chan); + sock_orphan(sk); l2cap_sock_kill(sk); + + l2cap_chan_unlock(l2cap_pi(sk)->chan); + l2cap_chan_put(l2cap_pi(sk)->chan); + return err; } @@ -1219,12 +1226,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent) BT_DBG("child chan %p state %s", chan, state_to_string(chan->state)); + l2cap_chan_hold(chan); l2cap_chan_lock(chan); + __clear_chan_timer(chan); l2cap_chan_close(chan, ECONNRESET); - l2cap_chan_unlock(chan); - l2cap_sock_kill(sk); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } } -- GitLab From 52f5a09ab7583ed497fc4b331311d71b7d8a6e12 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Thu, 30 Jan 2020 22:13:39 -0800 Subject: [PATCH 0950/1304] mm/swapfile.c: swap_next should increase position index [ Upstream commit 10c8d69f314d557d94d74ec492575ae6a4f1eb1c ] If seq_file .next fuction does not change position index, read after some lseek can generate unexpected output. In Aug 2018 NeilBrown noticed commit 1f4aace60b0e ("fs/seq_file.c: simplify seq_file iteration code and interface") "Some ->next functions do not increment *pos when they return NULL... Note that such ->next functions are buggy and should be fixed. A simple demonstration is dd if=/proc/swaps bs=1000 skip=1 Choose any block size larger than the size of /proc/swaps. This will always show the whole last line of /proc/swaps" Described problem is still actual. If you make lseek into middle of last output line following read will output end of last line and whole last line once again. 
$ dd if=/proc/swaps bs=1 # usual output Filename Type Size Used Priority /dev/dm-0 partition 4194812 97536 -2 104+0 records in 104+0 records out 104 bytes copied $ dd if=/proc/swaps bs=40 skip=1 # last line was generated twice dd: /proc/swaps: cannot skip to specified offset v/dm-0 partition 4194812 97536 -2 /dev/dm-0 partition 4194812 97536 -2 3+1 records in 3+1 records out 131 bytes copied https://bugzilla.kernel.org/show_bug.cgi?id=206283 Link: http://lkml.kernel.org/r/bd8cfd7b-ac95-9b91-f9e7-e8438bd5047d@virtuozzo.com Signed-off-by: Vasily Averin Reviewed-by: Andrew Morton Cc: Jann Horn Cc: Alexander Viro Cc: Kees Cook Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/swapfile.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 0047dcaf9369..c3684cfa9534 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2738,10 +2738,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) else type = si->type + 1; + ++(*pos); for (; (si = swap_type_to_swap_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; - ++*pos; return si; } -- GitLab From f9cb6b6124ac3e1586251329e51b898f1d0a54e6 Mon Sep 17 00:00:00 2001 From: Steven Price Date: Mon, 3 Feb 2020 17:35:58 -0800 Subject: [PATCH 0951/1304] mm: pagewalk: fix termination condition in walk_pte_range() [ Upstream commit c02a98753e0a36ba65a05818626fa6adeb4e7c97 ] If walk_pte_range() is called with a 'end' argument that is beyond the last page of memory (e.g. ~0UL) then the comparison between 'addr' and 'end' will always fail and the loop will be infinite. Instead change the comparison to >= while accounting for overflow. 
Link: http://lkml.kernel.org/r/20191218162402.45610-15-steven.price@arm.com Signed-off-by: Steven Price Cc: Albert Ou Cc: Alexandre Ghiti Cc: Andy Lutomirski Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Benjamin Herrenschmidt Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Dave Hansen Cc: David S. Miller Cc: Heiko Carstens Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Hogan Cc: James Morse Cc: Jerome Glisse Cc: "Liang, Kan" Cc: Mark Rutland Cc: Michael Ellerman Cc: Paul Burton Cc: Paul Mackerras Cc: Paul Walmsley Cc: Peter Zijlstra Cc: Ralf Baechle Cc: Russell King Cc: Thomas Gleixner Cc: Vasily Gorbik Cc: Vineet Gupta Cc: Will Deacon Cc: Zong Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/pagewalk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/pagewalk.c b/mm/pagewalk.c index c3084ff2569d..3c0930d94a29 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -15,9 +15,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk); if (err) break; - addr += PAGE_SIZE; - if (addr == end) + if (addr >= end - PAGE_SIZE) break; + addr += PAGE_SIZE; pte++; } -- GitLab From 4b0795d5b615de48541eae1a87b5a37543e1c17f Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Wed, 5 Feb 2020 10:31:59 +0800 Subject: [PATCH 0952/1304] Bluetooth: prefetch channel before killing sock [ Upstream commit 2a154903cec20fb64ff4d7d617ca53c16f8fd53a ] Prefetch channel before killing sock in order to fix UAF like BUG: KASAN: use-after-free in l2cap_sock_release+0x24c/0x290 net/bluetooth/l2cap_sock.c:1212 Read of size 8 at addr ffff8880944904a0 by task syz-fuzzer/9751 Reported-by: syzbot+c3c5bdea7863886115dc@syzkaller.appspotmail.com Fixes: 6c08fc896b60 ("Bluetooth: Fix refcount use-after-free issue") Cc: Manish Mandlik Signed-off-by: Hillf Danton Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin --- net/bluetooth/l2cap_sock.c 
| 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index d128750e4730..5572042f0453 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1190,6 +1190,7 @@ static int l2cap_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; + struct l2cap_chan *chan; BT_DBG("sock %p, sk %p", sock, sk); @@ -1199,15 +1200,16 @@ static int l2cap_sock_release(struct socket *sock) bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, 2); + chan = l2cap_pi(sk)->chan; - l2cap_chan_hold(l2cap_pi(sk)->chan); - l2cap_chan_lock(l2cap_pi(sk)->chan); + l2cap_chan_hold(chan); + l2cap_chan_lock(chan); sock_orphan(sk); l2cap_sock_kill(sk); - l2cap_chan_unlock(l2cap_pi(sk)->chan); - l2cap_chan_put(l2cap_pi(sk)->chan); + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); return err; } -- GitLab From 58f79f42e53ee35c9270c2205f34a62924d9e216 Mon Sep 17 00:00:00 2001 From: Zhuang Yanying Date: Sat, 12 Oct 2019 11:37:31 +0800 Subject: [PATCH 0953/1304] KVM: fix overflow of zero page refcount with ksm running MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 7df003c85218b5f5b10a7f6418208f31e813f38f ] We are testing Virtual Machine with KSM on v5.4-rc2 kernel, and found the zero_page refcount overflow. The cause of refcount overflow is increased in try_async_pf (get_user_page) without being decreased in mmu_set_spte() while handling ept violation. In kvm_release_pfn_clean(), only unreserved page will call put_page. However, zero page is reserved. So, as well as creating and destroy vm, the refcount of zero page will continue to increase until it overflows. step1: echo 10000 > /sys/kernel/pages_to_scan/pages_to_scan echo 1 > /sys/kernel/pages_to_scan/run echo 1 > /sys/kernel/pages_to_scan/use_zero_pages step2: just create several normal qemu kvm vms. And destroy it after 10s. Repeat this action all the time. 
After a long period of time, all domains hang because of the refcount of zero page overflow. Qemu print error log as follow: … error: kvm run failed Bad address EAX=00006cdc EBX=00000008 ECX=80202001 EDX=078bfbfd ESI=ffffffff EDI=00000000 EBP=00000008 ESP=00006cc4 EIP=000efd75 EFL=00010002 [-------] CPL=0 II=0 A20=1 SMM=0 HLT=0 ES =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA] CS =0008 00000000 ffffffff 00c09b00 DPL=0 CS32 [-RA] SS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA] DS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA] FS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA] GS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA] LDT=0000 00000000 0000ffff 00008200 DPL=0 LDT TR =0000 00000000 0000ffff 00008b00 DPL=0 TSS32-busy GDT= 000f7070 00000037 IDT= 000f70ae 00000000 CR0=00000011 CR2=00000000 CR3=00000000 CR4=00000000 DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 DR3=0000000000000000 DR6=00000000ffff0ff0 DR7=0000000000000400 EFER=0000000000000000 Code=00 01 00 00 00 e9 e8 00 00 00 c7 05 4c 55 0f 00 01 00 00 00 <8b> 35 00 00 01 00 8b 3d 04 00 01 00 b8 d8 d3 00 00 c1 e0 08 0c ea a3 00 00 01 00 c7 05 04 … Meanwhile, a kernel warning is departed. 
[40914.836375] WARNING: CPU: 3 PID: 82067 at ./include/linux/mm.h:987 try_get_page+0x1f/0x30 [40914.836412] CPU: 3 PID: 82067 Comm: CPU 0/KVM Kdump: loaded Tainted: G OE 5.2.0-rc2 #5 [40914.836415] RIP: 0010:try_get_page+0x1f/0x30 [40914.836417] Code: 40 00 c3 0f 1f 84 00 00 00 00 00 48 8b 47 08 a8 01 75 11 8b 47 34 85 c0 7e 10 f0 ff 47 34 b8 01 00 00 00 c3 48 8d 78 ff eb e9 <0f> 0b 31 c0 c3 66 90 66 2e 0f 1f 84 00 0 0 00 00 00 48 8b 47 08 a8 [40914.836418] RSP: 0018:ffffb4144e523988 EFLAGS: 00010286 [40914.836419] RAX: 0000000080000000 RBX: 0000000000000326 RCX: 0000000000000000 [40914.836420] RDX: 0000000000000000 RSI: 00004ffdeba10000 RDI: ffffdf07093f6440 [40914.836421] RBP: ffffdf07093f6440 R08: 800000424fd91225 R09: 0000000000000000 [40914.836421] R10: ffff9eb41bfeebb8 R11: 0000000000000000 R12: ffffdf06bbd1e8a8 [40914.836422] R13: 0000000000000080 R14: 800000424fd91225 R15: ffffdf07093f6440 [40914.836423] FS: 00007fb60ffff700(0000) GS:ffff9eb4802c0000(0000) knlGS:0000000000000000 [40914.836425] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [40914.836426] CR2: 0000000000000000 CR3: 0000002f220e6002 CR4: 00000000003626e0 [40914.836427] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [40914.836427] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [40914.836428] Call Trace: [40914.836433] follow_page_pte+0x302/0x47b [40914.836437] __get_user_pages+0xf1/0x7d0 [40914.836441] ? 
irq_work_queue+0x9/0x70 [40914.836443] get_user_pages_unlocked+0x13f/0x1e0 [40914.836469] __gfn_to_pfn_memslot+0x10e/0x400 [kvm] [40914.836486] try_async_pf+0x87/0x240 [kvm] [40914.836503] tdp_page_fault+0x139/0x270 [kvm] [40914.836523] kvm_mmu_page_fault+0x76/0x5e0 [kvm] [40914.836588] vcpu_enter_guest+0xb45/0x1570 [kvm] [40914.836632] kvm_arch_vcpu_ioctl_run+0x35d/0x580 [kvm] [40914.836645] kvm_vcpu_ioctl+0x26e/0x5d0 [kvm] [40914.836650] do_vfs_ioctl+0xa9/0x620 [40914.836653] ksys_ioctl+0x60/0x90 [40914.836654] __x64_sys_ioctl+0x16/0x20 [40914.836658] do_syscall_64+0x5b/0x180 [40914.836664] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [40914.836666] RIP: 0033:0x7fb61cb6bfc7 Signed-off-by: LinFeng Signed-off-by: Zhuang Yanying Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- virt/kvm/kvm_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6bd01d12df2e..9312c7e750ed 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -169,6 +169,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn) */ if (pfn_valid(pfn)) return PageReserved(pfn_to_page(pfn)) && + !is_zero_pfn(pfn) && !kvm_is_zone_device_pfn(pfn); return true; -- GitLab From 63cf8e58c21322bd57357fcb7ff19c3d7ef89b97 Mon Sep 17 00:00:00 2001 From: Mohan Kumar Date: Thu, 6 Feb 2020 15:40:53 +0530 Subject: [PATCH 0954/1304] ALSA: hda: Clear RIRB status before reading WP [ Upstream commit 6d011d5057ff88ee556c000ac6fe0be23bdfcd72 ] RIRB interrupt status getting cleared after the write pointer is read causes a race condition, where last response(s) into RIRB may remain unserviced by IRQ, eventually causing azx_rirb_get_response to fall back to polling mode. Clearing the RIRB interrupt status ahead of write pointer access ensures that this condition is avoided. 
Signed-off-by: Mohan Kumar Signed-off-by: Viswanath L Link: https://lore.kernel.org/r/1580983853-351-1-git-send-email-viswanathl@nvidia.com Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/pci/hda/hda_controller.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index fa261b27d858..8198d2e53b7d 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1169,16 +1169,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update)) active = true; - /* clear rirb int */ status = azx_readb(chip, RIRBSTS); if (status & RIRB_INT_MASK) { + /* + * Clearing the interrupt status here ensures that no + * interrupt gets masked after the RIRB wp is read in + * snd_hdac_bus_update_rirb. This avoids a possible + * race condition where codec response in RIRB may + * remain unserviced by IRQ, eventually falling back + * to polling mode in azx_rirb_get_response. 
+ */ + azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); active = true; if (status & RIRB_INT_RESPONSE) { if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) udelay(80); snd_hdac_bus_update_rirb(bus); } - azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); } } while (active && ++repeat < 10); -- GitLab From 1fea0234984c39748386a8b2eebaf8a1561e3075 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Tue, 4 Feb 2020 13:40:29 -0500 Subject: [PATCH 0955/1304] skbuff: fix a data race in skb_queue_len() [ Upstream commit 86b18aaa2b5b5bb48e609cd591b3d2d0fdbe0442 ] sk_buff.qlen can be accessed concurrently as noticed by KCSAN, BUG: KCSAN: data-race in __skb_try_recv_from_queue / unix_dgram_sendmsg read to 0xffff8a1b1d8a81c0 of 4 bytes by task 5371 on cpu 96: unix_dgram_sendmsg+0x9a9/0xb70 include/linux/skbuff.h:1821 net/unix/af_unix.c:1761 ____sys_sendmsg+0x33e/0x370 ___sys_sendmsg+0xa6/0xf0 __sys_sendmsg+0x69/0xf0 __x64_sys_sendmsg+0x51/0x70 do_syscall_64+0x91/0xb47 entry_SYSCALL_64_after_hwframe+0x49/0xbe write to 0xffff8a1b1d8a81c0 of 4 bytes by task 1 on cpu 99: __skb_try_recv_from_queue+0x327/0x410 include/linux/skbuff.h:2029 __skb_try_recv_datagram+0xbe/0x220 unix_dgram_recvmsg+0xee/0x850 ____sys_recvmsg+0x1fb/0x210 ___sys_recvmsg+0xa2/0xf0 __sys_recvmsg+0x66/0xf0 __x64_sys_recvmsg+0x51/0x70 do_syscall_64+0x91/0xb47 entry_SYSCALL_64_after_hwframe+0x49/0xbe Since only the read is operating as lockless, it could introduce a logic bug in unix_recvq_full() due to the load tearing. Fix it by adding a lockless variant of skb_queue_len() and unix_recvq_full() where READ_ONCE() is on the read while WRITE_ONCE() is on the write similar to the commit d7d16a89350a ("net: add skb_queue_empty_lockless()"). Signed-off-by: Qian Cai Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- include/linux/skbuff.h | 14 +++++++++++++- net/unix/af_unix.c | 11 +++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index cbc0294f3989..703ce71caeac 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1688,6 +1688,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_) return list_->qlen; } +/** + * skb_queue_len_lockless - get queue length + * @list_: list to measure + * + * Return the length of an &sk_buff queue. + * This variant can be used in lockless contexts. + */ +static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) +{ + return READ_ONCE(list_->qlen); +} + /** * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head * @list: queue to initialize @@ -1895,7 +1907,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) { struct sk_buff *next, *prev; - list->qlen--; + WRITE_ONCE(list->qlen, list->qlen - 1); next = skb->next; prev = skb->prev; skb->next = skb->prev = NULL; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 2318e2e2748f..2020306468af 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -192,11 +192,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk) return unix_peer(osk) == NULL || unix_our_peer(sk, osk); } -static inline int unix_recvq_full(struct sock const *sk) +static inline int unix_recvq_full(const struct sock *sk) { return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; } +static inline int unix_recvq_full_lockless(const struct sock *sk) +{ + return skb_queue_len_lockless(&sk->sk_receive_queue) > + READ_ONCE(sk->sk_max_ack_backlog); +} + struct sock *unix_peer_get(struct sock *s) { struct sock *peer; @@ -1788,7 +1794,8 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, * - unix_peer(sk) == sk by time of get but disconnected before lock */ if (other != sk 
&& - unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { + unlikely(unix_peer(other) != sk && + unix_recvq_full_lockless(other))) { if (timeo) { timeo = unix_wait_for_peer(other, timeo); -- GitLab From 67fd417f961254a409c2e64e026b9f2b41434c40 Mon Sep 17 00:00:00 2001 From: Steve Grubb Date: Fri, 24 Jan 2020 17:29:16 -0500 Subject: [PATCH 0956/1304] audit: CONFIG_CHANGE don't log internal bookkeeping as an event [ Upstream commit 70b3eeed49e8190d97139806f6fbaf8964306cdb ] Common Criteria calls out for any action that modifies the audit trail to be recorded. That usually is interpreted to mean insertion or removal of rules. It is not required to log modification of the inode information since the watch is still in effect. Additionally, if the rule is a never rule and the underlying file is one they do not want events for, they get an event for this bookkeeping update against their wishes. Since no device/inode info is logged at insertion and no device/inode information is logged on update, there is nothing meaningful being communicated to the admin by the CONFIG_CHANGE updated_rules event. One can assume that the rule was not "modified" because it is still watching the intended target. If the device or inode cannot be resolved, then audit_panic is called which is sufficient. The correct resolution is to drop logging config_update events since the watch is still in effect but just on another unknown inode. 
Signed-off-by: Steve Grubb Signed-off-by: Paul Moore Signed-off-by: Sasha Levin --- kernel/audit_watch.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 4f7262eba73d..50952d6d8120 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -317,8 +317,6 @@ static void audit_update_watch(struct audit_parent *parent, if (oentry->rule.exe) audit_remove_mark(oentry->rule.exe); - audit_watch_log_rule_change(r, owatch, "updated_rules"); - call_rcu(&oentry->rcu, audit_free_rule_rcu); } -- GitLab From 64e0f9e159fe6b592e0fe26cfc1ce03f79d2a9db Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Sat, 1 Feb 2020 10:47:47 +0300 Subject: [PATCH 0957/1304] selinux: sel_avc_get_stat_idx should increase position index [ Upstream commit 8d269a8e2a8f0bca89022f4ec98de460acb90365 ] If seq_file .next function does not change position index, read after some lseek can generate unexpected output. $ dd if=/sys/fs/selinux/avc/cache_stats # usual output lookups hits misses allocations reclaims frees 817223 810034 7189 7189 6992 7037 1934894 1926896 7998 7998 7632 7683 1322812 1317176 5636 5636 5456 5507 1560571 1551548 9023 9023 9056 9115 0+1 records in 0+1 records out 189 bytes copied, 5,1564e-05 s, 3,7 MB/s $# read after lseek to midle of last line $ dd if=/sys/fs/selinux/avc/cache_stats bs=180 skip=1 dd: /sys/fs/selinux/avc/cache_stats: cannot skip to specified offset 056 9115 <<<< end of last line 1560571 1551548 9023 9023 9056 9115 <<< whole last line once again 0+1 records in 0+1 records out 45 bytes copied, 8,7221e-05 s, 516 kB/s $# read after lseek beyond end of of file $ dd if=/sys/fs/selinux/avc/cache_stats bs=1000 skip=1 dd: /sys/fs/selinux/avc/cache_stats: cannot skip to specified offset 1560571 1551548 9023 9023 9056 9115 <<<< generates whole last line 0+1 records in 0+1 records out 36 bytes copied, 9,0934e-05 s, 396 kB/s https://bugzilla.kernel.org/show_bug.cgi?id=206283 Signed-off-by: Vasily Averin Acked-by: Stephen 
Smalley Signed-off-by: Paul Moore Signed-off-by: Sasha Levin --- security/selinux/selinuxfs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index f3a5a138a096..60b3f16bb5c7 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -1509,6 +1509,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx) *idx = cpu + 1; return &per_cpu(avc_cache_stats, cpu); } + (*idx)++; return NULL; } -- GitLab From 1ae009aa225732321223bd96d656e07ecf42bf16 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 27 Jan 2020 16:23:01 -0800 Subject: [PATCH 0958/1304] scsi: lpfc: Fix RQ buffer leakage when no IOCBs available [ Upstream commit 39c4f1a965a9244c3ba60695e8ff8da065ec6ac4 ] The driver is occasionally seeing the following SLI Port error, requiring reset and reinit: Port Status Event: ... error 1=0x52004a01, error 2=0x218 The failure means an RQ timeout. That is, the adapter had received asynchronous receive frames, ran out of buffer slots to place the frames, and the driver did not replenish the buffer slots before a timeout occurred. The driver should not be so slow in replenishing buffers that a timeout can occur. When the driver received all the frames of a sequence, it allocates an IOCB to put the frames in. In a situation where there was no IOCB available for the frame of a sequence, the RQ buffer corresponding to the first frame of the sequence was not returned to the FW. Eventually, with enough traffic encountering the situation, the timeout occurred. Fix by releasing the buffer back to firmware whenever there is no IOCB for the first frame. [mkp: typo] Link: https://lore.kernel.org/r/20200128002312.16346-2-jsmart2021@gmail.com Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/lpfc/lpfc_sli.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a56a939792ac..2ab351260e81 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -17413,6 +17413,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) list_add_tail(&iocbq->list, &first_iocbq->list); } } + /* Free the sequence's header buffer */ + if (!first_iocbq) + lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf); + return first_iocbq; } -- GitLab From b4ba41c88d2cf52c97e3defd13ea6b909646a310 Mon Sep 17 00:00:00 2001 From: James Smart Date: Mon, 27 Jan 2020 16:23:07 -0800 Subject: [PATCH 0959/1304] scsi: lpfc: Fix coverity errors in fmdi attribute handling [ Upstream commit 4cb9e1ddaa145be9ed67b6a7de98ca705a43f998 ] Coverity reported a memory corruption error for the fdmi attributes routines: CID 15768 [Memory Corruption] Out-of-bounds access on FDMI Sloppy coding of the fmdi structures. In both the lpfc_fdmi_attr_def and lpfc_fdmi_reg_port_list structures, a field was placed at the start of payload that may have variable content. The field was given an arbitrary type (uint32_t). The code then uses the field name to derive an address, which it used in things such as memset and memcpy. The memset sizes or memcpy lengths were larger than the arbitrary type, thus coverity reported an error. Fix by replacing the arbitrary fields with the real field structures describing the payload. Link: https://lore.kernel.org/r/20200128002312.16346-8-jsmart2021@gmail.com Signed-off-by: Dick Kennedy Signed-off-by: James Smart Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/lpfc/lpfc_ct.c | 137 ++++++++++++++++++------------------ drivers/scsi/lpfc/lpfc_hw.h | 36 +++++----- 2 files changed, 85 insertions(+), 88 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 384f5cd7c3c8..99b4ff78f9dc 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1737,8 +1737,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -1754,8 +1754,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); /* This string MUST be consistent with other FC platforms * supported by Broadcom. 
@@ -1779,8 +1779,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->SerialNumber, sizeof(ae->un.AttrString)); @@ -1801,8 +1801,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->ModelName, sizeof(ae->un.AttrString)); @@ -1822,8 +1822,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->ModelDesc, sizeof(ae->un.AttrString)); @@ -1845,8 +1845,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t i, j, incr, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); /* Convert JEDEC ID to ascii for hardware version */ incr = vp->rev.biuRev; @@ -1875,8 +1875,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, lpfc_release_version, sizeof(ae->un.AttrString)); @@ -1897,8 +1897,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); if (phba->sli_rev == LPFC_SLI_REV4) 
lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); @@ -1922,8 +1922,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); len = strnlen(ae->un.AttrString, @@ -1942,8 +1942,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s", init_utsname()->sysname, @@ -1965,7 +1965,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE); size = FOURBYTES + sizeof(uint32_t); @@ -1981,8 +1981,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); len = lpfc_vport_symbolic_node_name(vport, ae->un.AttrString, 256); @@ -2000,7 +2000,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Nothing is defined for this currently */ ae->un.AttrInt = cpu_to_be32(0); @@ -2017,7 +2017,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Each driver instance corresponds to a single port */ ae->un.AttrInt = cpu_to_be32(1); @@ -2034,8 +2034,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct 
lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fabric_nodename, sizeof(struct lpfc_name)); @@ -2053,8 +2053,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1); len = strnlen(ae->un.AttrString, @@ -2073,7 +2073,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Driver doesn't have access to this information */ ae->un.AttrInt = cpu_to_be32(0); @@ -2090,8 +2090,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, "EMULEX", sizeof(ae->un.AttrString)); @@ -2112,8 +2112,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 32); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ @@ -2134,7 +2134,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = 0; if (!(phba->hba_flag & HBA_FCOE_MODE)) { @@ -2186,7 +2186,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = 
(struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; if (!(phba->hba_flag & HBA_FCOE_MODE)) { switch (phba->fc_linkspeed) { @@ -2253,7 +2253,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; hsp = (struct serv_parm *)&vport->fc_sparam; ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) | @@ -2273,8 +2273,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "/sys/class/scsi_host/host%d", shost->host_no); @@ -2294,8 +2294,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s", init_utsname()->nodename); @@ -2315,8 +2315,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2333,8 +2333,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName, sizeof(struct lpfc_name)); @@ -2351,8 +2351,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - 
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256); len += (len & 3) ? (4 - (len & 3)) : 4; @@ -2370,7 +2370,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT); else @@ -2388,7 +2388,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2403,8 +2403,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, sizeof(struct lpfc_name)); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrWWN, &vport->fabric_portname, sizeof(struct lpfc_name)); @@ -2421,8 +2421,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 32); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */ @@ -2442,7 +2442,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* Link Up - operational */ ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE); size = FOURBYTES + sizeof(uint32_t); @@ -2458,7 +2458,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, struct 
lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; vport->fdmi_num_disc = lpfc_find_map_node(vport); ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc); size = FOURBYTES + sizeof(uint32_t); @@ -2474,7 +2474,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(vport->fc_myDID); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2489,8 +2489,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, "Smart SAN Initiator", sizeof(ae->un.AttrString)); @@ -2510,8 +2510,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName, sizeof(struct lpfc_name)); @@ -2531,8 +2531,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, "Smart SAN Version 2.0", sizeof(ae->un.AttrString)); @@ -2553,8 +2553,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t len, size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; - memset(ae, 0, 256); + ae = &ad->AttrValue; + memset(ae, 0, sizeof(*ae)); strncpy(ae->un.AttrString, phba->ModelName, sizeof(ae->un.AttrString)); @@ -2573,7 +2573,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, struct 
lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; /* SRIOV (type 3) is not supported */ if (vport->vpi) @@ -2593,7 +2593,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(0); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2608,7 +2608,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, struct lpfc_fdmi_attr_entry *ae; uint32_t size; - ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue; + ae = &ad->AttrValue; ae->un.AttrInt = cpu_to_be32(1); size = FOURBYTES + sizeof(uint32_t); ad->AttrLen = cpu_to_be16(size); @@ -2756,7 +2756,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Registered Port List */ /* One entry (port) per adapter */ rh->rpl.EntryCnt = cpu_to_be32(1); - memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName, + memcpy(&rh->rpl.pe.PortName, + &phba->pport->fc_sparam.portName, sizeof(struct lpfc_name)); /* point to the HBA attribute block */ diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 009aa0eee040..48d4d576d588 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1333,25 +1333,8 @@ struct fc_rdp_res_frame { /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */ -/* - * Registered Port List Format - */ -struct lpfc_fdmi_reg_port_list { - uint32_t EntryCnt; - uint32_t pe; /* Variable-length array */ -}; - - /* Definitions for HBA / Port attribute entries */ -struct lpfc_fdmi_attr_def { /* Defined in TLV format */ - /* Structure is in Big Endian format */ - uint32_t AttrType:16; - uint32_t AttrLen:16; - uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */ -}; - - /* Attribute Entry */ struct lpfc_fdmi_attr_entry { union { 
@@ -1362,7 +1345,13 @@ struct lpfc_fdmi_attr_entry { } un; }; -#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry) +struct lpfc_fdmi_attr_def { /* Defined in TLV format */ + /* Structure is in Big Endian format */ + uint32_t AttrType:16; + uint32_t AttrLen:16; + /* Marks start of Value (ATTRIBUTE_ENTRY) */ + struct lpfc_fdmi_attr_entry AttrValue; +} __packed; /* * HBA Attribute Block @@ -1386,13 +1375,20 @@ struct lpfc_fdmi_hba_ident { struct lpfc_name PortName; }; +/* + * Registered Port List Format + */ +struct lpfc_fdmi_reg_port_list { + uint32_t EntryCnt; + struct lpfc_fdmi_port_entry pe; +} __packed; + /* * Register HBA(RHBA) */ struct lpfc_fdmi_reg_hba { struct lpfc_fdmi_hba_ident hi; - struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */ -/* struct lpfc_fdmi_attr_block ab; */ + struct lpfc_fdmi_reg_port_list rpl; }; /* -- GitLab From d4c38bfb33c2bb1c9cb6afb633fc210ae63a3337 Mon Sep 17 00:00:00 2001 From: Wen Yang Date: Mon, 8 Apr 2019 10:58:32 +0800 Subject: [PATCH 0960/1304] drm/omap: fix possible object reference leak [ Upstream commit 47340e46f34a3b1d80e40b43ae3d7a8da34a3541 ] The call to of_find_matching_node returns a node pointer with refcount incremented thus it must be explicitly decremented after the last usage. Detected by coccinelle with the following warnings: drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:212:2-8: ERROR: missing of_node_put; acquired a node pointer with refcount incremented on line 209, but without a corresponding object release within this function. drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:237:1-7: ERROR: missing of_node_put; acquired a node pointer with refcount incremented on line 209, but without a corresponding object release within this function. 
Signed-off-by: Wen Yang Reviewed-by: Laurent Pinchart Reviewed-by: Mukesh Ojha Cc: Tomi Valkeinen Cc: David Airlie Cc: Daniel Vetter Cc: Sebastian Reichel Cc: Laurent Pinchart Cc: dri-devel@lists.freedesktop.org Cc: linux-kernel@vger.kernel.org Cc: Markus Elfring Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/1554692313-28882-2-git-send-email-wen.yang99@zte.com.cn Signed-off-by: Sasha Levin --- drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 3bfb95d230e0..d8fb686c1fda 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -193,7 +193,7 @@ static int __init omapdss_boot_init(void) dss = of_find_matching_node(NULL, omapdss_of_match); if (dss == NULL || !of_device_is_available(dss)) - return 0; + goto put_node; omapdss_walk_device(dss, true); @@ -218,6 +218,8 @@ static int __init omapdss_boot_init(void) kfree(n); } +put_node: + of_node_put(dss); return 0; } -- GitLab From f99ca8f0876372788b1b6ef04b37ce650ae9c5ae Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 14 Jan 2020 10:07:25 -0600 Subject: [PATCH 0961/1304] clk: stratix10: use do_div() for 64-bit calculation [ Upstream commit cc26ed7be46c5f5fa45f3df8161ed7ca3c4d318c ] do_div() macro to perform u64 division and guards against overflow if the result is too large for the unsigned long return type. 
Signed-off-by: Dinh Nguyen Link: https://lkml.kernel.org/r/20200114160726.19771-1-dinguyen@kernel.org Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/socfpga/clk-pll-s10.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index c4d0b6f6abf2..fc2e2839fe57 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -38,7 +38,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, /* read VCO1 reg for numerator and denominator */ reg = readl(socfpgaclk->hw.reg); refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT; - vco_freq = (unsigned long long)parent_rate / refdiv; + + vco_freq = parent_rate; + do_div(vco_freq, refdiv); /* Read mdiv and fdiv from the fdbck register */ reg = readl(socfpgaclk->hw.reg + 0x4); -- GitLab From 7444a79b60c777fb4c7bc2f4073c1ec95ee0cbf4 Mon Sep 17 00:00:00 2001 From: Ayush Sawal Date: Wed, 5 Feb 2020 10:48:42 +0530 Subject: [PATCH 0962/1304] crypto: chelsio - This fixes the kernel panic which occurs during a libkcapi test [ Upstream commit 9195189e00a7db55e7d448cee973cae87c5a3c71 ] The libkcapi test which causes kernel panic is aead asynchronous vmsplice multiple test. 
./bin/kcapi -v -d 4 -x 10 -c "ccm(aes)" -q 4edb58e8d5eb6bc711c43a6f3693daebde2e5524f1b55297abb29f003236e43d -t a7877c99 -n 674742abd0f5ba -k 2861fd0253705d7875c95ba8a53171b4 -a fb7bc304a3909e66e2e0c5ef952712dd884ce3e7324171369f2c5db1adc48c7d This patch avoids dma_mapping of a zero length sg which causes the panic, by using sg_nents_for_len which maps only upto a specific length Signed-off-by: Ayush Sawal Signed-off-by: Herbert Xu Signed-off-by: Sasha Levin --- drivers/crypto/chelsio/chcr_algo.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 9b3c259f081d..ee508bbbb750 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -2418,8 +2418,9 @@ int chcr_aead_dma_map(struct device *dev, else reqctx->b0_dma = 0; if (req->src == req->dst) { - error = dma_map_sg(dev, req->src, sg_nents(req->src), - DMA_BIDIRECTIONAL); + error = dma_map_sg(dev, req->src, + sg_nents_for_len(req->src, dst_size), + DMA_BIDIRECTIONAL); if (!error) goto err; } else { -- GitLab From ac6724d49f654ecb722f84c33ebb8ece64958182 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 14 Dec 2019 00:15:26 +0100 Subject: [PATCH 0963/1304] mt76: clear skb pointers from rx aggregation reorder buffer during cleanup [ Upstream commit 9379df2fd9234e3b67a23101c2370c99f6af6d77 ] During the cleanup of the aggregation session, a rx handler (or release timer) on another CPU might still hold a pointer to the reorder buffer and could attempt to release some packets. Clearing pointers during cleanup avoids a theoretical use-after-free bug here. 
Signed-off-by: Felix Fietkau Signed-off-by: Sasha Levin --- drivers/net/wireless/mediatek/mt76/agg-rx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index d44d57e6eb27..97df6b3a472b 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -278,6 +278,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) if (!skb) continue; + tid->reorder_buf[i] = NULL; tid->nframes--; dev_kfree_skb(skb); } -- GitLab From 0cafae90a24e0c55ccb2443b398e6a6f7e52ed50 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 14 Feb 2020 15:49:28 +0100 Subject: [PATCH 0964/1304] ALSA: usb-audio: Don't create a mixer element with bogus volume range [ Upstream commit e9a0ef0b5ddcbc0d56c65aefc0f18d16e6f71207 ] Some USB-audio descriptors provide a bogus volume range (e.g. volume min and max are identical), which confuses user-space. This patch makes the driver skipping such a control element. 
BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=206221 Link: https://lore.kernel.org/r/20200214144928.23628-1-tiwai@suse.de Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/usb/mixer.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 45bd3d54be54..451b8ea383c6 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1699,6 +1699,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer, /* get min/max values */ get_min_max_with_quirks(cval, 0, kctl); + /* skip a bogus volume range */ + if (cval->max <= cval->min) { + usb_audio_dbg(mixer->chip, + "[%d] FU [%s] skipped due to invalid volume\n", + cval->head.id, kctl->id.name); + snd_ctl_free_one(kctl); + return; + } + + if (control == UAC_FU_VOLUME) { check_mapped_dB(map, cval); if (cval->dBmin < cval->dBmax || !cval->initialized) { -- GitLab From 345dc71a5c7efe99b5990a8c3a15bc29dd053521 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Mon, 17 Feb 2020 11:21:11 +0100 Subject: [PATCH 0965/1304] perf test: Fix test trace+probe_vfs_getname.sh on s390 [ Upstream commit 2bbc83537614517730e9f2811195004b712de207 ] This test places a kprobe to function getname_flags() in the kernel which has the following prototype: struct filename *getname_flags(const char __user *filename, int flags, int *empty) The 'filename' argument points to a filename located in user space memory. Looking at commit 88903c464321c ("tracing/probe: Add ustring type for user-space string") the kprobe should indicate that user space memory is accessed. Output before: [root@m35lp76 perf]# ./perf test 66 67 66: Use vfs_getname probe to get syscall args filenames : FAILED! 67: Check open filename arg using perf trace + vfs_getname: FAILED! 
[root@m35lp76 perf]# Output after: [root@m35lp76 perf]# ./perf test 66 67 66: Use vfs_getname probe to get syscall args filenames : Ok 67: Check open filename arg using perf trace + vfs_getname: Ok [root@m35lp76 perf]# Comments from Masami Hiramatsu: This bug doesn't happen on x86 or other archs on which user address space and kernel address space is the same. On some arches (ppc64 in this case?) user address space is partially or completely the same as kernel address space. (Yes, they switch the world when running into the kernel) In this case, we need to use different data access functions for each space. That is why I introduced the "ustring" type for kprobe events. As far as I can see, Thomas's patch is sane. Thomas, could you show us your result on your test environment? Comments from Thomas Richter: Test results for s/390 included above. Signed-off-by: Thomas Richter Acked-by: Masami Hiramatsu Tested-by: Arnaldo Carvalho de Melo Cc: Heiko Carstens Cc: Sumanth Korikkar Cc: Vasily Gorbik Link: http://lore.kernel.org/lkml/20200217102111.61137-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/tests/shell/lib/probe_vfs_getname.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh index 7cb99b433888..c2cc42daf924 100644 --- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh @@ -14,7 +14,7 @@ add_probe_vfs_getname() { if [ $had_vfs_getname -eq 1 ] ; then line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \ - perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string" + perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring" fi } -- 
GitLab From 46a57510ad6a04f8ff9c66cd9fbe650218beea12 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 17 Feb 2020 12:57:14 -0800 Subject: [PATCH 0966/1304] RDMA/rxe: Fix configuration of atomic queue pair attributes [ Upstream commit fb3063d31995cc4cf1d47a406bb61d6fb1b1d58d ] From the comment above the definition of the roundup_pow_of_two() macro: The result is undefined when n == 0. Hence only pass positive values to roundup_pow_of_two(). This patch fixes the following UBSAN complaint: UBSAN: Undefined behaviour in ./include/linux/log2.h:57:13 shift exponent 64 is too large for 64-bit type 'long unsigned int' Call Trace: dump_stack+0xa5/0xe6 ubsan_epilogue+0x9/0x26 __ubsan_handle_shift_out_of_bounds.cold+0x4c/0xf9 rxe_qp_from_attr.cold+0x37/0x5d [rdma_rxe] rxe_modify_qp+0x59/0x70 [rdma_rxe] _ib_modify_qp+0x5aa/0x7c0 [ib_core] ib_modify_qp+0x3b/0x50 [ib_core] cma_modify_qp_rtr+0x234/0x260 [rdma_cm] __rdma_accept+0x1a7/0x650 [rdma_cm] nvmet_rdma_cm_handler+0x1286/0x14cd [nvmet_rdma] cma_cm_event_handler+0x6b/0x330 [rdma_cm] cma_ib_req_handler+0xe60/0x22d0 [rdma_cm] cm_process_work+0x30/0x140 [ib_cm] cm_req_handler+0x11f4/0x1cd0 [ib_cm] cm_work_handler+0xb8/0x344e [ib_cm] process_one_work+0x569/0xb60 worker_thread+0x7a/0x5d0 kthread+0x1e6/0x210 ret_from_fork+0x24/0x30 Link: https://lore.kernel.org/r/20200217205714.26937-1-bvanassche@acm.org Fixes: 8700e3e7c485 ("Soft RoCE driver") Signed-off-by: Bart Van Assche Reviewed-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/sw/rxe/rxe_qp.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 230697fa31fe..8a22ab8b29e9 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -583,15 +583,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, int err; if (mask & IB_QP_MAX_QP_RD_ATOMIC) { - int max_rd_atomic = 
__roundup_pow_of_two(attr->max_rd_atomic); + int max_rd_atomic = attr->max_rd_atomic ? + roundup_pow_of_two(attr->max_rd_atomic) : 0; qp->attr.max_rd_atomic = max_rd_atomic; atomic_set(&qp->req.rd_atomic, max_rd_atomic); } if (mask & IB_QP_MAX_DEST_RD_ATOMIC) { - int max_dest_rd_atomic = - __roundup_pow_of_two(attr->max_dest_rd_atomic); + int max_dest_rd_atomic = attr->max_dest_rd_atomic ? + roundup_pow_of_two(attr->max_dest_rd_atomic) : 0; qp->attr.max_dest_rd_atomic = max_dest_rd_atomic; -- GitLab From 09ace5ea7e3edb0a2fc6424a1142559cc38644fd Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 13 Feb 2020 18:24:48 +0100 Subject: [PATCH 0967/1304] KVM: x86: fix incorrect comparison in trace event [ Upstream commit 147f1a1fe5d7e6b01b8df4d0cbd6f9eaf6b6c73b ] The "u" field in the event has three states, -1/0/1. Using u8 however means that comparison with -1 will always fail, so change to signed char. Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- arch/x86/kvm/mmutrace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index cb41b036eb26..7e0dc8c7da2c 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h @@ -339,7 +339,7 @@ TRACE_EVENT( /* These depend on page entry type, so compute them now. */ __field(bool, r) __field(bool, x) - __field(u8, u) + __field(signed char, u) ), TP_fast_assign( -- GitLab From bb198240240a8469d0708e472a397c02880faad9 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Mon, 27 Jan 2020 09:53:34 +0100 Subject: [PATCH 0968/1304] dmaengine: stm32-mdma: use vchan_terminate_vdesc() in .terminate_all [ Upstream commit dfc708812a2acfc0ca56f56233b3c3e7b0d4ffe7 ] To avoid race with vchan_complete, use the race free way to terminate running transfer. Move vdesc->node list_del in stm32_mdma_start_transfer instead of in stm32_mdma_xfer_end to avoid another race in vchan_dma_desc_free_list. 
Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20200127085334.13163-7-amelie.delaunay@st.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/stm32-mdma.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 8c3c3e5b812a..9c6867916e89 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1137,6 +1137,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan) return; } + list_del(&vdesc->node); + chan->desc = to_stm32_mdma_desc(vdesc); hwdesc = chan->desc->node[0].hwdesc; chan->curr_hwdesc = 0; @@ -1252,8 +1254,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c) LIST_HEAD(head); spin_lock_irqsave(&chan->vchan.lock, flags); - if (chan->busy) { - stm32_mdma_stop(chan); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vdesc); + if (chan->busy) + stm32_mdma_stop(chan); chan->desc = NULL; } vchan_get_all_descriptors(&chan->vchan, &head); @@ -1341,7 +1345,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c, static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) { - list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; chan->busy = false; -- GitLab From eec0eacf6c098492adc560429faa484e0d81829d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 11 Feb 2020 08:35:46 +0100 Subject: [PATCH 0969/1304] media: staging/imx: Missing assignment in imx_media_capture_device_register() [ Upstream commit ef0ed05dcef8a74178a8b480cce23a377b1de2b8 ] There was supposed to be a "ret = " assignment here, otherwise the error handling on the next line won't work. 
Fixes: 64b5a49df486 ("[media] media: imx: Add Capture Device Interface") Signed-off-by: Dan Carpenter Reviewed-by: Steve Longerbeam Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/staging/media/imx/imx-media-capture.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c index 256039ce561e..81a3370551db 100644 --- a/drivers/staging/media/imx/imx-media-capture.c +++ b/drivers/staging/media/imx/imx-media-capture.c @@ -678,7 +678,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev) /* setup default format */ fmt_src.pad = priv->src_sd_pad; fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE; - v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); + ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src); if (ret) { v4l2_err(sd, "failed to get src_sd format\n"); goto unreg; -- GitLab From 03dda9566772f5a37c5053c6b6eaa184bb4e387d Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Wed, 22 Jan 2020 08:53:46 -0800 Subject: [PATCH 0970/1304] x86/pkeys: Add check for pkey "overflow" [ Upstream commit 16171bffc829272d5e6014bad48f680cb50943d9 ] Alex Shi reported the pkey macros above arch_set_user_pkey_access() to be unused. They are unused, and even refer to a nonexistent CONFIG option. But, they might have served a good use, which was to ensure that the code does not try to set values that would not fit in the PKRU register. As it stands, a too-large 'pkey' value would be likely to silently overflow the u32 new_pkru_bits. Add a check to look for overflows. Also add a comment to remind any future developer to closely examine the types used to store pkey values if arch_max_pkey() ever changes. This boots and passes the x86 pkey selftests. 
Reported-by: Alex Shi Signed-off-by: Dave Hansen Signed-off-by: Borislav Petkov Link: https://lkml.kernel.org/r/20200122165346.AD4DA150@viggo.jf.intel.com Signed-off-by: Sasha Levin --- arch/x86/include/asm/pkeys.h | 5 +++++ arch/x86/kernel/fpu/xstate.c | 9 +++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index 19b137f1b3be..2ff9b98812b7 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -4,6 +4,11 @@ #define ARCH_DEFAULT_PKEY 0 +/* + * If more than 16 keys are ever supported, a thorough audit + * will be necessary to ensure that the types that store key + * numbers and masks have sufficient capacity. + */ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1) extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 4b900035f220..601a5da1d196 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -907,8 +907,6 @@ const void *get_xsave_field_ptr(int xsave_state) #ifdef CONFIG_ARCH_HAS_PKEYS -#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2) -#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1) /* * This will go out and modify PKRU register to set the access * rights for @pkey to @init_val. @@ -927,6 +925,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, if (!boot_cpu_has(X86_FEATURE_OSPKE)) return -EINVAL; + /* + * This code should only be called with valid 'pkey' + * values originating from in-kernel users. Complain + * if a bad value is observed. 
+ */ + WARN_ON_ONCE(pkey >= arch_max_pkey()); + /* Set the bits we need in PKRU: */ if (init_val & PKEY_DISABLE_ACCESS) new_pkru_bits |= PKRU_AD_BIT; -- GitLab From 1baf236084a366789eaf9515ee6027b515fb059d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 24 Feb 2020 15:01:39 +0100 Subject: [PATCH 0971/1304] bpf: Remove recursion prevention from rcu free callback [ Upstream commit 8a37963c7ac9ecb7f86f8ebda020e3f8d6d7b8a0 ] If an element is freed via RCU then recursion into BPF instrumentation functions is not a concern. The element is already detached from the map and the RCU callback does not hold any locks on which a kprobe, perf event or tracepoint attached BPF program could deadlock. Signed-off-by: Thomas Gleixner Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200224145643.259118710@linutronix.de Signed-off-by: Sasha Levin --- kernel/bpf/hashtab.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 1b28fb006763..3f3ed33bd2fd 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -667,15 +667,7 @@ static void htab_elem_free_rcu(struct rcu_head *head) struct htab_elem *l = container_of(head, struct htab_elem, rcu); struct bpf_htab *htab = l->htab; - /* must increment bpf_prog_active to avoid kprobe+bpf triggering while - * we're calling kfree, otherwise deadlock is possible if kprobes - * are placed somewhere inside of slub - */ - preempt_disable(); - __this_cpu_inc(bpf_prog_active); htab_elem_free(htab, l); - __this_cpu_dec(bpf_prog_active); - preempt_enable(); } static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) -- GitLab From 1da6faf4bebe10c7d01c94e2ccdf6a1f976fd02c Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Wed, 29 Jan 2020 16:36:28 +0100 Subject: [PATCH 0972/1304] dmaengine: stm32-dma: use vchan_terminate_vdesc() in .terminate_all [ Upstream commit d80cbef35bf89b763f06e03bb4ff8f933bf012c5 ] To avoid race with vchan_complete, use the 
race free way to terminate running transfer. Move vdesc->node list_del in stm32_dma_start_transfer instead of in stm32_mdma_chan_complete to avoid another race in vchan_dma_desc_free_list. Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20200129153628.29329-9-amelie.delaunay@st.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/stm32-dma.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 4903a408fc14..ac7af440f865 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -494,8 +494,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c) spin_lock_irqsave(&chan->vchan.lock, flags); - if (chan->busy) { - stm32_dma_stop(chan); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vdesc); + if (chan->busy) + stm32_dma_stop(chan); chan->desc = NULL; } @@ -551,6 +553,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) if (!vdesc) return; + list_del(&vdesc->node); + chan->desc = to_stm32_dma_desc(vdesc); chan->next_sg = 0; } @@ -628,7 +632,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan) } else { chan->busy = false; if (chan->next_sg == chan->desc->num_sgs) { - list_del(&chan->desc->vdesc.node); vchan_cookie_complete(&chan->desc->vdesc); chan->desc = NULL; } -- GitLab From 7fbd24e0ea5c327d83825eabc6869581f8e5315e Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 9 Feb 2020 19:33:41 +0300 Subject: [PATCH 0973/1304] dmaengine: tegra-apb: Prevent race conditions on channel's freeing [ Upstream commit 8e84172e372bdca20c305d92d51d33640d2da431 ] It's incorrect to check the channel's "busy" state without taking a lock. That shouldn't cause any real troubles, nevertheless it's always better not to have any race conditions in the code. 
Signed-off-by: Dmitry Osipenko Acked-by: Jon Hunter Link: https://lore.kernel.org/r/20200209163356.6439-5-digetx@gmail.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/dma/tegra20-apb-dma.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 15481aeaeecd..5ccd24a46e38 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1225,8 +1225,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); - if (tdc->busy) - tegra_dma_terminate_all(dc); + tegra_dma_terminate_all(dc); spin_lock_irqsave(&tdc->lock, flags); list_splice_init(&tdc->pending_sg_req, &sg_req_list); -- GitLab From 5fe40ed2a8262ab7bb6ea38c30527396e65b640b Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Thu, 30 Jan 2020 14:46:53 -0500 Subject: [PATCH 0974/1304] drm/amd/display: dal_ddc_i2c_payloads_create can fail causing panic [ Upstream commit 6a6c4a4d459ecacc9013c45dcbf2bc9747fdbdbd ] [Why] Since the i2c payload allocation can fail need to check return codes [How] Clean up i2c payload allocations and check for errors Signed-off-by: Aric Cyr Reviewed-by: Joshua Aberback Acked-by: Rodrigo Siqueira Acked-by: Harry Wentland Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- .../gpu/drm/amd/display/dc/core/dc_link_ddc.c | 52 +++++++++---------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 46c9cb47a96e..145af3bb2dfc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -127,22 +127,16 @@ struct aux_payloads { struct vector payloads; }; -static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count) +static bool dal_ddc_i2c_payloads_create( + struct dc_context *ctx, + struct i2c_payloads 
*payloads, + uint32_t count) { - struct i2c_payloads *payloads; - - payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL); - - if (!payloads) - return NULL; - if (dal_vector_construct( &payloads->payloads, ctx, count, sizeof(struct i2c_payload))) - return payloads; - - kfree(payloads); - return NULL; + return true; + return false; } static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p) @@ -155,14 +149,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p) return p->payloads.count; } -static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p) +static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p) { - if (!p || !*p) + if (!p) return; - dal_vector_destruct(&(*p)->payloads); - kfree(*p); - *p = NULL; + dal_vector_destruct(&p->payloads); } static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count) @@ -580,9 +572,13 @@ bool dal_ddc_service_query_ddc_data( uint32_t payloads_num = write_payloads + read_payloads; + if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE) return false; + if (!payloads_num) + return false; + /*TODO: len of payload data for i2c and aux is uint8!!!!, * but we want to read 256 over i2c!!!!*/ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { @@ -613,23 +609,25 @@ bool dal_ddc_service_query_ddc_data( dal_ddc_aux_payloads_destroy(&payloads); } else { - struct i2c_payloads *payloads = - dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); + struct i2c_command command = {0}; + struct i2c_payloads payloads; - struct i2c_command command = { - .payloads = dal_ddc_i2c_payloads_get(payloads), - .number_of_payloads = 0, - .engine = DDC_I2C_COMMAND_ENGINE, - .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; + if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num)) + return false; + + command.payloads = dal_ddc_i2c_payloads_get(&payloads); + command.number_of_payloads = 0; + command.engine = DDC_I2C_COMMAND_ENGINE; + 
command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; dal_ddc_i2c_payloads_add( - payloads, address, write_size, write_buf, true); + &payloads, address, write_size, write_buf, true); dal_ddc_i2c_payloads_add( - payloads, address, read_size, read_buf, false); + &payloads, address, read_size, read_buf, false); command.number_of_payloads = - dal_ddc_i2c_payloads_get_count(payloads); + dal_ddc_i2c_payloads_get_count(&payloads); ret = dm_helpers_submit_i2c( ddc->ctx, -- GitLab From f674193b8e9a76762aadf0183172cf087c923d62 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 21 Feb 2020 16:35:08 +0000 Subject: [PATCH 0975/1304] firmware: arm_sdei: Use cpus_read_lock() to avoid races with cpuhp [ Upstream commit 54f529a6806c9710947a4f2cdc15d6ea54121ccd ] SDEI has private events that need registering and enabling on each CPU. CPUs can come and go while we are trying to do this. SDEI tries to avoid these problems by setting the reregister flag before the register call, so any CPUs that come online register the event too. Sticking plaster like this doesn't work, as if the register call fails, a CPU that subsequently comes online will register the event before reregister is cleared. Take cpus_read_lock() around the register and enable calls. We don't want surprise CPUs to do the wrong thing if they race with these calls failing. 
Signed-off-by: James Morse Signed-off-by: Catalin Marinas Signed-off-by: Sasha Levin --- drivers/firmware/arm_sdei.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 05b528c7ed8f..e809f4d9a9e9 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -410,14 +410,19 @@ int sdei_event_enable(u32 event_num) return -ENOENT; } - spin_lock(&sdei_list_lock); - event->reenable = true; - spin_unlock(&sdei_list_lock); + cpus_read_lock(); if (event->type == SDEI_EVENT_TYPE_SHARED) err = sdei_api_event_enable(event->event_num); else err = sdei_do_cross_call(_local_event_enable, event); + + if (!err) { + spin_lock(&sdei_list_lock); + event->reenable = true; + spin_unlock(&sdei_list_lock); + } + cpus_read_unlock(); mutex_unlock(&sdei_events_lock); return err; @@ -619,21 +624,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg) break; } - spin_lock(&sdei_list_lock); - event->reregister = true; - spin_unlock(&sdei_list_lock); - + cpus_read_lock(); err = _sdei_event_register(event); if (err) { - spin_lock(&sdei_list_lock); - event->reregister = false; - event->reenable = false; - spin_unlock(&sdei_list_lock); - sdei_event_destroy(event); pr_warn("Failed to register event %u: %d\n", event_num, err); + } else { + spin_lock(&sdei_list_lock); + event->reregister = true; + spin_unlock(&sdei_list_lock); } + cpus_read_unlock(); } while (0); mutex_unlock(&sdei_events_lock); -- GitLab From dca75ae683c1acc4dde1d1a5b65d3fe00d03013a Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Tue, 25 Feb 2020 11:27:04 -0500 Subject: [PATCH 0976/1304] random: fix data races at timer_rand_state [ Upstream commit e00d996a4317aff5351c4338dd97d390225412c2 ] Fields in "struct timer_rand_state" could be accessed concurrently. Lockless plain reads and writes result in data races. Fix them by adding pairs of READ|WRITE_ONCE(). 
The data races were reported by KCSAN, BUG: KCSAN: data-race in add_timer_randomness / add_timer_randomness write to 0xffff9f320a0a01d0 of 8 bytes by interrupt on cpu 22: add_timer_randomness+0x100/0x190 add_timer_randomness at drivers/char/random.c:1152 add_disk_randomness+0x85/0x280 scsi_end_request+0x43a/0x4a0 scsi_io_completion+0xb7/0x7e0 scsi_finish_command+0x1ed/0x2a0 scsi_softirq_done+0x1c9/0x1d0 blk_done_softirq+0x181/0x1d0 __do_softirq+0xd9/0x57c irq_exit+0xa2/0xc0 do_IRQ+0x8b/0x190 ret_from_intr+0x0/0x42 cpuidle_enter_state+0x15e/0x980 cpuidle_enter+0x69/0xc0 call_cpuidle+0x23/0x40 do_idle+0x248/0x280 cpu_startup_entry+0x1d/0x1f start_secondary+0x1b2/0x230 secondary_startup_64+0xb6/0xc0 no locks held by swapper/22/0. irq event stamp: 32871382 _raw_spin_unlock_irqrestore+0x53/0x60 _raw_spin_lock_irqsave+0x21/0x60 _local_bh_enable+0x21/0x30 irq_exit+0xa2/0xc0 read to 0xffff9f320a0a01d0 of 8 bytes by interrupt on cpu 2: add_timer_randomness+0xe8/0x190 add_disk_randomness+0x85/0x280 scsi_end_request+0x43a/0x4a0 scsi_io_completion+0xb7/0x7e0 scsi_finish_command+0x1ed/0x2a0 scsi_softirq_done+0x1c9/0x1d0 blk_done_softirq+0x181/0x1d0 __do_softirq+0xd9/0x57c irq_exit+0xa2/0xc0 do_IRQ+0x8b/0x190 ret_from_intr+0x0/0x42 cpuidle_enter_state+0x15e/0x980 cpuidle_enter+0x69/0xc0 call_cpuidle+0x23/0x40 do_idle+0x248/0x280 cpu_startup_entry+0x1d/0x1f start_secondary+0x1b2/0x230 secondary_startup_64+0xb6/0xc0 no locks held by swapper/2/0. 
irq event stamp: 37846304 _raw_spin_unlock_irqrestore+0x53/0x60 _raw_spin_lock_irqsave+0x21/0x60 _local_bh_enable+0x21/0x30 irq_exit+0xa2/0xc0 Reported by Kernel Concurrency Sanitizer on: Hardware name: HP ProLiant BL660c Gen9, BIOS I38 10/17/2018 Link: https://lore.kernel.org/r/1582648024-13111-1-git-send-email-cai@lca.pw Signed-off-by: Qian Cai Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- drivers/char/random.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/char/random.c b/drivers/char/random.c index 6a5d4dfafc47..80dedecfe15c 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1150,14 +1150,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * We take into account the first, second and third-order deltas * in order to make our estimate. */ - delta = sample.jiffies - state->last_time; - state->last_time = sample.jiffies; + delta = sample.jiffies - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, sample.jiffies); - delta2 = delta - state->last_delta; - state->last_delta = delta; + delta2 = delta - READ_ONCE(state->last_delta); + WRITE_ONCE(state->last_delta, delta); - delta3 = delta2 - state->last_delta2; - state->last_delta2 = delta2; + delta3 = delta2 - READ_ONCE(state->last_delta2); + WRITE_ONCE(state->last_delta2, delta2); if (delta < 0) delta = -delta; -- GitLab From 4ba1aee12640a5528aee4484ef876eb15f10cb49 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 17 Jan 2020 02:48:34 +0800 Subject: [PATCH 0977/1304] bus: hisi_lpc: Fixup IO ports addresses to avoid use-after-free in host removal [ Upstream commit a6dd255bdd7d00bbdbf78ba00bde9fc64f86c3a7 ] Some released ACPI FW for Huawei boards describes incorrect the port IO address range for child devices, in that it tells us the IO port max range is 0x3fff for each child device, which is not correct. The address range should be [e4:e8) or similar. 
With this incorrect upper range, the child device IO port resources overlap. As such, the kernel thinks that the LPC host serial device is a child of the IPMI device: root@(none)$ more /proc/ioports [...] 00ffc0e3-00ffffff : hisi-lpc-ipmi.0.auto 00ffc0e3-00ffc0e3 : ipmi_si 00ffc0e4-00ffc0e4 : ipmi_si 00ffc0e5-00ffc0e5 : ipmi_si 00ffc2f7-00ffffff : serial8250.1.auto 00ffc2f7-00ffc2fe : serial root@(none)$ They should both be siblings. Note that these are logical PIO addresses, which have a direct mapping from the FW IO port ranges. This shows up as a real issue when we enable CONFIG_KASAN and CONFIG_DEBUG_TEST_DRIVER_REMOVE - we see use-after-free warnings in the host removal path: ================================================================== BUG: KASAN: use-after-free in release_resource+0x38/0xc8 Read of size 8 at addr ffff0026accdbc38 by task swapper/0/1 CPU: 2 PID: 1 Comm: swapper/0 Not tainted 5.5.0-rc6-00001-g68e186e77b5c-dirty #1593 Hardware name: Huawei Taishan 2180 /D03, BIOS Hisilicon D03 IT20 Nemo 2.0 RC0 03/30/2018 Call trace: dump_backtrace+0x0/0x290 show_stack+0x14/0x20 dump_stack+0xf0/0x14c print_address_description.isra.9+0x6c/0x3b8 __kasan_report+0x12c/0x23c kasan_report+0xc/0x18 __asan_load8+0x94/0xb8 release_resource+0x38/0xc8 platform_device_del.part.10+0x80/0xe0 platform_device_unregister+0x20/0x38 hisi_lpc_acpi_remove_subdev+0x10/0x20 device_for_each_child+0xc8/0x128 hisi_lpc_acpi_remove+0x4c/0xa8 hisi_lpc_remove+0xbc/0xc0 platform_drv_remove+0x3c/0x68 really_probe+0x174/0x548 driver_probe_device+0x7c/0x148 device_driver_attach+0x94/0xa0 __driver_attach+0xa4/0x110 bus_for_each_dev+0xe8/0x158 driver_attach+0x30/0x40 bus_add_driver+0x234/0x2f0 driver_register+0xbc/0x1d0 __platform_driver_register+0x7c/0x88 hisi_lpc_driver_init+0x18/0x20 do_one_initcall+0xb4/0x258 kernel_init_freeable+0x248/0x2c0 kernel_init+0x10/0x118 ret_from_fork+0x10/0x1c ... 
The issue here is that the kernel created an incorrect parent-child resource dependency between two devices, and references the false parent node when deleting the second child device, when it had been deleted already. Fix up the child device resources from FW to create proper IO port resource relationships for broken FW. With this, the IO port layout looks more healthy: root@(none)$ more /proc/ioports [...] 00ffc0e3-00ffc0e7 : hisi-lpc-ipmi.0.auto 00ffc0e3-00ffc0e3 : ipmi_si 00ffc0e4-00ffc0e4 : ipmi_si 00ffc0e5-00ffc0e5 : ipmi_si 00ffc2f7-00ffc2ff : serial8250.1.auto 00ffc2f7-00ffc2fe : serial Signed-off-by: John Garry Signed-off-by: Wei Xu Signed-off-by: Sasha Levin --- drivers/bus/hisi_lpc.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index e31c02dc7770..cbd970fb02f1 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev, return 0; } +/* + * Released firmware describes the IO port max address as 0x3fff, which is + * the max host bus address. Fixup to a proper range. This will probably + * never be fixed in firmware. 
+ */ +static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev, + struct resource *r) +{ + if (r->end != 0x3fff) + return; + + if (r->start == 0xe4) + r->end = 0xe4 + 0x04 - 1; + else if (r->start == 0x2f8) + r->end = 0x2f8 + 0x08 - 1; + else + dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n", + r); +} + /* * hisi_lpc_acpi_set_io_res - set the resources for a child * @child: the device node to be updated the I/O resource @@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child, return -ENOMEM; } count = 0; - list_for_each_entry(rentry, &resource_list, node) - resources[count++] = *rentry->res; + list_for_each_entry(rentry, &resource_list, node) { + resources[count] = *rentry->res; + hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]); + count++; + } acpi_dev_free_resource_list(&resource_list); -- GitLab From 8910d3f0b0ebe79d42e37fe9be49573682c8cc17 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 6 Feb 2020 16:45:27 +0100 Subject: [PATCH 0978/1304] media: go7007: Fix URB type for interrupt handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a3ea410cac41b19a5490aad7fe6d9a9a772e646e ] Josef reported that his old-and-good Plextor ConvertX M402U video converter spews lots of WARNINGs on the recent kernels, and it turned out that the device uses a bulk endpoint for interrupt handling just like 2250 board. For fixing it, generalize the check with the proper verification of the endpoint instead of hard-coded board type check. 
Fixes: 7e5219d18e93 ("[media] go7007: Fix 2250 urb type") Reported-and-tested-by: Josef Möllers BugLink: https://bugzilla.suse.com/show_bug.cgi?id=1162583 BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=206427 Signed-off-by: Takashi Iwai Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/usb/go7007/go7007-usb.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c index 19c6a0354ce0..b84a6f654861 100644 --- a/drivers/media/usb/go7007/go7007-usb.c +++ b/drivers/media/usb/go7007/go7007-usb.c @@ -1052,6 +1052,7 @@ static int go7007_usb_probe(struct usb_interface *intf, struct go7007_usb *usb; const struct go7007_usb_board *board; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_host_endpoint *ep; unsigned num_i2c_devs; char *name; int video_pipe, i, v_urb_len; @@ -1148,7 +1149,8 @@ static int go7007_usb_probe(struct usb_interface *intf, if (usb->intr_urb->transfer_buffer == NULL) goto allocfail; - if (go->board_id == GO7007_BOARDID_SENSORAY_2250) + ep = usb->usbdev->ep_in[4]; + if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) usb_fill_bulk_urb(usb->intr_urb, usb->usbdev, usb_rcvbulkpipe(usb->usbdev, 4), usb->intr_urb->transfer_buffer, 2*sizeof(u16), -- GitLab From 1ee3da6b960b88bdfaa7a7efaf37ec3b0444c068 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Tue, 3 Mar 2020 15:55:34 +0000 Subject: [PATCH 0979/1304] Bluetooth: guard against controllers sending zero'd events [ Upstream commit 08bb4da90150e2a225f35e0f642cdc463958d696 ] Some controllers have been observed to send zero'd events under some conditions. This change guards against this condition as well as adding a trace to facilitate diagnosability of this condition. 
Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin --- net/bluetooth/hci_event.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 2b4a7cf03041..ec6b3a87b3e7 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5738,6 +5738,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) u8 status = 0, event = hdr->evt, req_evt = 0; u16 opcode = HCI_OP_NOP; + if (!event) { + bt_dev_warn(hdev, "Received unexpected HCI Event 00000000"); + goto done; + } + if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) { struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data; opcode = __le16_to_cpu(cmd_hdr->opcode); @@ -5949,6 +5954,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) req_complete_skb(hdev, status, opcode, orig_skb); } +done: kfree_skb(orig_skb); kfree_skb(skb); hdev->stat.evt_rx++; -- GitLab From 627b771be7d7a36b1a62049912003736bc79edb4 Mon Sep 17 00:00:00 2001 From: Wen Yang Date: Mon, 20 Jan 2020 18:05:23 +0800 Subject: [PATCH 0980/1304] timekeeping: Prevent 32bit truncation in scale64_check_overflow() [ Upstream commit 4cbbc3a0eeed675449b1a4d080008927121f3da3 ] While unlikely the divisor in scale64_check_overflow() could be >= 32bit in scale64_check_overflow(). do_div() truncates the divisor to 32bit at least on 32bit platforms. Use div64_u64() instead to avoid the truncation to 32-bit. 
[ tglx: Massaged changelog ] Signed-off-by: Wen Yang Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20200120100523.45656-1-wenyang@linux.alibaba.com Signed-off-by: Sasha Levin --- kernel/time/timekeeping.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 81ee5b83c920..c66fd11d94bc 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1004,9 +1004,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base) ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) return -EOVERFLOW; tmp *= mult; - rem *= mult; - do_div(rem, div); + rem = div64_u64(rem * mult, div); *base = tmp + rem; return 0; } -- GitLab From 47c5fa5b550bc01bdbae9e80f48db54ef9a034e4 Mon Sep 17 00:00:00 2001 From: Qiujun Huang Date: Mon, 24 Feb 2020 23:02:46 +0800 Subject: [PATCH 0981/1304] ext4: fix a data race at inode->i_disksize [ Upstream commit dce8e237100f60c28cc66effb526ba65a01d8cb3 ] KCSAN find inode->i_disksize could be accessed concurrently. 
BUG: KCSAN: data-race in ext4_mark_iloc_dirty / ext4_write_end write (marked) to 0xffff8b8932f40090 of 8 bytes by task 66792 on cpu 0: ext4_write_end+0x53f/0x5b0 ext4_da_write_end+0x237/0x510 generic_perform_write+0x1c4/0x2a0 ext4_buffered_write_iter+0x13a/0x210 ext4_file_write_iter+0xe2/0x9b0 new_sync_write+0x29c/0x3a0 __vfs_write+0x92/0xa0 vfs_write+0xfc/0x2a0 ksys_write+0xe8/0x140 __x64_sys_write+0x4c/0x60 do_syscall_64+0x8a/0x2a0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 read to 0xffff8b8932f40090 of 8 bytes by task 14414 on cpu 1: ext4_mark_iloc_dirty+0x716/0x1190 ext4_mark_inode_dirty+0xc9/0x360 ext4_convert_unwritten_extents+0x1bc/0x2a0 ext4_convert_unwritten_io_end_vec+0xc5/0x150 ext4_put_io_end+0x82/0x130 ext4_writepages+0xae7/0x16f0 do_writepages+0x64/0x120 __writeback_single_inode+0x7d/0x650 writeback_sb_inodes+0x3a4/0x860 __writeback_inodes_wb+0xc4/0x150 wb_writeback+0x43f/0x510 wb_workfn+0x3b2/0x8a0 process_one_work+0x39b/0x7e0 worker_thread+0x88/0x650 kthread+0x1d4/0x1f0 ret_from_fork+0x35/0x40 The plain read is outside of inode->i_data_sem critical section which results in a data race. Fix it by adding READ_ONCE(). 
Signed-off-by: Qiujun Huang Link: https://lore.kernel.org/r/1582556566-3909-1-git-send-email-hqjagain@gmail.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index cd833f4e64ef..52be4c965024 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5315,7 +5315,7 @@ static int ext4_do_update_inode(handle_t *handle, raw_inode->i_file_acl_high = cpu_to_le16(ei->i_file_acl >> 32); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); - if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { + if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) { ext4_isize_set(raw_inode, ei->i_disksize); need_datasync = 1; } -- GitLab From 2002c630a95be88a7c4a8fc9a2ef31ac01f900d6 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 5 Mar 2020 19:08:01 +0800 Subject: [PATCH 0982/1304] perf jevents: Fix leak of mapfile memory [ Upstream commit 3f5777fbaf04c58d940526a22a2e0c813c837936 ] The memory for global pointer is never freed during normal program execution, so let's do that in the main function exit as a good programming practice. A stray blank line is also removed. 
Reported-by: Jiri Olsa Signed-off-by: John Garry Cc: Alexander Shishkin Cc: Andi Kleen Cc: James Clark Cc: Joakim Zhang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Will Deacon Cc: linuxarm@huawei.com Link: http://lore.kernel.org/lkml/1583406486-154841-2-git-send-email-john.garry@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/pmu-events/jevents.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index c17e59404171..6631970f9683 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -1064,10 +1064,9 @@ static int process_one_file(const char *fpath, const struct stat *sb, */ int main(int argc, char *argv[]) { - int rc; + int rc, ret = 0; int maxfds; char ldirname[PATH_MAX]; - const char *arch; const char *output_file; const char *start_dirname; @@ -1138,7 +1137,8 @@ int main(int argc, char *argv[]) /* Make build fail */ fclose(eventsfp); free_arch_std_events(); - return 1; + ret = 1; + goto out_free_mapfile; } else if (rc) { goto empty_map; } @@ -1156,14 +1156,17 @@ int main(int argc, char *argv[]) /* Make build fail */ fclose(eventsfp); free_arch_std_events(); - return 1; + ret = 1; } - return 0; + + goto out_free_mapfile; empty_map: fclose(eventsfp); create_empty_mapping(output_file); free_arch_std_events(); - return 0; +out_free_mapfile: + free(mapfile); + return ret; } -- GitLab From 2b294ac325c7ce3f36854b74d0d1d89dc1d1d8b8 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 5 Mar 2020 22:28:32 -0800 Subject: [PATCH 0983/1304] mm: avoid data corruption on CoW fault into PFN-mapped VMA [ Upstream commit c3e5ea6ee574ae5e845a40ac8198de1fb63bb3ab ] Jeff Moyer has reported that one of xfstests triggers a warning when run on DAX-enabled filesystem: WARNING: CPU: 76 PID: 51024 at mm/memory.c:2317 wp_page_copy+0xc40/0xd50 ... 
wp_page_copy+0x98c/0xd50 (unreliable) do_wp_page+0xd8/0xad0 __handle_mm_fault+0x748/0x1b90 handle_mm_fault+0x120/0x1f0 __do_page_fault+0x240/0xd70 do_page_fault+0x38/0xd0 handle_page_fault+0x10/0x30 The warning happens on failed __copy_from_user_inatomic() which tries to copy data into a CoW page. This happens because of race between MADV_DONTNEED and CoW page fault: CPU0 CPU1 handle_mm_fault() do_wp_page() wp_page_copy() do_wp_page() madvise(MADV_DONTNEED) zap_page_range() zap_pte_range() ptep_get_and_clear_full() __copy_from_user_inatomic() sees empty PTE and fails WARN_ON_ONCE(1) clear_page() The solution is to re-try __copy_from_user_inatomic() under PTL after checking that PTE is matches the orig_pte. The second copy attempt can still fail, like due to non-readable PTE, but there's nothing reasonable we can do about, except clearing the CoW page. Reported-by: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Kirill A. Shutemov Tested-by: Jeff Moyer Cc: Cc: Justin He Cc: Dan Williams Link: http://lkml.kernel.org/r/20200218154151.13349-1-kirill.shutemov@linux.intel.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/memory.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index fcad8a0d943d..eeae63bd9502 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2353,7 +2353,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src, bool ret; void *kaddr; void __user *uaddr; - bool force_mkyoung; + bool locked = false; struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; unsigned long addr = vmf->address; @@ -2378,11 +2378,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src, * On architectures with software "accessed" bits, we would * take a double page fault, so mark it accessed here. 
*/ - force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte); - if (force_mkyoung) { + if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) { pte_t entry; vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + locked = true; if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { /* * Other thread has already handled the fault @@ -2406,18 +2406,37 @@ static inline bool cow_user_page(struct page *dst, struct page *src, * zeroes. */ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { + if (locked) + goto warn; + + /* Re-validate under PTL if the page is still mapped */ + vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); + locked = true; + if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + /* The PTE changed under us. Retry page fault. */ + ret = false; + goto pte_unlock; + } + /* - * Give a warn in case there can be some obscure - * use-case + * The same page can be mapped back since last copy attampt. + * Try to copy again under PTL. */ - WARN_ON_ONCE(1); - clear_page(kaddr); + if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { + /* + * Give a warn in case there can be some obscure + * use-case + */ +warn: + WARN_ON_ONCE(1); + clear_page(kaddr); + } } ret = true; pte_unlock: - if (force_mkyoung) + if (locked) pte_unmap_unlock(vmf->pte, vmf->ptl); kunmap_atomic(kaddr); flush_dcache_page(dst); -- GitLab From e5bc081aced49b9ea06f12f8ca93ce5db10cc51b Mon Sep 17 00:00:00 2001 From: John Clements Date: Thu, 5 Mar 2020 17:48:56 +0800 Subject: [PATCH 0984/1304] drm/amdgpu: increase atombios cmd timeout [ Upstream commit 1b3460a8b19688ad3033b75237d40fa580a5a953 ] mitigates race condition on BACO reset between GPU bootcode and driver reload Reviewed-by: Hawking Zhang Signed-off-by: John Clements Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/atom.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c 
index e9934de1b9cf..0222bb7ea49b 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -742,8 +742,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; - if ((jiffies_to_msecs(cjiffies) > 5000)) { - DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); + if ((jiffies_to_msecs(cjiffies) > 10000)) { + DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n"); ctx->abort = true; } } else { -- GitLab From 47721e8ff0b034a6cb7c111763694e594d67790f Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Mon, 24 Feb 2020 10:13:37 -0500 Subject: [PATCH 0985/1304] drm/amd/display: Stop if retimer is not available [ Upstream commit a0e40018dcc3f59a10ca21d58f8ea8ceb1b035ac ] Raven provides retimer feature support that requires i2c interaction in order to make it work well, all settings required for this configuration are loaded from the Atom bios which include the i2c address. If the retimer feature is not available, we should abort the attempt to set this feature, otherwise, it makes the following line return I2C_CHANNEL_OPERATION_NO_RESPONSE: i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); ... 
if (!i2c_success) ASSERT(i2c_success); This ends up causing problems with hotplugging HDMI displays on Raven, and causes retimer settings to warn like so: WARNING: CPU: 1 PID: 429 at drivers/gpu/drm/amd/amdgpu/../dal/dc/core/dc_link.c:1998 write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu] Modules linked in: edac_mce_amd ccp kvm irqbypass binfmt_misc crct10dif_pclmul crc32_pclmul ghash_clmulni_intel snd_hda_codec_realtek snd_hda_codec_generic ledtrig_audio snd_hda_codec_hdmi snd_hda_intel amdgpu(+) snd_hda_codec snd_hda_core snd_hwdep snd_pcm snd_seq_midi snd_seq_midi_event snd_rawmidi aesni_intel snd_seq amd_iommu_v2 gpu_sched aes_x86_64 crypto_simd cryptd glue_helper snd_seq_device ttm drm_kms_helper snd_timer eeepc_wmi wmi_bmof asus_wmi sparse_keymap drm mxm_wmi snd k10temp fb_sys_fops syscopyarea sysfillrect sysimgblt soundcore joydev input_leds mac_hid sch_fq_codel parport_pc ppdev lp parport ip_tables x_tables autofs4 igb i2c_algo_bit hid_generic usbhid i2c_piix4 dca ahci hid libahci video wmi gpio_amdpt gpio_generic CPU: 1 PID: 429 Comm: systemd-udevd Tainted: G W 5.2.0-rc1sept162019+ #1 Hardware name: System manufacturer System Product Name/ROG STRIX B450-F GAMING, BIOS 2605 08/06/2019 RIP: 0010:write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu] Code: ff 0f b6 4d ce 44 0f b6 45 cf 44 0f b6 c8 45 89 cf 44 89 e2 48 c7 c6 f0 34 bc c0 bf 04 00 00 00 e8 63 b0 90 ff 45 84 ff 75 02 <0f> 0b 42 0f b6 04 73 8d 50 f6 80 fa 02 77 8c 3c 0a 0f 85 c8 00 00 RSP: 0018:ffffa99d02726fd0 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffffa99d02727035 RCX: 0000000000000006 RDX: 0000000000000000 RSI: 0000000000000002 RDI: ffff976acc857440 RBP: ffffa99d02727018 R08: 0000000000000002 R09: 000000000002a600 R10: ffffe90610193680 R11: 00000000000005e3 R12: 000000000000005d R13: ffff976ac4b201b8 R14: 0000000000000001 R15: 0000000000000000 FS: 00007f14f99e1680(0000) GS:ffff976acc840000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fdf212843b8 CR3: 
0000000408906000 CR4: 00000000003406e0 Call Trace: core_link_enable_stream+0x626/0x680 [amdgpu] dce110_apply_ctx_to_hw+0x414/0x4e0 [amdgpu] dc_commit_state+0x331/0x5e0 [amdgpu] ? drm_calc_timestamping_constants+0xf9/0x150 [drm] amdgpu_dm_atomic_commit_tail+0x395/0x1e00 [amdgpu] ? dm_plane_helper_prepare_fb+0x20c/0x280 [amdgpu] commit_tail+0x42/0x70 [drm_kms_helper] drm_atomic_helper_commit+0x10c/0x120 [drm_kms_helper] amdgpu_dm_atomic_commit+0x95/0xa0 [amdgpu] drm_atomic_commit+0x4a/0x50 [drm] restore_fbdev_mode_atomic+0x1c0/0x1e0 [drm_kms_helper] restore_fbdev_mode+0x4c/0x160 [drm_kms_helper] ? _cond_resched+0x19/0x40 drm_fb_helper_restore_fbdev_mode_unlocked+0x4e/0xa0 [drm_kms_helper] drm_fb_helper_set_par+0x2d/0x50 [drm_kms_helper] fbcon_init+0x471/0x630 visual_init+0xd5/0x130 do_bind_con_driver+0x20a/0x430 do_take_over_console+0x7d/0x1b0 do_fbcon_takeover+0x5c/0xb0 fbcon_event_notify+0x6cd/0x8a0 notifier_call_chain+0x4c/0x70 blocking_notifier_call_chain+0x43/0x60 fb_notifier_call_chain+0x1b/0x20 register_framebuffer+0x254/0x360 __drm_fb_helper_initial_config_and_unlock+0x2c5/0x510 [drm_kms_helper] drm_fb_helper_initial_config+0x35/0x40 [drm_kms_helper] amdgpu_fbdev_init+0xcd/0x100 [amdgpu] amdgpu_device_init+0x1156/0x1930 [amdgpu] amdgpu_driver_load_kms+0x8d/0x2e0 [amdgpu] drm_dev_register+0x12b/0x1c0 [drm] amdgpu_pci_probe+0xd3/0x160 [amdgpu] local_pci_probe+0x47/0xa0 pci_device_probe+0x142/0x1b0 really_probe+0xf5/0x3d0 driver_probe_device+0x11b/0x130 device_driver_attach+0x58/0x60 __driver_attach+0xa3/0x140 ? device_driver_attach+0x60/0x60 ? device_driver_attach+0x60/0x60 bus_for_each_dev+0x74/0xb0 ? kmem_cache_alloc_trace+0x1a3/0x1c0 driver_attach+0x1e/0x20 bus_add_driver+0x147/0x220 ? 0xffffffffc0cb9000 driver_register+0x60/0x100 ? 0xffffffffc0cb9000 __pci_register_driver+0x5a/0x60 amdgpu_init+0x74/0x83 [amdgpu] do_one_initcall+0x4a/0x1fa ? _cond_resched+0x19/0x40 ? kmem_cache_alloc_trace+0x3f/0x1c0 ? 
__vunmap+0x1cc/0x200 do_init_module+0x5f/0x227 load_module+0x2330/0x2b40 __do_sys_finit_module+0xfc/0x120 ? __do_sys_finit_module+0xfc/0x120 __x64_sys_finit_module+0x1a/0x20 do_syscall_64+0x5a/0x130 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7f14f9500839 Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 1f f6 2c 00 f7 d8 64 89 01 48 RSP: 002b:00007fff9bc4f5a8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 RAX: ffffffffffffffda RBX: 000055afb5abce30 RCX: 00007f14f9500839 RDX: 0000000000000000 RSI: 000055afb5ace0f0 RDI: 0000000000000017 RBP: 000055afb5ace0f0 R08: 0000000000000000 R09: 000000000000000a R10: 0000000000000017 R11: 0000000000000246 R12: 0000000000000000 R13: 000055afb5aad800 R14: 0000000000020000 R15: 0000000000000000 ---[ end trace c286e96563966f08 ]--- This commit reworks the way that we handle i2c write for retimer in the way that we abort this configuration if the feature is not available in the device. For debug sake, we kept a simple log message in case the retimer is not available. Signed-off-by: Rodrigo Siqueira Reviewed-by: Hersen Wu Acked-by: Rodrigo Siqueira Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 67 ++++++++----------- 1 file changed, 29 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 3abc0294c05f..2fb2c683ad54 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1576,8 +1576,7 @@ static void write_i2c_retimer_setting( buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. 
@@ -1595,8 +1594,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->sink->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1605,8 +1603,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1623,8 +1620,7 @@ static void write_i2c_retimer_setting( buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A * needs to be set to 1 on every 0xA-0xC write. @@ -1642,8 +1638,7 @@ static void write_i2c_retimer_setting( pipe_ctx->stream->sink->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } buffer[0] = offset; @@ -1652,8 +1647,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } } } @@ -1668,8 +1662,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1677,8 +1670,7 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1686,10 +1678,14 @@ static void write_i2c_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + 
+i2c_write_fail: + DC_LOG_DEBUG("Set retimer failed"); } static void write_i2c_default_retimer_setting( @@ -1710,8 +1706,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1719,8 +1714,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0B to 0xDA or 0xD8 */ buffer[0] = 0x0B; @@ -1728,8 +1722,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1737,8 +1730,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0C to 0x1D or 0x91 */ buffer[0] = 0x0C; @@ -1746,8 +1738,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0x0A to 0x17 */ buffer[0] = 0x0A; @@ -1755,8 +1746,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; if (is_vga_mode) { @@ -1768,8 +1758,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* 
Write offset 0x00 to 0x23 */ buffer[0] = 0x00; @@ -1777,8 +1766,7 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; /* Write offset 0xff to 0x00 */ buffer[0] = 0xff; @@ -1786,9 +1774,13 @@ static void write_i2c_default_retimer_setting( i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + goto i2c_write_fail; } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set default retimer failed"); } static void write_i2c_redriver_setting( @@ -1811,8 +1803,7 @@ static void write_i2c_redriver_setting( buffer, sizeof(buffer)); if (!i2c_success) - /* Write failure */ - ASSERT(i2c_success); + DC_LOG_DEBUG("Set redriver failed"); } static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) -- GitLab From 6854738c9ec19fb3546ae14af6ff96120120d986 Mon Sep 17 00:00:00 2001 From: Wen Gong Date: Fri, 14 Feb 2020 11:42:18 +0800 Subject: [PATCH 0986/1304] ath10k: use kzalloc to read for ath10k_sdio_hif_diag_read [ Upstream commit 402f2992b4d62760cce7c689ff216ea3bf4d6e8a ] When use command to read values, it crashed. command: dd if=/sys/kernel/debug/ieee80211/phy0/ath10k/mem_value count=1 bs=4 skip=$((0x100233)) It will call to ath10k_sdio_hif_diag_read with address = 0x4008cc and buf_len = 4. 
Then system crash: [ 1786.013258] Unable to handle kernel paging request at virtual address ffffffc00bd45000 [ 1786.013273] Mem abort info: [ 1786.013281] ESR = 0x96000045 [ 1786.013291] Exception class = DABT (current EL), IL = 32 bits [ 1786.013299] SET = 0, FnV = 0 [ 1786.013307] EA = 0, S1PTW = 0 [ 1786.013314] Data abort info: [ 1786.013322] ISV = 0, ISS = 0x00000045 [ 1786.013330] CM = 0, WnR = 1 [ 1786.013342] swapper pgtable: 4k pages, 39-bit VAs, pgdp = 000000008542a60e [ 1786.013350] [ffffffc00bd45000] pgd=0000000000000000, pud=0000000000000000 [ 1786.013368] Internal error: Oops: 96000045 [#1] PREEMPT SMP [ 1786.013609] Process swapper/0 (pid: 0, stack limit = 0x0000000084b153c6) [ 1786.013623] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.19.86 #137 [ 1786.013631] Hardware name: MediaTek krane sku176 board (DT) [ 1786.013643] pstate: 80000085 (Nzcv daIf -PAN -UAO) [ 1786.013662] pc : __memcpy+0x94/0x180 [ 1786.013678] lr : swiotlb_tbl_unmap_single+0x84/0x150 [ 1786.013686] sp : ffffff8008003c60 [ 1786.013694] x29: ffffff8008003c90 x28: ffffffae96411f80 [ 1786.013708] x27: ffffffae960d2018 x26: ffffff8019a4b9a8 [ 1786.013721] x25: 0000000000000000 x24: 0000000000000001 [ 1786.013734] x23: ffffffae96567000 x22: 00000000000051d4 [ 1786.013747] x21: 0000000000000000 x20: 00000000fe6e9000 [ 1786.013760] x19: 0000000000000004 x18: 0000000000000020 [ 1786.013773] x17: 0000000000000001 x16: 0000000000000000 [ 1786.013787] x15: 00000000ffffffff x14: 00000000000044c0 [ 1786.013800] x13: 0000000000365ba4 x12: 0000000000000000 [ 1786.013813] x11: 0000000000000001 x10: 00000037be6e9000 [ 1786.013826] x9 : ffffffc940000000 x8 : 000000000bd45000 [ 1786.013839] x7 : 0000000000000000 x6 : ffffffc00bd45000 [ 1786.013852] x5 : 0000000000000000 x4 : 0000000000000000 [ 1786.013865] x3 : 0000000000000c00 x2 : 0000000000000004 [ 1786.013878] x1 : fffffff7be6e9004 x0 : ffffffc00bd45000 [ 1786.013891] Call trace: [ 1786.013903] __memcpy+0x94/0x180 [ 1786.013914] 
unmap_single+0x6c/0x84 [ 1786.013925] swiotlb_unmap_sg_attrs+0x54/0x80 [ 1786.013938] __swiotlb_unmap_sg_attrs+0x8c/0xa4 [ 1786.013952] msdc_unprepare_data+0x6c/0x84 [ 1786.013963] msdc_request_done+0x58/0x84 [ 1786.013974] msdc_data_xfer_done+0x1a0/0x1c8 [ 1786.013985] msdc_irq+0x12c/0x17c [ 1786.013996] __handle_irq_event_percpu+0xe4/0x250 [ 1786.014006] handle_irq_event_percpu+0x28/0x68 [ 1786.014015] handle_irq_event+0x48/0x78 [ 1786.014026] handle_fasteoi_irq+0xd0/0x1a0 [ 1786.014039] __handle_domain_irq+0x84/0xc4 [ 1786.014050] gic_handle_irq+0x124/0x1a4 [ 1786.014059] el1_irq+0xb0/0x128 [ 1786.014072] cpuidle_enter_state+0x298/0x328 [ 1786.014082] cpuidle_enter+0x30/0x40 [ 1786.014094] do_idle+0x190/0x268 [ 1786.014104] cpu_startup_entry+0x24/0x28 [ 1786.014116] rest_init+0xd4/0xe0 [ 1786.014126] start_kernel+0x30c/0x38c [ 1786.014139] Code: f8408423 f80084c3 36100062 b8404423 (b80044c3) [ 1786.014150] ---[ end trace 3b02ddb698ea69ee ]--- [ 1786.015415] Kernel panic - not syncing: Fatal exception in interrupt [ 1786.015433] SMP: stopping secondary CPUs [ 1786.015447] Kernel Offset: 0x2e8d200000 from 0xffffff8008000000 [ 1786.015458] CPU features: 0x0,2188200c [ 1786.015466] Memory Limit: none For sdio chip, it need the memory which is kmalloc, if it is vmalloc from ath10k_mem_value_read, then it have a memory error. kzalloc of ath10k_sdio_hif_diag_read32 is the correct type, so add kzalloc in ath10k_sdio_hif_diag_read to replace the buffer which is vmalloc from ath10k_mem_value_read. This patch only effect sdio chip. Tested with QCA6174 SDIO with firmware WLAN.RMH.4.4.1-00029. 
Signed-off-by: Wen Gong Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin --- drivers/net/wireless/ath/ath10k/sdio.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 0ecaba824fb2..0cdaecb0e28a 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1567,23 +1567,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf, size_t buf_len) { int ret; + void *mem; + + mem = kzalloc(buf_len, GFP_KERNEL); + if (!mem) + return -ENOMEM; /* set window register to start read cycle */ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address); if (ret) { ath10k_warn(ar, "failed to set mbox window read address: %d", ret); - return ret; + goto out; } /* read the data */ - ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len); + ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len); if (ret) { ath10k_warn(ar, "failed to read from mbox window data address: %d\n", ret); - return ret; + goto out; } - return 0; + memcpy(buf, mem, buf_len); + +out: + kfree(mem); + + return ret; } static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address, -- GitLab From ae5afc392d510a1d7a34a28cba3a447b01e0c4bb Mon Sep 17 00:00:00 2001 From: Sagar Biradar Date: Wed, 12 Feb 2020 16:29:31 -0800 Subject: [PATCH 0987/1304] scsi: aacraid: Disabling TM path and only processing IOP reset [ Upstream commit bef18d308a2215eff8c3411a23d7f34604ce56c3 ] Fixes the occasional adapter panic when sg_reset is issued with -d, -t, -b and -H flags. Removal of command type HBA_IU_TYPE_SCSI_TM_REQ in aac_hba_send since iu_type, request_id and fib_flags are not populated. Device and target reset handlers are made to send TMF commands only when reset_state is 0. 
Link: https://lore.kernel.org/r/1581553771-25796-1-git-send-email-Sagar.Biradar@microchip.com Reviewed-by: Sagar Biradar Signed-off-by: Sagar Biradar Signed-off-by: Balsundar P Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/aacraid/commsup.c | 2 +- drivers/scsi/aacraid/linit.c | 34 +++++++++++++++++++++++++--------- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index b7588de4484e..4cb6ee6e1212 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -743,7 +743,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback, hbacmd->request_id = cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1); fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD; - } else if (command != HBA_IU_TYPE_SCSI_TM_REQ) + } else return -EINVAL; diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 1046947064a0..0142547aaadd 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -736,7 +736,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd) status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib, (fib_callback) aac_hba_callback, (void *) cmd); - + if (status != -EINPROGRESS) { + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } /* Wait up to 15 secs for completion */ for (count = 0; count < 15; ++count) { if (cmd->SCp.sent_command) { @@ -915,11 +919,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) info = &aac->hba_map[bus][cid]; - if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && - info->reset_state > 0) + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) return FAILED; - pr_err("%s: Host adapter reset request. SCSI hang ?\n", + pr_err("%s: Host device reset request. 
SCSI hang ?\n", AAC_DRIVERNAME); fib = aac_fib_alloc(aac); @@ -934,7 +938,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd) status = aac_hba_send(command, fib, (fib_callback) aac_tmf_callback, (void *) info); - + if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state == 0) { @@ -973,11 +982,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) info = &aac->hba_map[bus][cid]; - if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && - info->reset_state > 0) + if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW && + !(info->reset_state > 0))) return FAILED; - pr_err("%s: Host adapter reset request. SCSI hang ?\n", + pr_err("%s: Host target reset request. SCSI hang ?\n", AAC_DRIVERNAME); fib = aac_fib_alloc(aac); @@ -994,6 +1003,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd) (fib_callback) aac_tmf_callback, (void *) info); + if (status != -EINPROGRESS) { + info->reset_state = 0; + aac_fib_complete(fib); + aac_fib_free(fib); + return ret; + } + /* Wait up to 15 seconds for completion */ for (count = 0; count < 15; ++count) { if (info->reset_state <= 0) { @@ -1046,7 +1062,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd) } } - pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME); + pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME); /* * Check the health of the controller -- GitLab From 8828622fb9b4201eeb0870587052e3d834cfaf61 Mon Sep 17 00:00:00 2001 From: Howard Chung Date: Thu, 12 Mar 2020 12:35:27 +0800 Subject: [PATCH 0988/1304] Bluetooth: L2CAP: handle l2cap config request during open state [ Upstream commit 96298f640104e4cd9a913a6e50b0b981829b94ff ] According to Core Spec Version 5.2 | Vol 3, Part A 6.1.5, the incoming L2CAP_ConfigReq should be handled during OPEN state. 
The section below shows the btmon trace when running L2CAP/COS/CFD/BV-12-C before and after this change. === Before === ... > ACL Data RX: Handle 256 flags 0x02 dlen 12 #22 L2CAP: Connection Request (0x02) ident 2 len 4 PSM: 1 (0x0001) Source CID: 65 < ACL Data TX: Handle 256 flags 0x00 dlen 16 #23 L2CAP: Connection Response (0x03) ident 2 len 8 Destination CID: 64 Source CID: 65 Result: Connection successful (0x0000) Status: No further information available (0x0000) < ACL Data TX: Handle 256 flags 0x00 dlen 12 #24 L2CAP: Configure Request (0x04) ident 2 len 4 Destination CID: 65 Flags: 0x0000 > HCI Event: Number of Completed Packets (0x13) plen 5 #25 Num handles: 1 Handle: 256 Count: 1 > HCI Event: Number of Completed Packets (0x13) plen 5 #26 Num handles: 1 Handle: 256 Count: 1 > ACL Data RX: Handle 256 flags 0x02 dlen 16 #27 L2CAP: Configure Request (0x04) ident 3 len 8 Destination CID: 64 Flags: 0x0000 Option: Unknown (0x10) [hint] 01 00 .. < ACL Data TX: Handle 256 flags 0x00 dlen 18 #28 L2CAP: Configure Response (0x05) ident 3 len 10 Source CID: 65 Flags: 0x0000 Result: Success (0x0000) Option: Maximum Transmission Unit (0x01) [mandatory] MTU: 672 > HCI Event: Number of Completed Packets (0x13) plen 5 #29 Num handles: 1 Handle: 256 Count: 1 > ACL Data RX: Handle 256 flags 0x02 dlen 14 #30 L2CAP: Configure Response (0x05) ident 2 len 6 Source CID: 64 Flags: 0x0000 Result: Success (0x0000) > ACL Data RX: Handle 256 flags 0x02 dlen 20 #31 L2CAP: Configure Request (0x04) ident 3 len 12 Destination CID: 64 Flags: 0x0000 Option: Unknown (0x10) [hint] 01 00 91 02 11 11 ...... < ACL Data TX: Handle 256 flags 0x00 dlen 14 #32 L2CAP: Command Reject (0x01) ident 3 len 6 Reason: Invalid CID in request (0x0002) Destination CID: 64 Source CID: 65 > HCI Event: Number of Completed Packets (0x13) plen 5 #33 Num handles: 1 Handle: 256 Count: 1 ... === After === ... 
> ACL Data RX: Handle 256 flags 0x02 dlen 12 #22 L2CAP: Connection Request (0x02) ident 2 len 4 PSM: 1 (0x0001) Source CID: 65 < ACL Data TX: Handle 256 flags 0x00 dlen 16 #23 L2CAP: Connection Response (0x03) ident 2 len 8 Destination CID: 64 Source CID: 65 Result: Connection successful (0x0000) Status: No further information available (0x0000) < ACL Data TX: Handle 256 flags 0x00 dlen 12 #24 L2CAP: Configure Request (0x04) ident 2 len 4 Destination CID: 65 Flags: 0x0000 > HCI Event: Number of Completed Packets (0x13) plen 5 #25 Num handles: 1 Handle: 256 Count: 1 > HCI Event: Number of Completed Packets (0x13) plen 5 #26 Num handles: 1 Handle: 256 Count: 1 > ACL Data RX: Handle 256 flags 0x02 dlen 16 #27 L2CAP: Configure Request (0x04) ident 3 len 8 Destination CID: 64 Flags: 0x0000 Option: Unknown (0x10) [hint] 01 00 .. < ACL Data TX: Handle 256 flags 0x00 dlen 18 #28 L2CAP: Configure Response (0x05) ident 3 len 10 Source CID: 65 Flags: 0x0000 Result: Success (0x0000) Option: Maximum Transmission Unit (0x01) [mandatory] MTU: 672 > HCI Event: Number of Completed Packets (0x13) plen 5 #29 Num handles: 1 Handle: 256 Count: 1 > ACL Data RX: Handle 256 flags 0x02 dlen 14 #30 L2CAP: Configure Response (0x05) ident 2 len 6 Source CID: 64 Flags: 0x0000 Result: Success (0x0000) > ACL Data RX: Handle 256 flags 0x02 dlen 20 #31 L2CAP: Configure Request (0x04) ident 3 len 12 Destination CID: 64 Flags: 0x0000 Option: Unknown (0x10) [hint] 01 00 91 02 11 11 ..... 
< ACL Data TX: Handle 256 flags 0x00 dlen 18 #32 L2CAP: Configure Response (0x05) ident 3 len 10 Source CID: 65 Flags: 0x0000 Result: Success (0x0000) Option: Maximum Transmission Unit (0x01) [mandatory] MTU: 672 < ACL Data TX: Handle 256 flags 0x00 dlen 12 #33 L2CAP: Configure Request (0x04) ident 3 len 4 Destination CID: 65 Flags: 0x0000 > HCI Event: Number of Completed Packets (0x13) plen 5 #34 Num handles: 1 Handle: 256 Count: 1 > HCI Event: Number of Completed Packets (0x13) plen 5 #35 Num handles: 1 Handle: 256 Count: 1 ... Signed-off-by: Howard Chung Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin --- net/bluetooth/l2cap_core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index b1f51cb007ea..c04107d44601 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -4117,7 +4117,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, return 0; } - if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { + if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && + chan->state != BT_CONNECTED) { cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, chan->dcid); goto unlock; -- GitLab From 7bf061465fc7c1da22bac37d46c49662dafa27ab Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 10 Feb 2020 15:26:46 +0100 Subject: [PATCH 0989/1304] media: tda10071: fix unsigned sign extension overflow [ Upstream commit a7463e2dc698075132de9905b89f495df888bb79 ] The shifting of buf[3] by 24 bits to the left will be promoted to a 32 bit signed int and then sign-extended to an unsigned long. In the unlikely event that the the top bit of buf[3] is set then all then all the upper bits end up as also being set because of the sign-extension and this affect the ev->post_bit_error sum. Fix this by using the temporary u32 variable bit_error to avoid the sign-extension promotion. This also removes the need to do the computation twice. 
Addresses-Coverity: ("Unintended sign extension") Fixes: 267897a4708f ("[media] tda10071: implement DVBv5 statistics") Signed-off-by: Colin Ian King Signed-off-by: Sean Young Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/dvb-frontends/tda10071.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c index 097c42d3f8c2..df0c7243eafe 100644 --- a/drivers/media/dvb-frontends/tda10071.c +++ b/drivers/media/dvb-frontends/tda10071.c @@ -483,10 +483,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status) goto error; if (dev->delivery_system == SYS_DVBS) { - dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; - dev->post_bit_error += buf[0] << 24 | buf[1] << 16 | - buf[2] << 8 | buf[3] << 0; + u32 bit_error = buf[0] << 24 | buf[1] << 16 | + buf[2] << 8 | buf[3] << 0; + + dev->dvbv3_ber = bit_error; + dev->post_bit_error += bit_error; c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c->post_bit_error.stat[0].uvalue = dev->post_bit_error; dev->block_error += buf[4] << 8 | buf[5] << 0; -- GitLab From 6ab959f1299512f9986db48347fff434ce7d33b8 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Wed, 11 Mar 2020 10:37:55 -0700 Subject: [PATCH 0990/1304] xfs: don't ever return a stale pointer from __xfs_dir3_free_read [ Upstream commit 1cb5deb5bc095c070c09a4540c45f9c9ba24be43 ] If we decide that a directory free block is corrupt, we must take care not to leak a buffer pointer to the caller. After xfs_trans_brelse returns, the buffer can be freed or reused, which means that we have to set *bpp back to NULL. Callers are supposed to notice the nonzero return value and not use the buffer pointer, but we should code more defensively, even if all current callers handle this situation correctly. Fixes: de14c5f541e7 ("xfs: verify free block header fields") Signed-off-by: Darrick J. 
Wong Reviewed-by: Dave Chinner Signed-off-by: Sasha Levin --- fs/xfs/libxfs/xfs_dir2_node.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c index f1bb3434f51c..01e99806b941 100644 --- a/fs/xfs/libxfs/xfs_dir2_node.c +++ b/fs/xfs/libxfs/xfs_dir2_node.c @@ -214,6 +214,7 @@ __xfs_dir3_free_read( if (fa) { xfs_verifier_error(*bpp, -EFSCORRUPTED, fa); xfs_trans_brelse(tp, *bpp); + *bpp = NULL; return -EFSCORRUPTED; } -- GitLab From 7fff3f7fe9a8643ebfd40ab8ed4ff67dd8879fbc Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Wed, 11 Mar 2020 10:37:57 -0700 Subject: [PATCH 0991/1304] xfs: mark dir corrupt when lookup-by-hash fails [ Upstream commit 2e107cf869eecc770e3f630060bb4e5f547d0fd8 ] In xchk_dir_actor, we attempt to validate the directory hash structures by performing a directory entry lookup by (hashed) name. If the lookup returns ENOENT, that means that the hash information is corrupt. The _process_error functions don't catch this, so we have to add that explicitly. Signed-off-by: Darrick J. 
Wong Reviewed-by: Dave Chinner Signed-off-by: Sasha Levin --- fs/xfs/scrub/dir.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c index cd3e4d768a18..33dfcba72c7a 100644 --- a/fs/xfs/scrub/dir.c +++ b/fs/xfs/scrub/dir.c @@ -156,6 +156,9 @@ xchk_dir_actor( xname.type = XFS_DIR3_FT_UNKNOWN; error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL); + /* ENOENT means the hash lookup failed and the dir is corrupt */ + if (error == -ENOENT) + error = -EFSCORRUPTED; if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset, &error)) goto out; -- GitLab From ff331054567bc3f53155875131902fb178a31de6 Mon Sep 17 00:00:00 2001 From: Dmitry Monakhov Date: Tue, 10 Mar 2020 15:01:56 +0000 Subject: [PATCH 0992/1304] ext4: mark block bitmap corrupted when found instead of BUGON [ Upstream commit eb5760863fc28feab28b567ddcda7e667e638da0 ] We already has similar code in ext4_mb_complex_scan_group(), but ext4_mb_simple_scan_group() still affected. Other reports: https://www.spinics.net/lists/linux-ext4/msg60231.html Reviewed-by: Andreas Dilger Signed-off-by: Dmitry Monakhov Link: https://lore.kernel.org/r/20200310150156.641-1-dmonakhov@gmail.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/ext4/mballoc.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 8dd54a8a0361..054cfdd007d6 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, BUG_ON(buddy == NULL); k = mb_find_next_zero_bit(buddy, max, 0); - BUG_ON(k >= max); - + if (k >= max) { + ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, + "%d free clusters of order %d. 
But found 0", + grp->bb_counters[i], i); + ext4_mark_group_bitmap_corrupted(ac->ac_sb, + e4b->bd_group, + EXT4_GROUP_INFO_BBITMAP_CORRUPT); + break; + } ac->ac_found++; ac->ac_b_ex.fe_len = 1 << i; -- GitLab From abc5b427c59c3b7445cb9b59289908e3555f02ed Mon Sep 17 00:00:00 2001 From: Stefan Berger Date: Thu, 12 Mar 2020 11:53:31 -0400 Subject: [PATCH 0993/1304] tpm: ibmvtpm: Wait for buffer to be set before proceeding [ Upstream commit d8d74ea3c00214aee1e1826ca18e77944812b9b4 ] Synchronize with the results from the CRQs before continuing with the initialization. This avoids trying to send TPM commands while the rtce buffer has not been allocated, yet. This patch fixes an existing race condition that may occurr if the hypervisor does not quickly respond to the VTPM_GET_RTCE_BUFFER_SIZE request sent during initialization and therefore the ibmvtpm->rtce_buf has not been allocated at the time the first TPM command is sent. Fixes: 132f76294744 ("drivers/char/tpm: Add new device driver to support IBM vTPM") Signed-off-by: Stefan Berger Acked-by: Nayna Jain Tested-by: Nayna Jain Reviewed-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen Signed-off-by: Sasha Levin --- drivers/char/tpm/tpm_ibmvtpm.c | 9 +++++++++ drivers/char/tpm/tpm_ibmvtpm.h | 1 + 2 files changed, 10 insertions(+) diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 569e93e1f06c..3ba67bc6baba 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -588,6 +588,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance) */ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) { ibmvtpm_crq_process(crq, ibmvtpm); + wake_up_interruptible(&ibmvtpm->crq_queue.wq); crq->valid = 0; smp_wmb(); } @@ -635,6 +636,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, } crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr); + init_waitqueue_head(&crq_q->wq); ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr, 
CRQ_RES_BUF_SIZE, DMA_BIDIRECTIONAL); @@ -687,6 +689,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev, if (rc) goto init_irq_cleanup; + if (!wait_event_timeout(ibmvtpm->crq_queue.wq, + ibmvtpm->rtce_buf != NULL, + HZ)) { + dev_err(dev, "CRQ response timed out\n"); + goto init_irq_cleanup; + } + return tpm_chip_register(chip); init_irq_cleanup: do { diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index 91dfe766d080..4f6a124601db 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h @@ -31,6 +31,7 @@ struct ibmvtpm_crq_queue { struct ibmvtpm_crq *crq_addr; u32 index; u32 num_entry; + wait_queue_head_t wq; }; struct ibmvtpm_dev { -- GitLab From e934a66dc2c109b1dcf8d0a120393981c5799d68 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Fri, 6 Mar 2020 02:01:44 +0100 Subject: [PATCH 0994/1304] rtc: sa1100: fix possible race condition [ Upstream commit f2997775b111c6d660c32a18d5d44d37cb7361b1 ] Both RTC IRQs are requested before the struct rtc_device is allocated, this may lead to a NULL pointer dereference in the IRQ handler. To fix this issue, allocating the rtc_device struct before requesting the IRQs using devm_rtc_allocate_device, and use rtc_register_device to register the RTC device. 
Link: https://lore.kernel.org/r/20200306010146.39762-1-alexandre.belloni@bootlin.com Signed-off-by: Alexandre Belloni Signed-off-by: Sasha Levin --- drivers/rtc/rtc-sa1100.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 304d905cb23f..56f625371735 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -186,7 +186,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = { int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) { - struct rtc_device *rtc; int ret; spin_lock_init(&info->lock); @@ -215,15 +214,14 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) writel_relaxed(0, info->rcnr); } - rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops, - THIS_MODULE); - if (IS_ERR(rtc)) { + info->rtc->ops = &sa1100_rtc_ops; + info->rtc->max_user_freq = RTC_FREQ; + + ret = rtc_register_device(info->rtc); + if (ret) { clk_disable_unprepare(info->clk); - return PTR_ERR(rtc); + return ret; } - info->rtc = rtc; - - rtc->max_user_freq = RTC_FREQ; /* Fix for a nasty initialization problem the in SA11xx RTSR register. * See also the comments in sa1100_rtc_interrupt(). @@ -272,6 +270,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev) info->irq_1hz = irq_1hz; info->irq_alarm = irq_alarm; + info->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(info->rtc)) + return PTR_ERR(info->rtc); + ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", &pdev->dev); if (ret) { -- GitLab From 142513a2e01ad8952bb63034436d7e7f463f9f93 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Fri, 6 Mar 2020 08:34:01 +0100 Subject: [PATCH 0995/1304] rtc: ds1374: fix possible race condition [ Upstream commit c11af8131a4e7ba1960faed731ee7e84c2c13c94 ] The RTC IRQ is requested before the struct rtc_device is allocated, this may lead to a NULL pointer dereference in the IRQ handler. 
To fix this issue, allocating the rtc_device struct before requesting the RTC IRQ using devm_rtc_allocate_device, and use rtc_register_device to register the RTC device. Link: https://lore.kernel.org/r/20200306073404.56921-1-alexandre.belloni@bootlin.com Signed-off-by: Alexandre Belloni Signed-off-by: Sasha Levin --- drivers/rtc/rtc-ds1374.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 38a2e9e684df..77a106e90124 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client, if (!ds1374) return -ENOMEM; + ds1374->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(ds1374->rtc)) + return PTR_ERR(ds1374->rtc); + ds1374->client = client; i2c_set_clientdata(client, ds1374); @@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client, device_set_wakeup_capable(&client->dev, 1); } - ds1374->rtc = devm_rtc_device_register(&client->dev, client->name, - &ds1374_rtc_ops, THIS_MODULE); - if (IS_ERR(ds1374->rtc)) { - dev_err(&client->dev, "unable to register the class device\n"); - return PTR_ERR(ds1374->rtc); - } + ds1374->rtc->ops = &ds1374_rtc_ops; + + ret = rtc_register_device(ds1374->rtc); + if (ret) + return ret; #ifdef CONFIG_RTC_DRV_DS1374_WDT save_client = client; -- GitLab From 1ab250aabad1dd0a9f656b976c3b59f2aaa19405 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 1 Mar 2020 18:21:38 -0500 Subject: [PATCH 0996/1304] nfsd: Don't add locks to closed or closing open stateids [ Upstream commit a451b12311aa8c96c6f6e01c783a86995dc3ec6b ] In NFSv4, the lock stateids are tied to the lockowner, and the open stateid, so that the action of closing the file also results in either an automatic loss of the locks, or an error of the form NFS4ERR_LOCKS_HELD. In practice this means we must not add new locks to the open stateid after the close process has been invoked. 
In fact doing so, can result in the following panic: kernel BUG at lib/list_debug.c:51! invalid opcode: 0000 [#1] SMP NOPTI CPU: 2 PID: 1085 Comm: nfsd Not tainted 5.6.0-rc3+ #2 Hardware name: VMware, Inc. VMware7,1/440BX Desktop Reference Platform, BIOS VMW71.00V.14410784.B64.1908150010 08/15/2019 RIP: 0010:__list_del_entry_valid.cold+0x31/0x55 Code: 1a 3d 9b e8 74 10 c2 ff 0f 0b 48 c7 c7 f0 1a 3d 9b e8 66 10 c2 ff 0f 0b 48 89 f2 48 89 fe 48 c7 c7 b0 1a 3d 9b e8 52 10 c2 ff <0f> 0b 48 89 fe 4c 89 c2 48 c7 c7 78 1a 3d 9b e8 3e 10 c2 ff 0f 0b RSP: 0018:ffffb296c1d47d90 EFLAGS: 00010246 RAX: 0000000000000054 RBX: ffff8ba032456ec8 RCX: 0000000000000000 RDX: 0000000000000000 RSI: ffff8ba039e99cc8 RDI: ffff8ba039e99cc8 RBP: ffff8ba032456e60 R08: 0000000000000781 R09: 0000000000000003 R10: 0000000000000000 R11: 0000000000000001 R12: ffff8ba009a4abe0 R13: ffff8ba032456e8c R14: 0000000000000000 R15: ffff8ba00adb01d8 FS: 0000000000000000(0000) GS:ffff8ba039e80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fb213f0b008 CR3: 00000001347de006 CR4: 00000000003606e0 Call Trace: release_lock_stateid+0x2b/0x80 [nfsd] nfsd4_free_stateid+0x1e9/0x210 [nfsd] nfsd4_proc_compound+0x414/0x700 [nfsd] ? nfs4svc_decode_compoundargs+0x407/0x4c0 [nfsd] nfsd_dispatch+0xc1/0x200 [nfsd] svc_process_common+0x476/0x6f0 [sunrpc] ? svc_sock_secure_port+0x12/0x30 [sunrpc] ? svc_recv+0x313/0x9c0 [sunrpc] ? nfsd_svc+0x2d0/0x2d0 [nfsd] svc_process+0xd4/0x110 [sunrpc] nfsd+0xe3/0x140 [nfsd] kthread+0xf9/0x130 ? nfsd_destroy+0x50/0x50 [nfsd] ? kthread_park+0x90/0x90 ret_from_fork+0x1f/0x40 The fix is to ensure that lock creation tests for whether or not the open stateid is unhashed, and to fail if that is the case. 
Fixes: 659aefb68eca ("nfsd: Ensure we don't recognise lock stateids after freeing them") Signed-off-by: Trond Myklebust Signed-off-by: Chuck Lever Signed-off-by: Sasha Levin --- fs/nfsd/nfs4state.c | 73 ++++++++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 30 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c24306af9758..655079ae1dd1 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -471,6 +471,8 @@ find_any_file(struct nfs4_file *f) { struct file *ret; + if (!f) + return NULL; spin_lock(&f->fi_lock); ret = __nfs4_get_fd(f, O_RDWR); if (!ret) { @@ -1207,6 +1209,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop) nfs4_free_stateowner(sop); } +static bool +nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp) +{ + return list_empty(&stp->st_perfile); +} + static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_file *fp = stp->st_stid.sc_file; @@ -1274,9 +1282,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) { lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); + if (!unhash_ol_stateid(stp)) + return false; list_del_init(&stp->st_locks); nfs4_unhash_stid(&stp->st_stid); - return unhash_ol_stateid(stp); + return true; } static void release_lock_stateid(struct nfs4_ol_stateid *stp) @@ -1341,13 +1351,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { - bool unhashed; - lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); - unhashed = unhash_ol_stateid(stp); + if (!unhash_ol_stateid(stp)) + return false; release_open_stateid_locks(stp, reaplist); - return unhashed; + return true; } static void release_open_stateid(struct nfs4_ol_stateid *stp) @@ -5774,21 +5783,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, } static struct nfs4_ol_stateid * -find_lock_stateid(struct nfs4_lockowner *lo, struct 
nfs4_file *fp) +find_lock_stateid(const struct nfs4_lockowner *lo, + const struct nfs4_ol_stateid *ost) { struct nfs4_ol_stateid *lst; - struct nfs4_client *clp = lo->lo_owner.so_client; - lockdep_assert_held(&clp->cl_lock); + lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); - list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { - if (lst->st_stid.sc_type != NFS4_LOCK_STID) - continue; - if (lst->st_stid.sc_file == fp) { - refcount_inc(&lst->st_stid.sc_count); - return lst; + /* If ost is not hashed, ost->st_locks will not be valid */ + if (!nfs4_ol_stateid_unhashed(ost)) + list_for_each_entry(lst, &ost->st_locks, st_locks) { + if (lst->st_stateowner == &lo->lo_owner) { + refcount_inc(&lst->st_stid.sc_count); + return lst; + } } - } return NULL; } @@ -5804,11 +5813,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); retry: spin_lock(&clp->cl_lock); - spin_lock(&fp->fi_lock); - retstp = find_lock_stateid(lo, fp); + if (nfs4_ol_stateid_unhashed(open_stp)) + goto out_close; + retstp = find_lock_stateid(lo, open_stp); if (retstp) - goto out_unlock; - + goto out_found; refcount_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_LOCK_STID; stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); @@ -5817,22 +5826,26 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; + spin_lock(&fp->fi_lock); list_add(&stp->st_locks, &open_stp->st_locks); list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); list_add(&stp->st_perfile, &fp->fi_stateids); -out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&clp->cl_lock); - if (retstp) { - if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { - nfs4_put_stid(&retstp->st_stid); - goto retry; - } - /* To keep mutex tracking happy */ - mutex_unlock(&stp->st_mutex); - stp = retstp; - } return stp; +out_found: + 
spin_unlock(&clp->cl_lock); + if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { + nfs4_put_stid(&retstp->st_stid); + goto retry; + } + /* To keep mutex tracking happy */ + mutex_unlock(&stp->st_mutex); + return retstp; +out_close: + spin_unlock(&clp->cl_lock); + mutex_unlock(&stp->st_mutex); + return NULL; } static struct nfs4_ol_stateid * @@ -5847,7 +5860,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, *new = false; spin_lock(&clp->cl_lock); - lst = find_lock_stateid(lo, fi); + lst = find_lock_stateid(lo, ost); spin_unlock(&clp->cl_lock); if (lst != NULL) { if (nfsd4_lock_ol_stateid(lst) == nfs_ok) -- GitLab From 851eba100212a76f7ec4408f6ab3ae5a5b4bfd70 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Tue, 10 Mar 2020 11:25:33 +0200 Subject: [PATCH 0997/1304] RDMA/cm: Remove a race freeing timewait_info [ Upstream commit bede86a39d9dc3387ac00dcb8e1ac221676b2f25 ] When creating a cm_id during REQ the id immediately becomes visible to the other MAD handlers, and shortly after the state is moved to IB_CM_REQ_RCVD This allows cm_rej_handler() to run concurrently and free the work: CPU 0 CPU1 cm_req_handler() ib_create_cm_id() cm_match_req() id_priv->state = IB_CM_REQ_RCVD cm_rej_handler() cm_acquire_id() spin_lock(&id_priv->lock) switch (id_priv->state) case IB_CM_REQ_RCVD: cm_reset_to_idle() kfree(id_priv->timewait_info); goto destroy destroy: kfree(id_priv->timewait_info); id_priv->timewait_info = NULL Causing a double free or worse. Do not free the timewait_info without also holding the id_priv->lock. Simplify this entire flow by making the free unconditional during cm_destroy_id() and removing the confusing special case error unwind during creation of the timewait_info. This also fixes a leak of the timewait if cm_destroy_id() is called in IB_CM_ESTABLISHED with an XRC TGT QP. The state machine will be left in ESTABLISHED while it needed to transition through IB_CM_TIMEWAIT to release the timewait pointer. 
Also fix a leak of the timewait_info if the caller mis-uses the API and does ib_send_cm_reqs(). Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation") Link: https://lore.kernel.org/r/20200310092545.251365-4-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/core/cm.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 64f206e11d49..4ebf63360a69 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1100,14 +1100,22 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err) break; } - spin_lock_irq(&cm.lock); + spin_lock_irq(&cm_id_priv->lock); + spin_lock(&cm.lock); + /* Required for cleanup paths related cm_req_handler() */ + if (cm_id_priv->timewait_info) { + cm_cleanup_timewait(cm_id_priv->timewait_info); + kfree(cm_id_priv->timewait_info); + cm_id_priv->timewait_info = NULL; + } if (!list_empty(&cm_id_priv->altr_list) && (!cm_id_priv->altr_send_port_not_ready)) list_del(&cm_id_priv->altr_list); if (!list_empty(&cm_id_priv->prim_list) && (!cm_id_priv->prim_send_port_not_ready)) list_del(&cm_id_priv->prim_list); - spin_unlock_irq(&cm.lock); + spin_unlock(&cm.lock); + spin_unlock_irq(&cm_id_priv->lock); cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); @@ -1424,7 +1432,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, /* Verify that we're not in timewait. 
*/ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); - if (cm_id->state != IB_CM_IDLE) { + if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); ret = -EINVAL; goto out; @@ -1442,12 +1450,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, param->ppath_sgid_attr, &cm_id_priv->av, cm_id_priv); if (ret) - goto error1; + goto out; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av, cm_id_priv); if (ret) - goto error1; + goto out; } cm_id->service_id = param->service_id; cm_id->service_mask = ~cpu_to_be64(0); @@ -1465,7 +1473,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg); if (ret) - goto error1; + goto out; req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; cm_format_req(req_msg, cm_id_priv, param); @@ -1488,7 +1496,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, return 0; error2: cm_free_msg(cm_id_priv->msg); -error1: kfree(cm_id_priv->timewait_info); out: return ret; } EXPORT_SYMBOL(ib_send_cm_req); @@ -1973,7 +1980,7 @@ static int cm_req_handler(struct cm_work *work) pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__, be32_to_cpu(cm_id->local_id)); ret = -EINVAL; - goto free_timeinfo; + goto destroy; } cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; @@ -2057,8 +2064,6 @@ static int cm_req_handler(struct cm_work *work) rejected: atomic_dec(&cm_id_priv->refcount); cm_deref_id(listen_cm_id_priv); -free_timeinfo: - kfree(cm_id_priv->timewait_info); destroy: ib_destroy_cm_id(cm_id); return ret; -- GitLab From 228403834931474902e29544faa4b860f59cbe9b Mon Sep 17 00:00:00 2001 From: Gustavo Romero Date: Fri, 21 Feb 2020 11:29:50 -0500 Subject: [PATCH 0998/1304] KVM: PPC: Book3S HV: Treat TM-related invalid form instructions on P9 like the valid ones [ Upstream commit 1dff3064c764b5a51c367b949b341d2e38972bec ] On P9 DD2.2 due to 
a CPU defect some TM instructions need to be emulated by KVM. This is handled at first by the hardware raising a softpatch interrupt when certain TM instructions that need KVM assistance are executed in the guest. Although some TM instructions per Power ISA are invalid forms they can raise a softpatch interrupt too. For instance, 'tresume.' instruction as defined in the ISA must have bit 31 set (1), but an instruction that matches 'tresume.' PO and XO opcode fields but has bit 31 not set (0), like 0x7cfe9ddc, also raises a softpatch interrupt. Similarly for 'treclaim.' and 'trechkpt.' instructions with bit 31 = 0, i.e. 0x7c00075c and 0x7c0007dc, respectively. Hence, if a code like the following is executed in the guest it will raise a softpatch interrupt just like a 'tresume.' when the TM facility is enabled ('tabort. 0' in the example is used only to enable the TM facility): int main() { asm("tabort. 0; .long 0x7cfe9ddc;"); } Currently in such a case KVM throws a complete trace like: [345523.705984] WARNING: CPU: 24 PID: 64413 at arch/powerpc/kvm/book3s_hv_tm.c:211 kvmhv_p9_tm_emulation+0x68/0x620 [kvm_hv] [345523.705985] Modules linked in: kvm_hv(E) xt_conntrack ipt_REJECT nf_reject_ipv4 xt_tcpudp ip6table_mangle ip6table_nat iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter bridge stp llc sch_fq_codel ipmi_powernv at24 vmx_crypto ipmi_devintf ipmi_msghandler ibmpowernv uio_pdrv_genirq kvm opal_prd uio leds_powernv ib_iser rdma_cm iw_cm ib_cm ib_core iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi ip_tables x_tables autofs4 btrfs blake2b_generic zstd_compress raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx libcrc32c xor raid6_pq raid1 raid0 multipath linear tg3 crct10dif_vpmsum crc32c_vpmsum ipr [last unloaded: kvm_hv] [345523.706030] CPU: 24 PID: 64413 Comm: CPU 0/KVM Tainted: G W E 5.5.0+ #1 [345523.706031] NIP: c0080000072cb9c0 LR:
c0080000072b5e80 CTR: c0080000085c7850 [345523.706034] REGS: c000000399467680 TRAP: 0700 Tainted: G W E (5.5.0+) [345523.706034] MSR: 900000010282b033 CR: 24022428 XER: 00000000 [345523.706042] CFAR: c0080000072b5e7c IRQMASK: 0 GPR00: c0080000072b5e80 c000000399467910 c0080000072db500 c000000375ccc720 GPR04: c000000375ccc720 00000003fbec0000 0000a10395dda5a6 0000000000000000 GPR08: 000000007cfe9ddc 7cfe9ddc000005dc 7cfe9ddc7c0005dc c0080000072cd530 GPR12: c0080000085c7850 c0000003fffeb800 0000000000000001 00007dfb737f0000 GPR16: c0002001edcca558 0000000000000000 0000000000000000 0000000000000001 GPR20: c000000001b21258 c0002001edcca558 0000000000000018 0000000000000000 GPR24: 0000000001000000 ffffffffffffffff 0000000000000001 0000000000001500 GPR28: c0002001edcc4278 c00000037dd80000 800000050280f033 c000000375ccc720 [345523.706062] NIP [c0080000072cb9c0] kvmhv_p9_tm_emulation+0x68/0x620 [kvm_hv] [345523.706065] LR [c0080000072b5e80] kvmppc_handle_exit_hv.isra.53+0x3e8/0x798 [kvm_hv] [345523.706066] Call Trace: [345523.706069] [c000000399467910] [c000000399467940] 0xc000000399467940 (unreliable) [345523.706071] [c000000399467950] [c000000399467980] 0xc000000399467980 [345523.706075] [c0000003994679f0] [c0080000072bd1c4] kvmhv_run_single_vcpu+0xa1c/0xb80 [kvm_hv] [345523.706079] [c000000399467ac0] [c0080000072bd8e0] kvmppc_vcpu_run_hv+0x5b8/0xb00 [kvm_hv] [345523.706087] [c000000399467b90] [c0080000085c93cc] kvmppc_vcpu_run+0x34/0x48 [kvm] [345523.706095] [c000000399467bb0] [c0080000085c582c] kvm_arch_vcpu_ioctl_run+0x244/0x420 [kvm] [345523.706101] [c000000399467c40] [c0080000085b7498] kvm_vcpu_ioctl+0x3d0/0x7b0 [kvm] [345523.706105] [c000000399467db0] [c0000000004adf9c] ksys_ioctl+0x13c/0x170 [345523.706107] [c000000399467e00] [c0000000004adff8] sys_ioctl+0x28/0x80 [345523.706111] [c000000399467e20] [c00000000000b278] system_call+0x5c/0x68 [345523.706112] Instruction dump: [345523.706114] 419e0390 7f8a4840 409d0048 6d497c00 2f89075d 419e021c 6d497c00 2f8907dd 
[345523.706119] 419e01c0 6d497c00 2f8905dd 419e00a4 <0fe00000> 38210040 38600000 ebc1fff0 and then treats the executed instruction as a 'nop'. However the POWER9 User's Manual, in section "4.6.10 Book II Invalid Forms", informs that for TM instructions bit 31 is in fact ignored, thus for the TM-related invalid forms ignoring bit 31 and handling them like the valid forms is an acceptable way to handle them. POWER8 behaves the same way too. This commit changes the handling of the cases here described by treating the TM-related invalid forms that can generate a softpatch interrupt just like their valid forms (w/ bit 31 = 1) instead of as a 'nop' and by gently reporting any other unrecognized case to the host and treating it as illegal instruction instead of throwing a trace and treating it as a 'nop'. Signed-off-by: Gustavo Romero Reviewed-by: Segher Boessenkool Acked-By: Michael Neuling Reviewed-by: Leonardo Bras Signed-off-by: Paul Mackerras Signed-off-by: Sasha Levin --- arch/powerpc/include/asm/kvm_asm.h | 3 +++ arch/powerpc/kvm/book3s_hv_tm.c | 28 ++++++++++++++++++++----- arch/powerpc/kvm/book3s_hv_tm_builtin.c | 16 ++++++++++++-- 3 files changed, 40 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index a790d5cf6ea3..684e8ae00d16 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -163,4 +163,7 @@ #define KVM_INST_FETCH_FAILED -1 +/* Extract PO and XOP opcode fields */ +#define PO_XOP_OPCODE_MASK 0xfc0007fe + #endif /* __POWERPC_KVM_ASM_H__ */ diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c index 31cd0f327c8a..e7fd60cf9780 100644 --- a/arch/powerpc/kvm/book3s_hv_tm.c +++ b/arch/powerpc/kvm/book3s_hv_tm.c @@ -6,6 +6,8 @@ * published by the Free Software Foundation. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include @@ -47,7 +49,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) u64 newmsr, bescr; int ra, rs; - switch (instr & 0xfc0007ff) { + /* + * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit + * in these instructions, so masking bit 31 out doesn't change these + * instructions. For treclaim., tsr., and trechkpt. instructions if bit + * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section + * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit + * 31 is an acceptable way to handle these invalid forms that have + * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/ + * bit 31 set) can generate a softpatch interrupt. Hence both forms + * are handled below for these instructions so they behave the same way. + */ + switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? */ newmsr = vcpu->arch.shregs.srr1; @@ -108,7 +121,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = newmsr; return RESUME_GUEST; - case PPC_INST_TSR: + /* ignore bit 31, see comment above */ + case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* check for PR=1 and arch 2.06 bit set in PCR */ if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { /* generate an illegal instruction interrupt */ @@ -143,7 +157,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = msr; return RESUME_GUEST; - case PPC_INST_TRECLAIM: + /* ignore bit 31, see comment above */ + case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK): /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { /* generate an illegal instruction interrupt */ @@ -179,7 +194,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr &= ~MSR_TS_MASK; return RESUME_GUEST; - case PPC_INST_TRECHKPT: + /* ignore bit 31, see comment above */ + case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK): /* 
XXX do we need to check for PR=0 here? */ /* check for TM disabled in the HFSCR or MSR */ if (!(vcpu->arch.hfscr & HFSCR_TM)) { @@ -211,6 +227,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) } /* What should we do here? We didn't recognize the instruction */ - WARN_ON_ONCE(1); + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr); + return RESUME_GUEST; } diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c index 3cf5863bc06e..3c7ca2fa1959 100644 --- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c @@ -26,7 +26,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) u64 newmsr, msr, bescr; int rs; - switch (instr & 0xfc0007ff) { + /* + * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit + * in these instructions, so masking bit 31 out doesn't change these + * instructions. For the tsr. instruction if bit 31 = 0 then it is per + * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid + * Forms, informs specifically that ignoring bit 31 is an acceptable way + * to handle TM-related invalid forms that have bit 31 = 0. Moreover, + * for emulation purposes both forms (w/ and wo/ bit 31 set) can + * generate a softpatch interrupt. Hence both forms are handled below + * for tsr. to make them behave the same way. + */ + switch (instr & PO_XOP_OPCODE_MASK) { case PPC_INST_RFID: /* XXX do we need to check for PR=0 here? 
*/ newmsr = vcpu->arch.shregs.srr1; @@ -76,7 +87,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu) vcpu->arch.shregs.msr = newmsr; return 1; - case PPC_INST_TSR: + /* ignore bit 31, see comment above */ + case (PPC_INST_TSR & PO_XOP_OPCODE_MASK): /* we know the MSR has the TS field = S (0b01) here */ msr = vcpu->arch.shregs.msr; /* check for PR=1 and arch 2.06 bit set in PCR */ -- GitLab From 45e618016a40714ffecb392ecc20dd32a2d295a4 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Mon, 9 Mar 2020 11:14:10 +0100 Subject: [PATCH 0999/1304] drm/msm: fix leaks if initialization fails [ Upstream commit 66be340f827554cb1c8a1ed7dea97920b4085af2 ] We should free resources in unlikely case of allocation failure. Signed-off-by: Pavel Machek Signed-off-by: Rob Clark Signed-off-by: Sasha Levin --- drivers/gpu/drm/msm/msm_drv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 7f45486b6650..3ba3ae9749be 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -495,8 +495,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (!dev->dma_parms) { dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); - if (!dev->dma_parms) - return -ENOMEM; + if (!dev->dma_parms) { + ret = -ENOMEM; + goto err_msm_uninit; + } } dma_set_max_seg_size(dev, DMA_BIT_MASK(32)); -- GitLab From 102bdec1d1cf196aaafa4e54513490d5de6e05a4 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Fri, 14 Feb 2020 11:36:44 -0700 Subject: [PATCH 1000/1304] drm/msm/a5xx: Always set an OPP supported hardware value [ Upstream commit 0478b4fc5f37f4d494245fe7bcce3f531cf380e9 ] If the opp table specifies opp-supported-hw as a property but the driver has not set a supported hardware value the OPP subsystem will reject all the table entries. Set a "default" value that will match the default table entries but not conflict with any possible real bin values. 
Also fix a small memory leak and free the buffer allocated by nvmem_cell_read(). Signed-off-by: Jordan Crouse Reviewed-by: Eric Anholt Signed-off-by: Rob Clark Signed-off-by: Sasha Levin --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 1fc9a7fa37b4..d29a58bd2f7a 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1474,18 +1474,31 @@ static const struct adreno_gpu_funcs funcs = { static void check_speed_bin(struct device *dev) { struct nvmem_cell *cell; - u32 bin, val; + u32 val; + + /* + * If the OPP table specifies a opp-supported-hw property then we have + * to set something with dev_pm_opp_set_supported_hw() or the table + * doesn't get populated so pick an arbitrary value that should + * ensure the default frequencies are selected but not conflict with any + * actual bins + */ + val = 0x80; cell = nvmem_cell_get(dev, "speed_bin"); - /* If a nvmem cell isn't defined, nothing to do */ - if (IS_ERR(cell)) - return; + if (!IS_ERR(cell)) { + void *buf = nvmem_cell_read(cell, NULL); + + if (!IS_ERR(buf)) { + u8 bin = *((u8 *) buf); - bin = *((u32 *) nvmem_cell_read(cell, NULL)); - nvmem_cell_put(cell); + val = (1 << bin); + kfree(buf); + } - val = (1 << bin); + nvmem_cell_put(cell); + } dev_pm_opp_set_supported_hw(dev, &val, 1); } -- GitLab From b92d156a32b8f40974ba38e0154b327d9605946e Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Wed, 19 Feb 2020 22:10:12 -0700 Subject: [PATCH 1001/1304] tracing: Use address-of operator on section symbols [ Upstream commit bf2cbe044da275021b2de5917240411a19e5c50d ] Clang warns: ../kernel/trace/trace.c:9335:33: warning: array comparison always evaluates to true [-Wtautological-compare] if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) ^ 1 warning generated. 
These are not true arrays, they are linker defined symbols, which are just addresses. Using the address of operator silences the warning and does not change the runtime result of the check (tested with some print statements compiled in with clang + ld.lld and gcc + ld.bfd in QEMU). Link: http://lkml.kernel.org/r/20200220051011.26113-1-natechancellor@gmail.com Link: https://github.com/ClangBuiltLinux/linux/issues/893 Suggested-by: Nick Desaulniers Signed-off-by: Nathan Chancellor Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Sasha Levin --- kernel/trace/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 17505a22d800..6bf617ff0369 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -8529,7 +8529,7 @@ __init static int tracer_alloc_buffers(void) goto out_free_buffer_mask; /* Only allocate trace_printk buffers if a trace_printk exists */ - if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt) + if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers(); -- GitLab From 9d8b5dbacd6b0ca84922fb12b2a05fd7e32383a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Niklas=20S=C3=B6derlund?= Date: Tue, 10 Mar 2020 12:47:09 +0100 Subject: [PATCH 1002/1304] thermal: rcar_thermal: Handle probe error gracefully MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 39056e8a989ef52486e063e34b4822b341e47b0e ] If the common register memory resource is not available the driver needs to fail gracefully to disable PM. Instead of returning the error directly store it in ret and use the already existing error path. 
Signed-off-by: Niklas Söderlund Reviewed-by: Geert Uytterhoeven Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200310114709.1483860-1-niklas.soderlund+renesas@ragnatech.se Signed-off-by: Sasha Levin --- drivers/thermal/rcar_thermal.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 4dc30e7890f6..140386d7c75a 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -505,8 +505,10 @@ static int rcar_thermal_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); common->base = devm_ioremap_resource(dev, res); - if (IS_ERR(common->base)) - return PTR_ERR(common->base); + if (IS_ERR(common->base)) { + ret = PTR_ERR(common->base); + goto error_unregister; + } idle = 0; /* polling delay is not needed */ } -- GitLab From a0100a363098c33fc1f89fdd778a2bdf91379ed7 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Sat, 14 Mar 2020 10:03:56 -0700 Subject: [PATCH 1003/1304] perf parse-events: Fix 3 use after frees found with clang ASAN [ Upstream commit d4953f7ef1a2e87ef732823af35361404d13fea8 ] Reproducible with a clang asan build and then running perf test in particular 'Parse event definition strings'. 
Signed-off-by: Ian Rogers Acked-by: Jiri Olsa Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Leo Yan Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: clang-built-linux@googlegroups.com Link: http://lore.kernel.org/lkml/20200314170356.62914-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/util/evsel.c | 1 + tools/perf/util/parse-events.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 4fad92213609..68c5ab0e1800 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1290,6 +1290,7 @@ void perf_evsel__exit(struct perf_evsel *evsel) thread_map__put(evsel->threads); zfree(&evsel->group_name); zfree(&evsel->name); + zfree(&evsel->pmu_name); perf_evsel__object.fini(evsel); } diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 95043cae5774..6d087d9acd5e 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1261,7 +1261,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, attr.type = pmu->type; evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats); if (evsel) { - evsel->pmu_name = name; + evsel->pmu_name = name ? strdup(name) : NULL; evsel->use_uncore_alias = use_uncore_alias; return 0; } else { @@ -1302,7 +1302,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, evsel->snapshot = info.snapshot; evsel->metric_expr = info.metric_expr; evsel->metric_name = info.metric_name; - evsel->pmu_name = name; + evsel->pmu_name = name ? 
strdup(name) : NULL; evsel->use_uncore_alias = use_uncore_alias; } -- GitLab From 20191760203e3d0d9d840764891ea83854a63ef8 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Thu, 19 Mar 2020 16:02:29 +0530 Subject: [PATCH 1004/1304] serial: 8250_port: Don't service RX FIFO if throttled [ Upstream commit f19c3f6c8109b8bab000afd35580929958e087a9 ] When port's throttle callback is called, it should stop pushing any more data into TTY buffer to avoid buffer overflow. This means driver has to stop HW from receiving more data and assert the HW flow control. For UARTs with auto HW flow control (such as 8250_omap) manual assertion of flow control line is not possible and only way is to allow RX FIFO to fill up, thus trigger auto HW flow control logic. Therefore make sure that 8250 generic IRQ handler does not drain data when port is stopped (i.e UART_LSR_DR is unset in read_status_mask). Not servicing, RX FIFO would trigger auto HW flow control when FIFO occupancy reaches preset threshold, thus halting RX. Since, error conditions in UART_LSR register are cleared just by reading the register, data has to be drained in case there are FIFO errors, else error information will be lost.
Signed-off-by: Vignesh Raghavendra Link: https://lore.kernel.org/r/20200319103230.16867-2-vigneshr@ti.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/serial/8250/8250_port.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 09f0dc3b967b..60ca19eca1f6 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1861,6 +1861,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) unsigned char status; unsigned long flags; struct uart_8250_port *up = up_to_u8250p(port); + bool skip_rx = false; if (iir & UART_IIR_NO_INT) return 0; @@ -1869,7 +1870,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) status = serial_port_in(port, UART_LSR); - if (status & (UART_LSR_DR | UART_LSR_BI)) { + /* + * If port is stopped and there are no error conditions in the + * FIFO, then don't drain the FIFO, as this may lead to TTY buffer + * overflow. Not servicing, RX FIFO would trigger auto HW flow + * control when FIFO occupancy reaches preset threshold, thus + * halting RX. This only works when auto HW flow control is + * available. 
+ */ + if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) && + (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) && + !(port->read_status_mask & UART_LSR_DR)) + skip_rx = true; + + if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) { if (!up->dma || handle_rx_dma(up, iir)) status = serial8250_rx_chars(up, status); } -- GitLab From 10aa90fed8aafbfe14e32552742d48eb8f806311 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 20 Mar 2020 14:52:00 +0200 Subject: [PATCH 1005/1304] serial: 8250_omap: Fix sleeping function called from invalid context during probe [ Upstream commit 4ce35a3617c0ac758c61122b2218b6c8c9ac9398 ] When booting j721e the following bug is printed: [ 1.154821] BUG: sleeping function called from invalid context at kernel/sched/completion.c:99 [ 1.154827] in_atomic(): 0, irqs_disabled(): 128, non_block: 0, pid: 12, name: kworker/0:1 [ 1.154832] 3 locks held by kworker/0:1/12: [ 1.154836] #0: ffff000840030728 ((wq_completion)events){+.+.}, at: process_one_work+0x1d4/0x6e8 [ 1.154852] #1: ffff80001214fdd8 (deferred_probe_work){+.+.}, at: process_one_work+0x1d4/0x6e8 [ 1.154860] #2: ffff00084060b170 (&dev->mutex){....}, at: __device_attach+0x38/0x138 [ 1.154872] irq event stamp: 63096 [ 1.154881] hardirqs last enabled at (63095): [] _raw_spin_unlock_irqrestore+0x70/0x78 [ 1.154887] hardirqs last disabled at (63096): [] _raw_spin_lock_irqsave+0x28/0x80 [ 1.154893] softirqs last enabled at (62254): [] _stext+0x488/0x564 [ 1.154899] softirqs last disabled at (62247): [] irq_exit+0x114/0x140 [ 1.154906] CPU: 0 PID: 12 Comm: kworker/0:1 Not tainted 5.6.0-rc6-next-20200318-00094-g45e4089b0bd3 #221 [ 1.154911] Hardware name: Texas Instruments K3 J721E SoC (DT) [ 1.154917] Workqueue: events deferred_probe_work_func [ 1.154923] Call trace: [ 1.154928] dump_backtrace+0x0/0x190 [ 1.154933] show_stack+0x14/0x20 [ 1.154940] dump_stack+0xe0/0x148 [ 1.154946] ___might_sleep+0x150/0x1f0 [ 1.154952] __might_sleep+0x4c/0x80 [ 1.154957] 
wait_for_completion_timeout+0x40/0x140 [ 1.154964] ti_sci_set_device_state+0xa0/0x158 [ 1.154969] ti_sci_cmd_get_device_exclusive+0x14/0x20 [ 1.154977] ti_sci_dev_start+0x34/0x50 [ 1.154984] genpd_runtime_resume+0x78/0x1f8 [ 1.154991] __rpm_callback+0x3c/0x140 [ 1.154996] rpm_callback+0x20/0x80 [ 1.155001] rpm_resume+0x568/0x758 [ 1.155007] __pm_runtime_resume+0x44/0xb0 [ 1.155013] omap8250_probe+0x2b4/0x508 [ 1.155019] platform_drv_probe+0x50/0xa0 [ 1.155023] really_probe+0xd4/0x318 [ 1.155028] driver_probe_device+0x54/0xe8 [ 1.155033] __device_attach_driver+0x80/0xb8 [ 1.155039] bus_for_each_drv+0x74/0xc0 [ 1.155044] __device_attach+0xdc/0x138 [ 1.155049] device_initial_probe+0x10/0x18 [ 1.155053] bus_probe_device+0x98/0xa0 [ 1.155058] deferred_probe_work_func+0x74/0xb0 [ 1.155063] process_one_work+0x280/0x6e8 [ 1.155068] worker_thread+0x48/0x430 [ 1.155073] kthread+0x108/0x138 [ 1.155079] ret_from_fork+0x10/0x18 To fix the bug we need to first call pm_runtime_enable() prior to any pm_runtime calls. 
Reported-by: Tomi Valkeinen Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20200320125200.6772-1-peter.ujfalusi@ti.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/serial/8250/8250_omap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index a019286f8bb6..a7e555e413a6 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1227,11 +1227,11 @@ static int omap8250_probe(struct platform_device *pdev) spin_lock_init(&priv->rx_dma_lock); device_init_wakeup(&pdev->dev, true); + pm_runtime_enable(&pdev->dev); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, -1); pm_runtime_irq_safe(&pdev->dev); - pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); -- GitLab From 69077bd8f19a34a5a3bb05fd8bc032aa7983ef80 Mon Sep 17 00:00:00 2001 From: Vignesh Raghavendra Date: Thu, 19 Mar 2020 16:33:39 +0530 Subject: [PATCH 1006/1304] serial: 8250: 8250_omap: Terminate DMA before pushing data on RX timeout [ Upstream commit 7cf4df30a98175033e9849f7f16c46e96ba47f41 ] Terminate and flush DMA internal buffers, before pushing RX data to higher layer. Otherwise, this will lead to data corruption, as driver would end up pushing stale buffer data to higher layer while actual data is still stuck inside DMA hardware and has not yet arrived at the memory. While at that, replace deprecated dmaengine_terminate_all() with dmaengine_terminate_async().
Signed-off-by: Vignesh Raghavendra Link: https://lore.kernel.org/r/20200319110344.21348-2-vigneshr@ti.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/serial/8250/8250_omap.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index a7e555e413a6..cbd006fb7fbb 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -781,7 +781,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p) dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state); count = dma->rx_size - state.residue; - + if (count < dma->rx_size) + dmaengine_terminate_async(dma->rxchan); + if (!count) + goto unlock; ret = tty_insert_flip_string(tty_port, dma->rx_buf, count); p->port.icount.rx += ret; @@ -843,7 +846,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p) spin_unlock_irqrestore(&priv->rx_dma_lock, flags); __dma_rx_do_complete(p); - dmaengine_terminate_all(dma->rxchan); } static int omap_8250_rx_dma(struct uart_8250_port *p) -- GitLab From 9a1d2d2eadeb4886610c2c310c8f39d106608e17 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 24 Mar 2020 08:03:19 +0100 Subject: [PATCH 1007/1304] perf cpumap: Fix snprintf overflow check [ Upstream commit d74b181a028bb5a468f0c609553eff6a8fdf4887 ] 'snprintf' returns the number of characters which would be generated for the given input. If the returned value is *greater than* or equal to the buffer size, it means that the output has been truncated. Fix the overflow test accordingly. 
Fixes: 7780c25bae59f ("perf tools: Allow ability to map cpus to nodes easily") Fixes: 92a7e1278005b ("perf cpumap: Add cpu__max_present_cpu()") Signed-off-by: Christophe JAILLET Suggested-by: David Laight Cc: Alexander Shishkin Cc: Don Zickus Cc: He Zhe Cc: Jan Stancek Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: kernel-janitors@vger.kernel.org Link: http://lore.kernel.org/lkml/20200324070319.10901-1-christophe.jaillet@wanadoo.fr Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/util/cpumap.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index f93846edc1e0..827d844f4efb 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -462,7 +462,7 @@ static void set_max_cpu_num(void) /* get the highest possible cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -473,7 +473,7 @@ static void set_max_cpu_num(void) /* get the highest present cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -501,7 +501,7 @@ static void set_max_node_num(void) /* get the highest possible cpu number for a sparse allocation */ ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt); - if (ret == PATH_MAX) { + if (ret >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); goto out; } @@ -586,7 +586,7 @@ int cpu__setup_cpunode_map(void) return 0; n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt); - if (n == PATH_MAX) { + if (n >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); return -1; } @@ -601,7 +601,7 
@@ int cpu__setup_cpunode_map(void) continue; n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name); - if (n == PATH_MAX) { + if (n >= PATH_MAX) { pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX); continue; } -- GitLab From 68aaf03936dcbfdf023ac2f7182c03f83c0c6e05 Mon Sep 17 00:00:00 2001 From: Pratik Rajesh Sampat Date: Mon, 16 Mar 2020 19:27:43 +0530 Subject: [PATCH 1008/1304] cpufreq: powernv: Fix frame-size-overflow in powernv_cpufreq_work_fn [ Upstream commit d95fe371ecd28901f11256c610b988ed44e36ee2 ] The patch avoids allocating cpufreq_policy on stack hence fixing frame size overflow in 'powernv_cpufreq_work_fn' Fixes: 227942809b52 ("cpufreq: powernv: Restore cpu frequency to policy->cur on unthrottling") Signed-off-by: Pratik Rajesh Sampat Reviewed-by: Daniel Axtens Signed-off-by: Michael Ellerman Link: https://lore.kernel.org/r/20200316135743.57735-1-psampat@linux.ibm.com Signed-off-by: Sasha Levin --- drivers/cpufreq/powernv-cpufreq.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 687c92ef7644..79942f705757 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -903,6 +903,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = { void powernv_cpufreq_work_fn(struct work_struct *work) { struct chip *chip = container_of(work, struct chip, throttle); + struct cpufreq_policy *policy; unsigned int cpu; cpumask_t mask; @@ -917,12 +918,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work) chip->restore = false; for_each_cpu(cpu, &mask) { int index; - struct cpufreq_policy policy; - cpufreq_get_policy(&policy, cpu); - index = cpufreq_table_find_index_c(&policy, policy.cur); - powernv_cpufreq_target_index(&policy, index); - cpumask_andnot(&mask, &mask, policy.cpus); + policy = cpufreq_cpu_get(cpu); + if (!policy) + continue; + index = cpufreq_table_find_index_c(policy, policy->cur); + 
powernv_cpufreq_target_index(policy, index); + cpumask_andnot(&mask, &mask, policy->cpus); + cpufreq_cpu_put(policy); } out: put_online_cpus(); -- GitLab From 1d0e482939c49c6fc4979e964c1cd6a7c255edd0 Mon Sep 17 00:00:00 2001 From: Gabriel Ravier Date: Thu, 12 Mar 2020 15:50:21 +0100 Subject: [PATCH 1009/1304] tools: gpio-hammer: Avoid potential overflow in main [ Upstream commit d1ee7e1f5c9191afb69ce46cc7752e4257340a31 ] If '-o' was used more than 64 times in a single invocation of gpio-hammer, this could lead to an overflow of the 'lines' array. This commit fixes this by avoiding the overflow and giving a proper diagnostic back to the user Signed-off-by: Gabriel Ravier Signed-off-by: Bartosz Golaszewski Signed-off-by: Sasha Levin --- tools/gpio/gpio-hammer.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c index 4bcb234c0fca..3da5462a0c7d 100644 --- a/tools/gpio/gpio-hammer.c +++ b/tools/gpio/gpio-hammer.c @@ -138,7 +138,14 @@ int main(int argc, char **argv) device_name = optarg; break; case 'o': - lines[i] = strtoul(optarg, NULL, 10); + /* + * Avoid overflow. 
Do not immediately error, we want to + * be able to accurately report on the amount of times + * '-o' was given to give an accurate error message + */ + if (i < GPIOHANDLES_MAX) + lines[i] = strtoul(optarg, NULL, 10); + i++; break; case '?': @@ -146,6 +153,14 @@ int main(int argc, char **argv) return -1; } } + + if (i >= GPIOHANDLES_MAX) { + fprintf(stderr, + "Only %d occurences of '-o' are allowed, %d were found\n", + GPIOHANDLES_MAX, i + 1); + return -1; + } + nlines = i; if (!device_name || !nlines) { -- GitLab From b3dc81c1987f687dfa9c30b87c78dd0a2e603c56 Mon Sep 17 00:00:00 2001 From: John Meneghini Date: Thu, 20 Feb 2020 10:05:38 +0900 Subject: [PATCH 1010/1304] nvme-multipath: do not reset on unknown status [ Upstream commit 764e9332098c0e60251386a507fe46ac91276120 ] The nvme multipath error handling defaults to controller reset if the error is unknown. There are, however, no existing nvme status codes that indicate a reset should be used, and resetting causes unnecessary disruption to the rest of IO. Change nvme's error handling to first check if failover should happen. If not, let the normal error handling take over rather than reset the controller. 
Based-on-a-patch-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Signed-off-by: John Meneghini Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/host/core.c | 5 +---- drivers/nvme/host/multipath.c | 21 +++++++++------------ drivers/nvme/host/nvme.h | 5 +++-- 3 files changed, 13 insertions(+), 18 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0d60f2f8f3ee..4b182ac15687 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -255,11 +255,8 @@ void nvme_complete_rq(struct request *req) trace_nvme_complete_rq(req); if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { - if ((req->cmd_flags & REQ_NVME_MPATH) && - blk_path_error(status)) { - nvme_failover_req(req); + if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req)) return; - } if (!blk_queue_dying(req->q)) { nvme_req(req)->retries++; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 2e63c1106030..e71075338ff5 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -73,17 +73,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, } } -void nvme_failover_req(struct request *req) +bool nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; u16 status = nvme_req(req)->status; unsigned long flags; - spin_lock_irqsave(&ns->head->requeue_lock, flags); - blk_steal_bios(&ns->head->requeue_list, req); - spin_unlock_irqrestore(&ns->head->requeue_lock, flags); - blk_mq_end_request(req, 0); - switch (status & 0x7ff) { case NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: @@ -111,15 +106,17 @@ void nvme_failover_req(struct request *req) nvme_mpath_clear_current_path(ns); break; default: - /* - * Reset the controller for any non-ANA error as we don't know - * what caused the error. - */ - nvme_reset_ctrl(ns->ctrl); - break; + /* This was a non-ANA error so follow the normal error path. 
*/ + return false; } + spin_lock_irqsave(&ns->head->requeue_lock, flags); + blk_steal_bios(&ns->head->requeue_list, req); + spin_unlock_irqrestore(&ns->head->requeue_lock, flags); + blk_mq_end_request(req, 0); + kblockd_schedule_work(&ns->head->requeue_work); + return true; } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index cc4273f11989..31c1496f938f 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -477,7 +477,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys); void nvme_mpath_start_freeze(struct nvme_subsystem *subsys); void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); -void nvme_failover_req(struct request *req); +bool nvme_failover_req(struct request *req); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); @@ -521,8 +521,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); } -static inline void nvme_failover_req(struct request *req) +static inline bool nvme_failover_req(struct request *req) { + return false; } static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { -- GitLab From 6f7baf41b7c8963cbd4b23e2cb8a729894eafad7 Mon Sep 17 00:00:00 2001 From: Israel Rukshin Date: Tue, 24 Mar 2020 17:29:43 +0200 Subject: [PATCH 1011/1304] nvme: Fix controller creation races with teardown flow [ Upstream commit ce1518139e6976cf19c133b555083354fdb629b8 ] Calling nvme_sysfs_delete() when the controller is in the middle of creation may cause several bugs. If the controller is in NEW state we remove delete_controller file and don't delete the controller. 
The user will not be able to use nvme disconnect command on that controller again, although the controller may be active. Other bugs may happen if the controller is in the middle of create_ctrl callback and nvme_do_delete_ctrl() starts. For example, freeing I/O tagset at nvme_do_delete_ctrl() before it was allocated at create_ctrl callback. To fix all those races don't allow the user to delete the controller before it was fully created. Signed-off-by: Israel Rukshin Reviewed-by: Max Gurtovoy Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch Signed-off-by: Sasha Levin --- drivers/nvme/host/core.c | 5 +++++ drivers/nvme/host/nvme.h | 1 + 2 files changed, 6 insertions(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 4b182ac15687..faa7feebb609 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2856,6 +2856,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev, { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + /* Can't delete non-created controllers */ + if (!ctrl->created) + return -EBUSY; + if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; @@ -3576,6 +3580,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) queue_work(nvme_wq, &ctrl->async_event_work); nvme_start_queues(ctrl); } + ctrl->created = true; } EXPORT_SYMBOL_GPL(nvme_start_ctrl); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 31c1496f938f..a70b997060e6 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -206,6 +206,7 @@ struct nvme_ctrl { struct nvme_command ka_cmd; struct work_struct fw_act_work; unsigned long events; + bool created; #ifdef CONFIG_NVME_MULTIPATH /* asymmetric namespace access: */ -- GitLab From db96986c088b047d12ef907ebec252804b785424 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Mon, 23 Mar 2020 13:28:00 +0200 Subject: [PATCH 1012/1304] RDMA/rxe: Set sys_image_guid to be aligned with HW IB devices [ Upstream commit d0ca2c35dd15a3d989955caec02beea02f735ee6 
] The RXE driver doesn't set sys_image_guid and user space applications see zeros. This causes to pyverbs tests to fail with the following traceback, because the IBTA spec requires to have valid sys_image_guid. Traceback (most recent call last): File "./tests/test_device.py", line 51, in test_query_device self.verify_device_attr(attr) File "./tests/test_device.py", line 74, in verify_device_attr assert attr.sys_image_guid != 0 In order to fix it, set sys_image_guid to be equal to node_guid. Before: 5: rxe0: ... node_guid 5054:00ff:feaa:5363 sys_image_guid 0000:0000:0000:0000 After: 5: rxe0: ... node_guid 5054:00ff:feaa:5363 sys_image_guid 5054:00ff:feaa:5363 Fixes: 8700e3e7c485 ("Soft RoCE driver") Link: https://lore.kernel.org/r/20200323112800.1444784-1-leon@kernel.org Signed-off-by: Zhu Yanjun Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe Signed-off-by: Sasha Levin --- drivers/infiniband/sw/rxe/rxe.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 94dedabe648c..6589ff51eaf5 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -121,6 +121,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe) rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN; rxe->attr.max_pkeys = RXE_MAX_PKEYS; rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY; + addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid, + rxe->ndev->dev_addr); rxe->max_ucontext = RXE_MAX_UCONTEXT; } -- GitLab From b125a752eb1843a4546ec8ab6bf162baf8ebaae3 Mon Sep 17 00:00:00 2001 From: Don Brace Date: Fri, 20 Mar 2020 13:26:18 -0500 Subject: [PATCH 1013/1304] scsi: hpsa: correct race condition in offload enabled [ Upstream commit 3e16e83a62edac7617bfd8dbb4e55d04ff6adbe1 ] Correct race condition where ioaccel is re-enabled before the raid_map is updated. For RAID_1, RAID_1ADM, and RAID 5/6 there is a BUG_ON called which is bad. 
- Change event thread to disable ioaccel only. Send all requests down the RAID path instead. - Have rescan thread handle offload_enable. - Since there is only one rescan allowed at a time, turning offload_enabled on/off should not be racy. Each handler queues up a rescan if one is already in progress. - For timing diagram, offload_enabled is initially off due to a change (transformation: splitmirror/remirror), ... otbe = offload_to_be_enabled oe = offload_enabled Time Event Rescan Completion Request Worker Worker Thread Thread ---- ------ ------ ---------- ------- T0 | | + UA | T1 | + rescan started | 0x3f | T2 + Event | | 0x0e | T3 + Ack msg | | | T4 | + if (!dev[i]->oe && | | T5 | | dev[i]->otbe) | | T6 | | get_raid_map | | T7 + otbe = 1 | | | T8 | | | | T9 | + oe = otbe | | T10 | | | + ioaccel request T11 * BUG_ON T0 - I/O completion with UA 0x3f 0x0e sets rescan flag. T1 - rescan worker thread starts a rescan. T2 - event comes in T3 - event thread starts and issues "Acknowledge" message ... T6 - rescan thread has bypassed code to reload new raid map. ... T7 - event thread runs and sets offload_to_be_enabled ... T9 - rescan thread turns on offload_enabled. T10- request comes in and goes down ioaccel path. T11- BUG_ON. - After the patch is applied, ioaccel_enabled can only be re-enabled in the re-scan thread. Link: https://lore.kernel.org/r/158472877894.14200.7077843399036368335.stgit@brunhilda Reviewed-by: Scott Teel Reviewed-by: Matt Perricone Reviewed-by: Scott Benesh Signed-off-by: Don Brace Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/hpsa.c | 80 ++++++++++++++++++++++++++++++++------------- 1 file changed, 57 insertions(+), 23 deletions(-) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index f570b8c5d857..11de2198bb87 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -507,6 +507,12 @@ static ssize_t host_store_rescan(struct device *dev, return count; } +static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device) +{ + device->offload_enabled = 0; + device->offload_to_be_enabled = 0; +} + static ssize_t host_show_firmware_revision(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1743,8 +1749,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, __func__, h->scsi_host->host_no, logical_drive->bus, logical_drive->target, logical_drive->lun); - logical_drive->offload_enabled = 0; - logical_drive->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(logical_drive); logical_drive->queue_depth = 8; } } @@ -2496,8 +2501,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h, IOACCEL2_SERV_RESPONSE_FAILURE) { if (c2->error_data.status == IOACCEL2_STATUS_SR_IOACCEL_DISABLED) { - dev->offload_enabled = 0; - dev->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(dev); } return hpsa_retry_cmd(h, c); @@ -3676,10 +3680,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h, this_device->offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); if (this_device->offload_config) { - this_device->offload_to_be_enabled = + bool offload_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); - if (hpsa_get_raid_map(h, scsi3addr, this_device)) - this_device->offload_to_be_enabled = 0; + /* + * Check to see if offload can be enabled. 
+ */ + if (offload_enabled) { + rc = hpsa_get_raid_map(h, scsi3addr, this_device); + if (rc) /* could not load raid_map */ + goto out; + this_device->offload_to_be_enabled = 1; + } } out: @@ -3998,8 +4009,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, } else { this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; - this_device->offload_enabled = 0; - this_device->offload_to_be_enabled = 0; + hpsa_turn_off_ioaccel_for_device(this_device); this_device->hba_ioaccel_enabled = 0; this_device->volume_offline = 0; this_device->queue_depth = h->nr_cmds; @@ -5213,8 +5223,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, /* Handles load balance across RAID 1 members. * (2-drive R1 and R10 with even # of drives.) * Appropriate for SSDs, not optimal for HDDs + * Ensure we have the correct raid_map. */ - BUG_ON(le16_to_cpu(map->layout_map_count) != 2); + if (le16_to_cpu(map->layout_map_count) != 2) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } if (dev->offload_to_mirror) map_index += le16_to_cpu(map->data_disks_per_row); dev->offload_to_mirror = !dev->offload_to_mirror; @@ -5222,8 +5236,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, case HPSA_RAID_ADM: /* Handles N-way mirrors (R1-ADM) * and R10 with # of drives divisible by 3.) + * Ensure we have the correct raid_map. 
*/ - BUG_ON(le16_to_cpu(map->layout_map_count) != 3); + if (le16_to_cpu(map->layout_map_count) != 3) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } offload_to_mirror = dev->offload_to_mirror; raid_map_helper(map, offload_to_mirror, @@ -5248,7 +5266,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, r5or6_blocks_per_row = le16_to_cpu(map->strip_size) * le16_to_cpu(map->data_disks_per_row); - BUG_ON(r5or6_blocks_per_row == 0); + if (r5or6_blocks_per_row == 0) { + hpsa_turn_off_ioaccel_for_device(dev); + return IO_ACCEL_INELIGIBLE; + } stripesize = r5or6_blocks_per_row * le16_to_cpu(map->layout_map_count); #if BITS_PER_LONG == 32 @@ -8218,7 +8239,7 @@ static int detect_controller_lockup(struct ctlr_info *h) * * Called from monitor controller worker (hpsa_event_monitor_worker) * - * A Volume (or Volumes that comprise an Array set may be undergoing a + * A Volume (or Volumes that comprise an Array set) may be undergoing a * transformation, so we will be turning off ioaccel for all volumes that * make up the Array. */ @@ -8241,6 +8262,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) * Run through current device list used during I/O requests. */ for (i = 0; i < h->ndevices; i++) { + int offload_to_be_enabled = 0; + int offload_config = 0; + device = h->dev[i]; if (!device) @@ -8258,25 +8282,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h) continue; ioaccel_status = buf[IOACCEL_STATUS_BYTE]; - device->offload_config = + + /* + * Check if offload is still configured on + */ + offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); - if (device->offload_config) - device->offload_to_be_enabled = + /* + * If offload is configured on, check to see if ioaccel + * needs to be enabled. 
+ */ + if (offload_config) + offload_to_be_enabled = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); + /* + * If ioaccel is to be re-enabled, re-enable later during the + * scan operation so the driver can get a fresh raidmap + * before turning ioaccel back on. + */ + if (offload_to_be_enabled) + continue; + /* * Immediately turn off ioaccel for any volume the * controller tells us to. Some of the reasons could be: * transformation - change to the LVs of an Array. * degraded volume - component failure - * - * If ioaccel is to be re-enabled, re-enable later during the - * scan operation so the driver can get a fresh raidmap - * before turning ioaccel back on. - * */ - if (!device->offload_to_be_enabled) - device->offload_enabled = 0; + hpsa_turn_off_ioaccel_for_device(device); } kfree(buf); -- GitLab From 38c46471f998067e7eff81b04d7238427ae40975 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 27 Mar 2020 17:15:39 +0100 Subject: [PATCH 1014/1304] SUNRPC: Fix a potential buffer overflow in 'svc_print_xprts()' [ Upstream commit b25b60d7bfb02a74bc3c2d998e09aab159df8059 ] 'maxlen' is the total size of the destination buffer. There is only one caller and this value is 256. When we compute the size already used and what we would like to add in the buffer, the trailling NULL character is not taken into account. However, this trailling character will be added by the 'strcat' once we have checked that we have enough place. So, there is a off-by-one issue and 1 byte of the stack could be erroneously overwridden. Take into account the trailling NULL, when checking if there is enough place in the destination buffer. While at it, also replace a 'sprintf' by a safer 'snprintf', check for output truncation and avoid a superfluous 'strlen'. 
Fixes: dc9a16e49dbba ("svc: Add /proc/sys/sunrpc/transport files") Signed-off-by: Christophe JAILLET [ cel: very minor fix to documenting comment Signed-off-by: Chuck Lever Signed-off-by: Sasha Levin --- net/sunrpc/svc_xprt.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index c8ee8e801edb..709c082dc905 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -103,8 +103,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl) } EXPORT_SYMBOL_GPL(svc_unreg_xprt_class); -/* - * Format the transport list for printing +/** + * svc_print_xprts - Format the transport list for printing + * @buf: target buffer for formatted address + * @maxlen: length of target buffer + * + * Fills in @buf with a string containing a list of transport names, each name + * terminated with '\n'. If the buffer is too small, some entries may be + * missing, but it is guaranteed that all lines in the output buffer are + * complete. + * + * Returns positive length of the filled-in string. 
*/ int svc_print_xprts(char *buf, int maxlen) { @@ -117,9 +126,9 @@ int svc_print_xprts(char *buf, int maxlen) list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { int slen; - sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); - slen = strlen(tmpstr); - if (len + slen > maxlen) + slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n", + xcl->xcl_name, xcl->xcl_max_payload); + if (slen >= sizeof(tmpstr) || len + slen >= maxlen) break; len += slen; strcat(buf, tmpstr); -- GitLab From 308aeb3629c8745ef55ec38545cf2dc338108267 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 24 Mar 2020 16:53:59 -0400 Subject: [PATCH 1015/1304] svcrdma: Fix leak of transport addresses [ Upstream commit 1a33d8a284b1e85e03b8c7b1ea8fb985fccd1d71 ] Kernel memory leak detected: unreferenced object 0xffff888849cdf480 (size 8): comm "kworker/u8:3", pid 2086, jiffies 4297898756 (age 4269.856s) hex dump (first 8 bytes): 30 00 cd 49 88 88 ff ff 0..I.... backtrace: [<00000000acfc370b>] __kmalloc_track_caller+0x137/0x183 [<00000000a2724354>] kstrdup+0x2b/0x43 [<0000000082964f84>] xprt_rdma_format_addresses+0x114/0x17d [rpcrdma] [<00000000dfa6ed00>] xprt_setup_rdma_bc+0xc0/0x10c [rpcrdma] [<0000000073051a83>] xprt_create_transport+0x3f/0x1a0 [sunrpc] [<0000000053531a8e>] rpc_create+0x118/0x1cd [sunrpc] [<000000003a51b5f8>] setup_callback_client+0x1a5/0x27d [nfsd] [<000000001bd410af>] nfsd4_process_cb_update.isra.7+0x16c/0x1ac [nfsd] [<000000007f4bbd56>] nfsd4_run_cb_work+0x4c/0xbd [nfsd] [<0000000055c5586b>] process_one_work+0x1b2/0x2fe [<00000000b1e3e8ef>] worker_thread+0x1a6/0x25a [<000000005205fb78>] kthread+0xf6/0xfb [<000000006d2dc057>] ret_from_fork+0x3a/0x50 Introduce a call to xprt_rdma_free_addresses() similar to the way that the TCP backchannel releases a transport's peer address strings. 
Fixes: 5d252f90a800 ("svcrdma: Add class for RDMA backwards direction transport") Signed-off-by: Chuck Lever Signed-off-by: Sasha Levin --- net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index b9827665ff35..d183d4aee822 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -256,6 +256,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt) { dprintk("svcrdma: %s: xprt %p\n", __func__, xprt); + xprt_rdma_free_addresses(xprt); xprt_free(xprt); module_put(THIS_MODULE); } -- GitLab From 1841a99325123478c46720078802df3f313e2199 Mon Sep 17 00:00:00 2001 From: Mikel Rychliski Date: Wed, 18 Mar 2020 22:16:23 -0400 Subject: [PATCH 1016/1304] PCI: Use ioremap(), not phys_to_virt() for platform ROM [ Upstream commit 72e0ef0e5f067fd991f702f0b2635d911d0cf208 ] On some EFI systems, the video BIOS is provided by the EFI firmware. The boot stub code stores the physical address of the ROM image in pdev->rom. Currently we attempt to access this pointer using phys_to_virt(), which doesn't work with CONFIG_HIGHMEM. On these systems, attempting to load the radeon module on a x86_32 kernel can result in the following: BUG: unable to handle page fault for address: 3e8ed03c #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page *pde = 00000000 Oops: 0000 [#1] PREEMPT SMP CPU: 0 PID: 317 Comm: systemd-udevd Not tainted 5.6.0-rc3-next-20200228 #2 Hardware name: Apple Computer, Inc. 
MacPro1,1/Mac-F4208DC8, BIOS MP11.88Z.005C.B08.0707021221 07/02/07 EIP: radeon_get_bios+0x5ed/0xe50 [radeon] Code: 00 00 84 c0 0f 85 12 fd ff ff c7 87 64 01 00 00 00 00 00 00 8b 47 08 8b 55 b0 e8 1e 83 e1 d6 85 c0 74 1a 8b 55 c0 85 d2 74 13 <80> 38 55 75 0e 80 78 01 aa 0f 84 a4 03 00 00 8d 74 26 00 68 dc 06 EAX: 3e8ed03c EBX: 00000000 ECX: 3e8ed03c EDX: 00010000 ESI: 00040000 EDI: eec04000 EBP: eef3fc60 ESP: eef3fbe0 DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 EFLAGS: 00010206 CR0: 80050033 CR2: 3e8ed03c CR3: 2ec77000 CR4: 000006d0 Call Trace: r520_init+0x26/0x240 [radeon] radeon_device_init+0x533/0xa50 [radeon] radeon_driver_load_kms+0x80/0x220 [radeon] drm_dev_register+0xa7/0x180 [drm] radeon_pci_probe+0x10f/0x1a0 [radeon] pci_device_probe+0xd4/0x140 Fix the issue by updating all drivers which can access a platform provided ROM. Instead of calling the helper function pci_platform_rom() which uses phys_to_virt(), call ioremap() directly on the pdev->rom. radeon_read_platform_bios() previously directly accessed an __iomem pointer. Avoid this by calling memcpy_fromio() instead of kmemdup(). pci_platform_rom() now has no remaining callers, so remove it. 
Link: https://lore.kernel.org/r/20200319021623.5426-1-mikel@mikelr.com Signed-off-by: Mikel Rychliski Signed-off-by: Bjorn Helgaas Acked-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 31 +++++++++++-------- .../drm/nouveau/nvkm/subdev/bios/shadowpci.c | 17 ++++++++-- drivers/gpu/drm/radeon/radeon_bios.c | 30 +++++++++++------- drivers/pci/rom.c | 17 ---------- include/linux/pci.h | 1 - 5 files changed, 52 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index a5df80d50d44..6cf3dd5edffd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -191,30 +191,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev) static bool amdgpu_read_platform_bios(struct amdgpu_device *adev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = adev->pdev->rom; + size_t romlen = adev->pdev->romlen; + void __iomem *bios; adev->bios = NULL; - bios = pci_platform_rom(adev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - adev->bios = kzalloc(size, GFP_KERNEL); - if (adev->bios == NULL) + adev->bios = kzalloc(romlen, GFP_KERNEL); + if (!adev->bios) return false; - memcpy_fromio(adev->bios, bios, size); + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; - if (!check_atom_bios(adev->bios, size)) { - kfree(adev->bios); - return false; - } + memcpy_fromio(adev->bios, bios, romlen); + iounmap(bios); - adev->bios_size = size; + if (!check_atom_bios(adev->bios, romlen)) + goto free_bios; + + adev->bios_size = romlen; return true; +free_bios: + kfree(adev->bios); + return false; } #ifdef CONFIG_ACPI diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c index 9b91da09dc5f..8d9812a51ef6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c @@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name) else return ERR_PTR(-ENODEV); + if (!pdev->rom || pdev->romlen == 0) + return ERR_PTR(-ENODEV); + if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) { + priv->size = pdev->romlen; if (ret = -ENODEV, - (priv->rom = pci_platform_rom(pdev, &priv->size))) + (priv->rom = ioremap(pdev->rom, pdev->romlen))) return priv; kfree(priv); } @@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name) return ERR_PTR(ret); } +static void +platform_fini(void *data) +{ + struct priv *priv = data; + + iounmap(priv->rom); + kfree(priv); +} + const struct nvbios_source nvbios_platform = { .name = "PLATFORM", .init = platform_init, - .fini = (void(*)(void *))kfree, + .fini = platform_fini, .read = pcirom_read, .rw = true, }; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 04c0ed41374f..dd0528cf9818 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -104,25 +104,33 @@ static bool radeon_read_bios(struct radeon_device *rdev) static bool radeon_read_platform_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; - size_t size; + phys_addr_t rom = rdev->pdev->rom; + size_t romlen = rdev->pdev->romlen; + void __iomem *bios; rdev->bios = NULL; - bios = pci_platform_rom(rdev->pdev, &size); - if (!bios) { + if (!rom || romlen == 0) return false; - } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + rdev->bios = kzalloc(romlen, GFP_KERNEL); + if (!rdev->bios) return false; - } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); - if (rdev->bios == NULL) { - return false; - } + + bios = ioremap(rom, romlen); + if (!bios) + goto free_bios; + + memcpy_fromio(rdev->bios, bios, romlen); + iounmap(bios); + + if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) + goto free_bios; return true; +free_bios: + kfree(rdev->bios); + return false; } #ifdef 
CONFIG_ACPI diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index 137bf0cee897..8fc9a4e911e3 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) pci_disable_rom(pdev); } EXPORT_SYMBOL(pci_unmap_rom); - -/** - * pci_platform_rom - provides a pointer to any ROM image provided by the - * platform - * @pdev: pointer to pci device struct - * @size: pointer to receive size of pci window over ROM - */ -void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) -{ - if (pdev->rom && pdev->romlen) { - *size = pdev->romlen; - return phys_to_virt((phys_addr_t)pdev->rom); - } - - return NULL; -} -EXPORT_SYMBOL(pci_platform_rom); diff --git a/include/linux/pci.h b/include/linux/pci.h index 2517492dd185..2fda9893962d 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1144,7 +1144,6 @@ int pci_enable_rom(struct pci_dev *pdev); void pci_disable_rom(struct pci_dev *pdev); void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); -void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); /* Power management related routines */ int pci_save_state(struct pci_dev *dev); -- GitLab From 2f0a77ccae8bf303b439fe62ad2830a3c3255848 Mon Sep 17 00:00:00 2001 From: Liu Song Date: Thu, 16 Jan 2020 23:36:07 +0800 Subject: [PATCH 1017/1304] ubifs: Fix out-of-bounds memory access caused by abnormal value of node_len MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit acc5af3efa303d5f36cc8c0f61716161f6ca1384 ] In “ubifs_check_node”, when the value of "node_len" is abnormal, the code will goto label of "out_len" for execution. Then, in the following "ubifs_dump_node", if inode type is "UBIFS_DATA_NODE", in "print_hex_dump", an out-of-bounds access may occur due to the wrong "ch->len". 
Therefore, when the value of "node_len" is abnormal, data length should to be adjusted to a reasonable safe range. At this time, structured data is not credible, so dump the corrupted data directly for analysis. Signed-off-by: Liu Song Signed-off-by: Richard Weinberger Signed-off-by: Sasha Levin --- fs/ubifs/io.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 099bec94b820..fab29f899f91 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -237,7 +237,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum) int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, int offs, int quiet, int must_chk_crc) { - int err = -EINVAL, type, node_len; + int err = -EINVAL, type, node_len, dump_node = 1; uint32_t crc, node_crc, magic; const struct ubifs_ch *ch = buf; @@ -290,10 +290,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, out_len: if (!quiet) ubifs_err(c, "bad node length %d", node_len); + if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ) + dump_node = 0; out: if (!quiet) { ubifs_err(c, "bad node at LEB %d:%d", lnum, offs); - ubifs_dump_node(c, buf); + if (dump_node) { + ubifs_dump_node(c, buf); + } else { + int safe_len = min3(node_len, c->leb_size - offs, + (int)UBIFS_MAX_DATA_NODE_SZ); + pr_err("\tprevent out-of-bounds memory access\n"); + pr_err("\ttruncated data node length %d\n", safe_len); + pr_err("\tcorrupted data node:\n"); + print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1, + buf, safe_len, 0); + } dump_stack(); } return err; -- GitLab From 65d95462001c6ccd9bc9499c1fc9a90eca9de496 Mon Sep 17 00:00:00 2001 From: Andreas Steinmetz Date: Tue, 31 Mar 2020 14:25:54 +0200 Subject: [PATCH 1018/1304] ALSA: usb-audio: Fix case when USB MIDI interface has more than one extra endpoint descriptor [ Upstream commit 5c6cd7021a05a02fcf37f360592d7c18d4d807fb ] The Miditech MIDIFACE 16x16 (USB ID 1290:1749) has more than one extra 
endpoint descriptor. The first extra descriptor is: 0x06 0x30 0x00 0x00 0x00 0x00 As the code in snd_usbmidi_get_ms_info() looks only at the first extra descriptor to find USB_DT_CS_ENDPOINT the device as such is recognized but there is neither input nor output configured. The patch iterates through the extra descriptors to find the proper one. With this patch the device is correctly configured. Signed-off-by: Andreas Steinmetz Link: https://lore.kernel.org/r/1c3b431a86f69e1d60745b6110cdb93c299f120b.camel@domdv.de Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/usb/midi.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 28a3ad8b1d74..137e1e8718d6 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1828,6 +1828,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi, return 0; } +static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor( + struct usb_host_endpoint *hostep) +{ + unsigned char *extra = hostep->extra; + int extralen = hostep->extralen; + + while (extralen > 3) { + struct usb_ms_endpoint_descriptor *ms_ep = + (struct usb_ms_endpoint_descriptor *)extra; + + if (ms_ep->bLength > 3 && + ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT && + ms_ep->bDescriptorSubtype == UAC_MS_GENERAL) + return ms_ep; + if (!extra[0]) + break; + extralen -= extra[0]; + extra += extra[0]; + } + return NULL; +} + /* * Returns MIDIStreaming device capabilities. 
*/ @@ -1865,11 +1887,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi, ep = get_ep_desc(hostep); if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep)) continue; - ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra; - if (hostep->extralen < 4 || - ms_ep->bLength < 4 || - ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT || - ms_ep->bDescriptorSubtype != UAC_MS_GENERAL) + ms_ep = find_usb_ms_endpoint_descriptor(hostep); + if (!ms_ep) continue; if (usb_endpoint_dir_out(ep)) { if (endpoints[epidx].out_ep) { -- GitLab From a8cc52270f3d8e8f4faf01ffd6c4a95bbfb55ba4 Mon Sep 17 00:00:00 2001 From: Stuart Hayes Date: Wed, 19 Feb 2020 15:31:13 +0100 Subject: [PATCH 1019/1304] PCI: pciehp: Fix MSI interrupt race [ Upstream commit 8edf5332c39340b9583cf9cba659eb7ec71f75b5 ] Without this commit, a PCIe hotplug port can stop generating interrupts on hotplug events, so device adds and removals will not be seen: The pciehp interrupt handler pciehp_isr() reads the Slot Status register and then writes back to it to clear the bits that caused the interrupt. If a different interrupt event bit gets set between the read and the write, pciehp_isr() returns without having cleared all of the interrupt event bits. If this happens when the MSI isn't masked (which by default it isn't in handle_edge_irq(), and which it will never be when MSI per-vector masking is not supported), we won't get any more hotplug interrupts from that device. That is expected behavior, according to the PCIe Base Spec r5.0, section 6.7.3.4, "Software Notification of Hot-Plug Events". Because the Presence Detect Changed and Data Link Layer State Changed event bits can both get set at nearly the same time when a device is added or removed, this is more likely to happen than it might seem. 
The issue was found (and can be reproduced rather easily) by connecting and disconnecting an NVMe storage device on at least one system model where the NVMe devices were being connected to an AMD PCIe port (PCI device 0x1022/0x1483). Fix the issue by modifying pciehp_isr() to loop back and re-read the Slot Status register immediately after writing to it, until it sees that all of the event status bits have been cleared. [lukas: drop loop count limitation, write "events" instead of "status", don't loop back in INTx and poll modes, tweak code comment & commit msg] Link: https://lore.kernel.org/r/78b4ced5072bfe6e369d20e8b47c279b8c7af12e.1582121613.git.lukas@wunner.de Tested-by: Stuart Hayes Signed-off-by: Stuart Hayes Signed-off-by: Lukas Wunner Signed-off-by: Bjorn Helgaas Reviewed-by: Joerg Roedel Signed-off-by: Sasha Levin --- drivers/pci/hotplug/pciehp_hpc.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 07940d1d83b7..005817e40ad3 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -530,7 +530,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) struct controller *ctrl = (struct controller *)dev_id; struct pci_dev *pdev = ctrl_dev(ctrl); struct device *parent = pdev->dev.parent; - u16 status, events; + u16 status, events = 0; /* * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4). @@ -553,6 +553,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) } } +read_status: pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status); if (status == (u16) ~0) { ctrl_info(ctrl, "%s: no response from device\n", __func__); @@ -565,24 +566,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) * Slot Status contains plain status bits as well as event * notification bits; right now we only want the event bits. 
*/ - events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | - PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | - PCI_EXP_SLTSTA_DLLSC); + status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | + PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | + PCI_EXP_SLTSTA_DLLSC; /* * If we've already reported a power fault, don't report it again * until we've done something to handle it. */ if (ctrl->power_fault_detected) - events &= ~PCI_EXP_SLTSTA_PFD; + status &= ~PCI_EXP_SLTSTA_PFD; + events |= status; if (!events) { if (parent) pm_runtime_put(parent); return IRQ_NONE; } - pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + if (status) { + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + + /* + * In MSI mode, all event bits must be zero before the port + * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4). + * So re-read the Slot Status register in case a bit was set + * between read and write. + */ + if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode) + goto read_status; + } + ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events); if (parent) pm_runtime_put(parent); -- GitLab From 1f39a7cc5d07a58c53f3054b177bad93c243d3f9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 1 Apr 2020 13:04:49 -0400 Subject: [PATCH 1020/1304] NFS: Fix races nfs_page_group_destroy() vs nfs_destroy_unlinked_subrequests() [ Upstream commit 08ca8b21f760c0ed5034a5c122092eec22ccf8f4 ] When a subrequest is being detached from the subgroup, we want to ensure that it is not holding the group lock, or in the process of waiting for the group lock. 
Fixes: 5b2b5187fa85 ("NFS: Fix nfs_page_group_destroy() and nfs_lock_and_join_requests() race cases") Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin --- fs/nfs/pagelist.c | 67 +++++++++++++++++++++++++++------------- fs/nfs/write.c | 10 ++++-- include/linux/nfs_page.h | 2 ++ 3 files changed, 55 insertions(+), 24 deletions(-) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 5dae7c85d9b6..2c7d76b4c5e1 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -132,47 +132,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx) EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait); /* - * nfs_page_group_lock - lock the head of the page group - * @req - request in group that is to be locked + * nfs_page_set_headlock - set the request PG_HEADLOCK + * @req: request that is to be locked * - * this lock must be held when traversing or modifying the page - * group list + * this lock must be held when modifying req->wb_head * * return 0 on success, < 0 on error */ int -nfs_page_group_lock(struct nfs_page *req) +nfs_page_set_headlock(struct nfs_page *req) { - struct nfs_page *head = req->wb_head; - - WARN_ON_ONCE(head != head->wb_head); - - if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags)) + if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags)) return 0; - set_bit(PG_CONTENDED1, &head->wb_flags); + set_bit(PG_CONTENDED1, &req->wb_flags); smp_mb__after_atomic(); - return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, + return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK, TASK_UNINTERRUPTIBLE); } /* - * nfs_page_group_unlock - unlock the head of the page group - * @req - request in group that is to be unlocked + * nfs_page_clear_headlock - clear the request PG_HEADLOCK + * @req: request that is to be locked */ void -nfs_page_group_unlock(struct nfs_page *req) +nfs_page_clear_headlock(struct nfs_page *req) { - struct nfs_page *head = req->wb_head; - - WARN_ON_ONCE(head != head->wb_head); - smp_mb__before_atomic(); - clear_bit(PG_HEADLOCK, 
&head->wb_flags); + clear_bit(PG_HEADLOCK, &req->wb_flags); smp_mb__after_atomic(); - if (!test_bit(PG_CONTENDED1, &head->wb_flags)) + if (!test_bit(PG_CONTENDED1, &req->wb_flags)) return; - wake_up_bit(&head->wb_flags, PG_HEADLOCK); + wake_up_bit(&req->wb_flags, PG_HEADLOCK); +} + +/* + * nfs_page_group_lock - lock the head of the page group + * @req: request in group that is to be locked + * + * this lock must be held when traversing or modifying the page + * group list + * + * return 0 on success, < 0 on error + */ +int +nfs_page_group_lock(struct nfs_page *req) +{ + int ret; + + ret = nfs_page_set_headlock(req); + if (ret || req->wb_head == req) + return ret; + return nfs_page_set_headlock(req->wb_head); +} + +/* + * nfs_page_group_unlock - unlock the head of the page group + * @req: request in group that is to be unlocked + */ +void +nfs_page_group_unlock(struct nfs_page *req) +{ + if (req != req->wb_head) + nfs_page_clear_headlock(req->wb_head); + nfs_page_clear_headlock(req); } /* diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 63d20308a9bb..d419d89b91f7 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -416,22 +416,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, destroy_list = (subreq->wb_this_page == old_head) ? 
NULL : subreq->wb_this_page; + /* Note: lock subreq in order to change subreq->wb_head */ + nfs_page_set_headlock(subreq); WARN_ON_ONCE(old_head != subreq->wb_head); /* make sure old group is not used */ subreq->wb_this_page = subreq; + subreq->wb_head = subreq; clear_bit(PG_REMOVE, &subreq->wb_flags); /* Note: races with nfs_page_group_destroy() */ if (!kref_read(&subreq->wb_kref)) { /* Check if we raced with nfs_page_group_destroy() */ - if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) + if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { + nfs_page_clear_headlock(subreq); nfs_free_request(subreq); + } else + nfs_page_clear_headlock(subreq); continue; } + nfs_page_clear_headlock(subreq); - subreq->wb_head = subreq; nfs_release_request(old_head); if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index ad69430fd0eb..5162fc1533c2 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *); extern int nfs_page_group_lock(struct nfs_page *); extern void nfs_page_group_unlock(struct nfs_page *); extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); +extern int nfs_page_set_headlock(struct nfs_page *req); +extern void nfs_page_clear_headlock(struct nfs_page *req); extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *); /* -- GitLab From afe001488e7e8e1108a2d9fcac3757713ffae503 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Wed, 1 Apr 2020 21:04:34 -0700 Subject: [PATCH 1021/1304] mm/kmemleak.c: use address-of operator on section symbols [ Upstream commit b0d14fc43d39203ae025f20ef4d5d25d9ccf4be1 ] Clang warns: mm/kmemleak.c:1955:28: warning: array comparison always evaluates to a constant [-Wtautological-compare] if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) ^ mm/kmemleak.c:1955:60: warning: array comparison always 
evaluates to a constant [-Wtautological-compare] if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) These are not true arrays, they are linker defined symbols, which are just addresses. Using the address of operator silences the warning and does not change the resulting assembly with either clang/ld.lld or gcc/ld (tested with diff + objdump -Dr). Suggested-by: Nick Desaulniers Signed-off-by: Nathan Chancellor Signed-off-by: Andrew Morton Acked-by: Catalin Marinas Link: https://github.com/ClangBuiltLinux/linux/issues/895 Link: http://lkml.kernel.org/r/20200220051551.44000-1-natechancellor@gmail.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/kmemleak.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 5eeabece0c17..f54734abf946 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -2039,7 +2039,7 @@ void __init kmemleak_init(void) create_object((unsigned long)__bss_start, __bss_stop - __bss_start, KMEMLEAK_GREY, GFP_ATOMIC); /* only register .data..ro_after_init if not within .data */ - if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata) + if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata) create_object((unsigned long)__start_ro_after_init, __end_ro_after_init - __start_ro_after_init, KMEMLEAK_GREY, GFP_ATOMIC); -- GitLab From cebefe4f6fc0cf5721d443b91e8f43a66766fb06 Mon Sep 17 00:00:00 2001 From: Xianting Tian Date: Wed, 1 Apr 2020 21:04:47 -0700 Subject: [PATCH 1022/1304] mm/filemap.c: clear page error before actual read [ Upstream commit faffdfa04fa11ccf048cebdde73db41ede0679e0 ] Mount failure issue happens under the scenario: Application forked dozens of threads to mount the same number of cramfs images separately in docker, but several mounts failed with high probability. 
Mount failed due to the checking result of the page(read from the superblock of loop dev) is not uptodate after wait_on_page_locked(page) returned in function cramfs_read: wait_on_page_locked(page); if (!PageUptodate(page)) { ... } The reason of the checking result of the page not uptodate: systemd-udevd read the loopX dev before mount, because the status of loopX is Lo_unbound at this time, so loop_make_request directly trigger the calling of io_end handler end_buffer_async_read, which called SetPageError(page). So It caused the page can't be set to uptodate in function end_buffer_async_read: if(page_uptodate && !PageError(page)) { SetPageUptodate(page); } Then mount operation is performed, it used the same page which is just accessed by systemd-udevd above, Because this page is not uptodate, it will launch a actual read via submit_bh, then wait on this page by calling wait_on_page_locked(page). When the I/O of the page done, io_end handler end_buffer_async_read is called, because no one cleared the page error(during the whole read path of mount), which is caused by systemd-udevd reading, so this page is still in "PageError" status, which can't be set to uptodate in function end_buffer_async_read, then caused mount failure. 
But sometimes mount succeed even through systemd-udeved read loopX dev just before, The reason is systemd-udevd launched other loopX read just between step 3.1 and 3.2, the steps as below: 1, loopX dev default status is Lo_unbound; 2, systemd-udved read loopX dev (page is set to PageError); 3, mount operation 1) set loopX status to Lo_bound; ==>systemd-udevd read loopX dev<== 2) read loopX dev(page has no error) 3) mount succeed As the loopX dev status is set to Lo_bound after step 3.1, so the other loopX dev read by systemd-udevd will go through the whole I/O stack, part of the call trace as below: SYS_read vfs_read do_sync_read blkdev_aio_read generic_file_aio_read do_generic_file_read: ClearPageError(page); mapping->a_ops->readpage(filp, page); here, mapping->a_ops->readpage() is blkdev_readpage. In latest kernel, some function name changed, the call trace as below: blkdev_read_iter generic_file_read_iter generic_file_buffered_read: /* * A previous I/O error may have been due to temporary * failures, eg. mutipath errors. * Pg_error will be set again if readpage fails. */ ClearPageError(page); /* Start the actual read. The read will unlock the page*/ error=mapping->a_ops->readpage(flip, page); We can see ClearPageError(page) is called before the actual read, then the read in step 3.2 succeed. This patch is to add the calling of ClearPageError just before the actual read of read path of cramfs mount. 
Without the patch, the call trace as below when performing cramfs mount: do_mount cramfs_read cramfs_blkdev_read read_cache_page do_read_cache_page: filler(data, page); or mapping->a_ops->readpage(data, page); With the patch, the call trace as below when performing mount: do_mount cramfs_read cramfs_blkdev_read read_cache_page: do_read_cache_page: ClearPageError(page); <== new add filler(data, page); or mapping->a_ops->readpage(data, page); With the patch, mount operation trigger the calling of ClearPageError(page) before the actual read, the page has no error if no additional page error happen when I/O done. Signed-off-by: Xianting Tian Signed-off-by: Andrew Morton Reviewed-by: Matthew Wilcox (Oracle) Cc: Jan Kara Cc: Link: http://lkml.kernel.org/r/1583318844-22971-1-git-send-email-xianting_tian@126.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/filemap.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mm/filemap.c b/mm/filemap.c index 45f1c6d73b5b..f2e777003b90 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2889,6 +2889,14 @@ static struct page *do_read_cache_page(struct address_space *mapping, unlock_page(page); goto out; } + + /* + * A previous I/O error may have been due to temporary + * failures. + * Clear page error before actual read, PG_error will be + * set again if read page fails. + */ + ClearPageError(page); goto filler; out: -- GitLab From b73c744019721ea47340b37440a7f6a263beea54 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Wed, 1 Apr 2020 21:10:12 -0700 Subject: [PATCH 1023/1304] mm/vmscan.c: fix data races using kswapd_classzone_idx [ Upstream commit 5644e1fbbfe15ad06785502bbfe5751223e5841d ] pgdat->kswapd_classzone_idx could be accessed concurrently in wakeup_kswapd(). Plain writes and reads without any lock protection result in data races. Fix them by adding a pair of READ|WRITE_ONCE() as well as saving a branch (compilers might well optimize the original code in an unintentional way anyway). 
While at it, also take care of pgdat->kswapd_order and non-kswapd threads in allow_direct_reclaim(). The data races were reported by KCSAN, BUG: KCSAN: data-race in wakeup_kswapd / wakeup_kswapd write to 0xffff9f427ffff2dc of 4 bytes by task 7454 on cpu 13: wakeup_kswapd+0xf1/0x400 wakeup_kswapd at mm/vmscan.c:3967 wake_all_kswapds+0x59/0xc0 wake_all_kswapds at mm/page_alloc.c:4241 __alloc_pages_slowpath+0xdcc/0x1290 __alloc_pages_slowpath at mm/page_alloc.c:4512 __alloc_pages_nodemask+0x3bb/0x450 alloc_pages_vma+0x8a/0x2c0 do_anonymous_page+0x16e/0x6f0 __handle_mm_fault+0xcd5/0xd40 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x6f9 page_fault+0x34/0x40 1 lock held by mtest01/7454: #0: ffff9f425afe8808 (&mm->mmap_sem#2){++++}, at: do_page_fault+0x143/0x6f9 do_user_addr_fault at arch/x86/mm/fault.c:1405 (inlined by) do_page_fault at arch/x86/mm/fault.c:1539 irq event stamp: 6944085 count_memcg_event_mm+0x1a6/0x270 count_memcg_event_mm+0x119/0x270 __do_softirq+0x34c/0x57c irq_exit+0xa2/0xc0 read to 0xffff9f427ffff2dc of 4 bytes by task 7472 on cpu 38: wakeup_kswapd+0xc8/0x400 wake_all_kswapds+0x59/0xc0 __alloc_pages_slowpath+0xdcc/0x1290 __alloc_pages_nodemask+0x3bb/0x450 alloc_pages_vma+0x8a/0x2c0 do_anonymous_page+0x16e/0x6f0 __handle_mm_fault+0xcd5/0xd40 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x6f9 page_fault+0x34/0x40 1 lock held by mtest01/7472: #0: ffff9f425a9ac148 (&mm->mmap_sem#2){++++}, at: do_page_fault+0x143/0x6f9 irq event stamp: 6793561 count_memcg_event_mm+0x1a6/0x270 count_memcg_event_mm+0x119/0x270 __do_softirq+0x34c/0x57c irq_exit+0xa2/0xc0 BUG: KCSAN: data-race in kswapd / wakeup_kswapd write to 0xffff90973ffff2dc of 4 bytes by task 820 on cpu 6: kswapd+0x27c/0x8d0 kthread+0x1e0/0x200 ret_from_fork+0x27/0x50 read to 0xffff90973ffff2dc of 4 bytes by task 6299 on cpu 0: wakeup_kswapd+0xf3/0x450 wake_all_kswapds+0x59/0xc0 __alloc_pages_slowpath+0xdcc/0x1290 __alloc_pages_nodemask+0x3bb/0x450 alloc_pages_vma+0x8a/0x2c0 
do_anonymous_page+0x170/0x700 __handle_mm_fault+0xc9f/0xd00 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x6f9 page_fault+0x34/0x40 Signed-off-by: Qian Cai Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Cc: Marco Elver Cc: Matthew Wilcox Link: http://lkml.kernel.org/r/1582749472-5171-1-git-send-email-cai@lca.pw Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/vmscan.c | 45 ++++++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index b93dc8fc6007..b7d7f6d65bd5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3109,8 +3109,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat) /* kswapd must be awake if processes are being throttled */ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { - pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx, - (enum zone_type)ZONE_NORMAL); + if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL) + WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL); + wake_up_interruptible(&pgdat->kswapd_wait); } @@ -3626,9 +3627,9 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx) static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat, enum zone_type prev_classzone_idx) { - if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES) - return prev_classzone_idx; - return pgdat->kswapd_classzone_idx; + enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); + + return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx; } static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, @@ -3672,8 +3673,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o * the previous request that slept prematurely. 
*/ if (remaining) { - pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); - pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order); + WRITE_ONCE(pgdat->kswapd_classzone_idx, + kswapd_classzone_idx(pgdat, classzone_idx)); + + if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) + WRITE_ONCE(pgdat->kswapd_order, reclaim_order); } finish_wait(&pgdat->kswapd_wait, &wait); @@ -3755,12 +3759,12 @@ static int kswapd(void *p) tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; set_freezable(); - pgdat->kswapd_order = 0; - pgdat->kswapd_classzone_idx = MAX_NR_ZONES; + WRITE_ONCE(pgdat->kswapd_order, 0); + WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES); for ( ; ; ) { bool ret; - alloc_order = reclaim_order = pgdat->kswapd_order; + alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); kswapd_try_sleep: @@ -3768,10 +3772,10 @@ static int kswapd(void *p) classzone_idx); /* Read the new order and classzone_idx */ - alloc_order = reclaim_order = pgdat->kswapd_order; + alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx); - pgdat->kswapd_order = 0; - pgdat->kswapd_classzone_idx = MAX_NR_ZONES; + WRITE_ONCE(pgdat->kswapd_order, 0); + WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES); ret = try_to_freeze(); if (kthread_should_stop()) @@ -3816,20 +3820,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order, enum zone_type classzone_idx) { pg_data_t *pgdat; + enum zone_type curr_idx; if (!managed_zone(zone)) return; if (!cpuset_zone_allowed(zone, gfp_flags)) return; + pgdat = zone->zone_pgdat; + curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx); + + if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx) + WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx); + + if (READ_ONCE(pgdat->kswapd_order) < order) + WRITE_ONCE(pgdat->kswapd_order, order); - if (pgdat->kswapd_classzone_idx == 
MAX_NR_ZONES) - pgdat->kswapd_classzone_idx = classzone_idx; - else - pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx, - classzone_idx); - pgdat->kswapd_order = max(pgdat->kswapd_order, order); if (!waitqueue_active(&pgdat->kswapd_wait)) return; -- GitLab From 5fd750e826609e69f11d2d55113efbd40e9f6b4c Mon Sep 17 00:00:00 2001 From: Israel Rukshin Date: Tue, 7 Apr 2020 11:02:28 +0000 Subject: [PATCH 1024/1304] nvmet-rdma: fix double free of rdma queue [ Upstream commit 21f9024355e58772ec5d7fc3534aa5e29d72a8b6 ] In case rdma accept fails at nvmet_rdma_queue_connect(), release work is scheduled. Later on, a new RDMA CM event may arrive since we didn't destroy the cm-id and call nvmet_rdma_queue_connect_fail(), which schedule another release work. This will cause calling nvmet_rdma_free_queue twice. To fix this we implicitly destroy the cm_id with non-zero ret code, which guarantees that new rdma_cm events will not arrive afterwards. Also add a qp pointer to nvmet_rdma_queue structure, so we can use it when the cm_id pointer is NULL or was destroyed. 
Signed-off-by: Israel Rukshin Suggested-by: Sagi Grimberg Reviewed-by: Max Gurtovoy Reviewed-by: Sagi Grimberg Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- drivers/nvme/target/rdma.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 08f997a390d5..cfd26437aeae 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -83,6 +83,7 @@ enum nvmet_rdma_queue_state { struct nvmet_rdma_queue { struct rdma_cm_id *cm_id; + struct ib_qp *qp; struct nvmet_port *port; struct ib_cq *cq; atomic_t sq_wr_avail; @@ -471,7 +472,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, if (ndev->srq) ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL); else - ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL); + ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); if (unlikely(ret)) pr_err("post_recv cmd failed\n"); @@ -510,7 +511,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); if (rsp->n_rdma) { - rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, + rdma_rw_ctx_destroy(&rsp->rw, queue->qp, queue->cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); } @@ -594,7 +595,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) WARN_ON(rsp->n_rdma <= 0); atomic_add(rsp->n_rdma, &queue->sq_wr_avail); - rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, + rdma_rw_ctx_destroy(&rsp->rw, queue->qp, queue->cm_id->port_num, rsp->req.sg, rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); rsp->n_rdma = 0; @@ -737,7 +738,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp) } if (nvmet_rdma_need_data_in(rsp)) { - if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp, + if (rdma_rw_ctx_post(&rsp->rw, queue->qp, queue->cm_id->port_num, &rsp->read_cqe, NULL)) nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); } else { @@ 
-1020,6 +1021,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) pr_err("failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } + queue->qp = queue->cm_id->qp; atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); @@ -1048,11 +1050,10 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) { - struct ib_qp *qp = queue->cm_id->qp; - - ib_drain_qp(qp); - rdma_destroy_id(queue->cm_id); - ib_destroy_qp(qp); + ib_drain_qp(queue->qp); + if (queue->cm_id) + rdma_destroy_id(queue->cm_id); + ib_destroy_qp(queue->qp); ib_free_cq(queue->cq); } @@ -1286,9 +1287,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); if (ret) { - schedule_work(&queue->release_work); - /* Destroying rdma_cm id is not needed here */ - return 0; + /* + * Don't destroy the cm_id in free path, as we implicitly + * destroy the cm_id here with non-zero ret code. + */ + queue->cm_id = NULL; + goto free_queue; } mutex_lock(&nvmet_rdma_queue_mutex); @@ -1297,6 +1301,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, return 0; +free_queue: + nvmet_rdma_free_queue(queue); put_device: kref_put(&ndev->ref, nvmet_rdma_free_dev); -- GitLab From 6bee7991f63e6ae8faba0c704f4d98575bb0312f Mon Sep 17 00:00:00 2001 From: Jaewon Kim Date: Fri, 10 Apr 2020 14:32:48 -0700 Subject: [PATCH 1025/1304] mm/mmap.c: initialize align_offset explicitly for vm_unmapped_area [ Upstream commit 09ef5283fd96ac424ef0e569626f359bf9ab86c9 ] On passing requirement to vm_unmapped_area, arch_get_unmapped_area and arch_get_unmapped_area_topdown did not set align_offset. Internally on both unmapped_area and unmapped_area_topdown, if info->align_mask is 0, then info->align_offset was meaningless. 
But commit df529cabb7a2 ("mm: mmap: add trace point of vm_unmapped_area") always prints info->align_offset even though it is uninitialized. Fix this uninitialized value issue by setting it to 0 explicitly. Before: vm_unmapped_area: addr=0x755b155000 err=0 total_vm=0x15aaf0 flags=0x1 len=0x109000 lo=0x8000 hi=0x75eed48000 mask=0x0 ofs=0x4022 After: vm_unmapped_area: addr=0x74a4ca1000 err=0 total_vm=0x168ab1 flags=0x1 len=0x9000 lo=0x8000 hi=0x753d94b000 mask=0x0 ofs=0x0 Signed-off-by: Jaewon Kim Signed-off-by: Andrew Morton Reviewed-by: Andrew Morton Cc: Matthew Wilcox (Oracle) Cc: Michel Lespinasse Cc: Borislav Petkov Link: http://lkml.kernel.org/r/20200409094035.19457-1-jaewon31.kim@samsung.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/mmap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/mmap.c b/mm/mmap.c index e84fd3347a51..f875386e7acd 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2077,6 +2077,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, info.low_limit = mm->mmap_base; info.high_limit = TASK_SIZE; info.align_mask = 0; + info.align_offset = 0; return vm_unmapped_area(&info); } #endif @@ -2118,6 +2119,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = mm->mmap_base; info.align_mask = 0; + info.align_offset = 0; addr = vm_unmapped_area(&info); /* -- GitLab From b860a828153a69e9d0993a163131e283ecc61fb8 Mon Sep 17 00:00:00 2001 From: Nilesh Javali Date: Tue, 7 Apr 2020 23:43:32 -0700 Subject: [PATCH 1026/1304] scsi: qedi: Fix termination timeouts in session logout [ Upstream commit b9b97e6903032ec56e6dcbe137a9819b74a17fea ] The destroy connection ramrod timed out during session logout. Fix the wait delay for graceful vs abortive termination as per the FW requirements. 
Link: https://lore.kernel.org/r/20200408064332.19377-7-mrangankar@marvell.com Reviewed-by: Lee Duncan Signed-off-by: Nilesh Javali Signed-off-by: Manish Rangankar Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin --- drivers/scsi/qedi/qedi_iscsi.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 751941a3ed30..aa451c8b49e5 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -1065,6 +1065,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep) break; } + if (!abrt_conn) + wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer; + qedi_ep->state = EP_STATE_DISCONN_START; ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn); if (ret) { -- GitLab From 99e4fecd18d6d26c96cec5b03880d63924b77cfe Mon Sep 17 00:00:00 2001 From: Raviteja Narayanam Date: Thu, 9 Apr 2020 11:56:02 +0530 Subject: [PATCH 1027/1304] serial: uartps: Wait for tx_empty in console setup [ Upstream commit 42e11948ddf68b9f799cad8c0ddeab0a39da33e8 ] On some platforms, the log is corrupted while console is being registered. It is observed that when set_termios is called, there are still some bytes in the FIFO to be transmitted. So, wait for tx_empty inside cdns_uart_console_setup before calling set_termios. 
Signed-off-by: Raviteja Narayanam Reviewed-by: Shubhrajyoti Datta Link: https://lore.kernel.org/r/1586413563-29125-2-git-send-email-raviteja.narayanam@xilinx.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/serial/xilinx_uartps.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 31950a38f0fb..23f9b0cdff08 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -1236,6 +1236,7 @@ static int cdns_uart_console_setup(struct console *co, char *options) int bits = 8; int parity = 'n'; int flow = 'n'; + unsigned long time_out; if (!port->membase) { pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n", @@ -1246,6 +1247,13 @@ static int cdns_uart_console_setup(struct console *co, char *options) if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); + /* Wait for tx_empty before setting up the console */ + time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT); + + while (time_before(jiffies, time_out) && + cdns_uart_tx_empty(port) != TIOCSER_TEMT) + cpu_relax(); + return uart_set_options(port, co, baud, parity, bits, flow); } -- GitLab From 2c0356665fd0f6161a9284a61c06974ac82af7a7 Mon Sep 17 00:00:00 2001 From: Steve Rutherford Date: Thu, 16 Apr 2020 12:11:52 -0700 Subject: [PATCH 1028/1304] KVM: Remove CREATE_IRQCHIP/SET_PIT2 race [ Upstream commit 7289fdb5dcdbc5155b5531529c44105868a762f2 ] Fixes a NULL pointer dereference, caused by the PIT firing an interrupt before the interrupt table has been initialized. SET_PIT2 can race with the creation of the IRQchip. In particular, if SET_PIT2 is called with a low PIT timer period (after the creation of the IOAPIC, but before the instantiation of the irq routes), the PIT can fire an interrupt at an uninitialized table. 
Signed-off-by: Steve Rutherford Signed-off-by: Jon Cargille Reviewed-by: Jim Mattson Message-Id: <20200416191152.259434-1-jcargill@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- arch/x86/kvm/x86.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 430a4bc66f60..620ed1fa3511 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4668,10 +4668,13 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; + mutex_lock(&kvm->lock); r = -ENXIO; if (!kvm->arch.vpit) - goto out; + goto set_pit_out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); +set_pit_out: + mutex_unlock(&kvm->lock); break; } case KVM_GET_PIT2: { @@ -4691,10 +4694,13 @@ long kvm_arch_vm_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; + mutex_lock(&kvm->lock); r = -ENXIO; if (!kvm->arch.vpit) - goto out; + goto set_pit2_out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); +set_pit2_out: + mutex_unlock(&kvm->lock); break; } case KVM_REINJECT_CONTROL: { -- GitLab From b6256c2966706c279f54bdd2c6582c7c370e9467 Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Tue, 24 Mar 2020 14:48:27 -0700 Subject: [PATCH 1029/1304] bdev: Reduce time holding bd_mutex in sync in blkdev_close() [ Upstream commit b849dd84b6ccfe32622988b79b7b073861fcf9f7 ] While trying to "dd" to the block device for a USB stick, I encountered a hung task warning (blocked for > 120 seconds). I managed to come up with an easy way to reproduce this on my system (where /dev/sdb is the block device for my USB stick) with: while true; do dd if=/dev/zero of=/dev/sdb bs=4M; done With my reproduction here are the relevant bits from the hung task detector: INFO: task udevd:294 blocked for more than 122 seconds. ... udevd D 0 294 1 0x00400008 Call trace: ... 
mutex_lock_nested+0x40/0x50 __blkdev_get+0x7c/0x3d4 blkdev_get+0x118/0x138 blkdev_open+0x94/0xa8 do_dentry_open+0x268/0x3a0 vfs_open+0x34/0x40 path_openat+0x39c/0xdf4 do_filp_open+0x90/0x10c do_sys_open+0x150/0x3c8 ... ... Showing all locks held in the system: ... 1 lock held by dd/2798: #0: ffffff814ac1a3b8 (&bdev->bd_mutex){+.+.}, at: __blkdev_put+0x50/0x204 ... dd D 0 2798 2764 0x00400208 Call trace: ... schedule+0x8c/0xbc io_schedule+0x1c/0x40 wait_on_page_bit_common+0x238/0x338 __lock_page+0x5c/0x68 write_cache_pages+0x194/0x500 generic_writepages+0x64/0xa4 blkdev_writepages+0x24/0x30 do_writepages+0x48/0xa8 __filemap_fdatawrite_range+0xac/0xd8 filemap_write_and_wait+0x30/0x84 __blkdev_put+0x88/0x204 blkdev_put+0xc4/0xe4 blkdev_close+0x28/0x38 __fput+0xe0/0x238 ____fput+0x1c/0x28 task_work_run+0xb0/0xe4 do_notify_resume+0xfc0/0x14bc work_pending+0x8/0x14 The problem appears related to the fact that my USB disk is terribly slow and that I have a lot of RAM in my system to cache things. Specifically my writes seem to be happening at ~15 MB/s and I've got ~4 GB of RAM in my system that can be used for buffering. To write 4 GB of buffer to disk thus takes ~4000 MB / ~15 MB/s = ~267 seconds. The 267 second number is a problem because in __blkdev_put() we call sync_blockdev() while holding the bd_mutex. Any other callers who want the bd_mutex will be blocked for the whole time. The problem is made worse because I believe blkdev_put() specifically tells other tasks (namely udev) to go try to access the device at right around the same time we're going to hold the mutex for a long time. 
Putting some traces around this (after disabling the hung task detector), I could confirm: dd: 437.608600: __blkdev_put() right before sync_blockdev() for sdb udevd: 437.623901: blkdev_open() right before blkdev_get() for sdb dd: 661.468451: __blkdev_put() right after sync_blockdev() for sdb udevd: 663.820426: blkdev_open() right after blkdev_get() for sdb A simple fix for this is to realize that sync_blockdev() works fine if you're not holding the mutex. Also, it's not the end of the world if you sync a little early (though it can have performance impacts). Thus we can make a guess that we're going to need to do the sync and then do it without holding the mutex. We still do one last sync with the mutex but it should be much, much faster. With this, my hung task warnings for my test case are gone. Signed-off-by: Douglas Anderson Reviewed-by: Guenter Roeck Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- fs/block_dev.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/fs/block_dev.c b/fs/block_dev.c index 8ac8f7469354..9f3faac49025 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1793,6 +1793,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) struct gendisk *disk = bdev->bd_disk; struct block_device *victim = NULL; + /* + * Sync early if it looks like we're the last one. If someone else + * opens the block device between now and the decrement of bd_openers + * then we did a sync that we didn't need to, but that's not the end + * of the world and we want to avoid long (could be several minute) + * syncs while holding the mutex. 
+ */ + if (bdev->bd_openers == 1) + sync_blockdev(bdev); + mutex_lock_nested(&bdev->bd_mutex, for_part); if (for_part) bdev->bd_part_count--; -- GitLab From 82be3d65f4198d3958bd54de1cba193368a4b1a2 Mon Sep 17 00:00:00 2001 From: Madhuparna Bhowmik Date: Fri, 17 Apr 2020 21:04:51 +0530 Subject: [PATCH 1030/1304] drivers: char: tlclk.c: Avoid data race between init and interrupt handler [ Upstream commit 44b8fb6eaa7c3fb770bf1e37619cdb3902cca1fc ] After registering character device the file operation callbacks can be called. The open callback registers interrupt handler. Therefore interrupt handler can execute in parallel with rest of the init function. To avoid such data race initialize telclk_interrupt variable and struct alarm_events before registering character device. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Madhuparna Bhowmik Link: https://lore.kernel.org/r/20200417153451.1551-1-madhuparnabhowmik10@gmail.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/char/tlclk.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 8eeb4190207d..dce22b7fc544 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -776,17 +776,21 @@ static int __init tlclk_init(void) { int ret; + telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); + + alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); + if (!alarm_events) { + ret = -ENOMEM; + goto out1; + } + ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops); if (ret < 0) { printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); + kfree(alarm_events); return ret; } tlclk_major = ret; - alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); - if (!alarm_events) { - ret = -ENOMEM; - goto out1; - } /* Read telecom clock IRQ number (Set by BIOS) */ if (!request_region(TLCLK_BASE, 8, "telco_clock")) { @@ -795,7 +799,6 @@ static int __init tlclk_init(void) ret 
= -EBUSY; goto out2; } - telclk_interrupt = (inb(TLCLK_REG7) & 0x0f); if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */ printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n", @@ -836,8 +839,8 @@ static int __init tlclk_init(void) release_region(TLCLK_BASE, 8); out2: kfree(alarm_events); -out1: unregister_chrdev(tlclk_major, "telco_clock"); +out1: return ret; } -- GitLab From 7c451f583f547536854e9647fa9582e698bc111c Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Tue, 14 Apr 2020 11:03:48 +0800 Subject: [PATCH 1031/1304] KVM: arm64: vgic-its: Fix memory leak on the error path of vgic_add_lpi() [ Upstream commit 57bdb436ce869a45881d8aa4bc5dac8e072dd2b6 ] If we're going to fail out the vgic_add_lpi(), let's make sure the allocated vgic_irq memory is also freed. Though it seems that both cases are unlikely to fail. Signed-off-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20200414030349.625-3-yuzenghui@huawei.com Signed-off-by: Sasha Levin --- virt/kvm/arm/vgic/vgic-its.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 9295addea7ec..f139b1c62ca3 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -107,14 +107,21 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, * We "cache" the configuration table entries in our struct vgic_irq's. * However we only have those structs for mapped IRQs, so we read in * the respective config data from memory here upon mapping the LPI. + * + * Should any of these fail, behave as if we couldn't create the LPI + * by dropping the refcount and returning the error. 
*/ ret = update_lpi_config(kvm, irq, NULL, false); - if (ret) + if (ret) { + vgic_put_irq(kvm, irq); return ERR_PTR(ret); + } ret = vgic_v3_lpi_sync_pending_status(kvm, irq); - if (ret) + if (ret) { + vgic_put_irq(kvm, irq); return ERR_PTR(ret); + } return irq; } -- GitLab From 6043d6112f7dece5285eb87edc49b5d4ac248297 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Fri, 24 Apr 2020 08:08:06 +0800 Subject: [PATCH 1032/1304] net: openvswitch: use u64 for meter bucket [ Upstream commit e57358873bb5d6caa882b9684f59140912b37dde ] When setting the meter rate to 4+Gbps, there is an overflow, the meters don't work as expected. Cc: Pravin B Shelar Cc: Andy Zhou Signed-off-by: Tonghao Zhang Acked-by: Pravin B Shelar Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/openvswitch/meter.c | 2 +- net/openvswitch/meter.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index c038e021a591..6f5131d1074b 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -255,7 +255,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a) * * Start with a full bucket. 
*/ - band->bucket = (band->burst_size + band->rate) * 1000; + band->bucket = (band->burst_size + band->rate) * 1000ULL; band_max_delta_t = band->bucket / band->rate; if (band_max_delta_t > meter->max_delta_t) meter->max_delta_t = band_max_delta_t; diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h index 964ace2650f8..970557ed5b5b 100644 --- a/net/openvswitch/meter.h +++ b/net/openvswitch/meter.h @@ -26,7 +26,7 @@ struct dp_meter_band { u32 type; u32 rate; u32 burst_size; - u32 bucket; /* 1/1000 packets, or in bits */ + u64 bucket; /* 1/1000 packets, or in bits */ struct ovs_flow_stats stats; }; -- GitLab From 31662a5a20aeea840be606c95c2953fe2262b1c4 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 12 Apr 2020 11:40:39 +0200 Subject: [PATCH 1033/1304] scsi: aacraid: Fix error handling paths in aac_probe_one() [ Upstream commit f7854c382240c1686900b2f098b36430c6f5047e ] If 'scsi_host_alloc()' or 'kcalloc()' fail, 'error' is known to be 0. Set it explicitly to -ENOMEM before branching to the error handling path. While at it, remove 2 useless assignments to 'error'. These values are overwritten a few lines later. Link: https://lore.kernel.org/r/20200412094039.8822-1-christophe.jaillet@wanadoo.fr Signed-off-by: Christophe JAILLET Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/aacraid/linit.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 0142547aaadd..eecffc03084c 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -1620,7 +1620,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) struct Scsi_Host *shost; struct aac_dev *aac; struct list_head *insert = &aac_devices; - int error = -ENODEV; + int error; int unique_id = 0; u64 dmamask; int mask_bits = 0; @@ -1645,7 +1645,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) error = pci_enable_device(pdev); if (error) goto out; - error = -ENODEV; if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) { error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); @@ -1677,8 +1676,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_master(pdev); shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev)); - if (!shost) + if (!shost) { + error = -ENOMEM; goto out_disable_pdev; + } shost->irq = pdev->irq; shost->unique_id = unique_id; @@ -1703,8 +1704,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB, sizeof(struct fib), GFP_KERNEL); - if (!aac->fibs) + if (!aac->fibs) { + error = -ENOMEM; goto out_free_host; + } + spin_lock_init(&aac->fib_lock); mutex_init(&aac->ioctl_mutex); -- GitLab From bbd1347bd4ad6e2fc79bed8f19b088abb42fb4b1 Mon Sep 17 00:00:00 2001 From: Ivan Safonov Date: Thu, 23 Apr 2020 22:14:04 +0300 Subject: [PATCH 1034/1304] staging:r8188eu: avoid skb_clone for amsdu to msdu conversion [ Upstream commit 628cbd971a927abe6388d44320e351c337b331e4 ] skb clones use same data buffer, so tail of one skb is corrupted by beginning of next skb. 
Signed-off-by: Ivan Safonov Link: https://lore.kernel.org/r/20200423191404.12028-1-insafonov@gmail.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/staging/rtl8188eu/core/rtw_recv.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c index 17b4b9257b49..0ddf41b5a734 100644 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c @@ -1535,21 +1535,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe) /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); - if (sub_skb) { - skb_reserve(sub_skb, 12); - skb_put_data(sub_skb, pdata, nSubframe_Length); - } else { - sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC); - if (sub_skb) { - sub_skb->data = pdata; - sub_skb->len = nSubframe_Length; - skb_set_tail_pointer(sub_skb, nSubframe_Length); - } else { - DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes); - break; - } + if (!sub_skb) { + DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes); + break; } + skb_reserve(sub_skb, 12); + skb_put_data(sub_skb, pdata, nSubframe_Length); + subframes[nr_subframes++] = sub_skb; if (nr_subframes >= MAX_SUBFRAME_COUNT) { -- GitLab From 5c8c4d83ef7038059c4be11b099e537598040e63 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 27 Apr 2020 12:24:15 +0000 Subject: [PATCH 1035/1304] sparc64: vcc: Fix error return code in vcc_probe() [ Upstream commit ff62255a2a5c1228a28f2bb063646f948115a309 ] Fix to return negative error code -ENOMEM from the error handling case instead of 0, as done elsewhere in this function. 
Signed-off-by: Wei Yongjun Link: https://lore.kernel.org/r/20200427122415.47416-1-weiyongjun1@huawei.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/vcc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 58b454c34560..10a832a2135e 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -604,6 +604,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) port->index = vcc_table_add(port); if (port->index == -1) { pr_err("VCC: no more TTY indices left for allocation\n"); + rv = -ENOMEM; goto free_ldc; } -- GitLab From 8658bb981e148a19946d1e6825bad46cc8785eb7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 21 Apr 2020 15:29:21 +0100 Subject: [PATCH 1036/1304] arm64: cpufeature: Relax checks for AArch32 support at EL[0-2] [ Upstream commit 98448cdfe7060dd5491bfbd3f7214ffe1395d58e ] We don't need to be quite as strict about mismatched AArch32 support, which is good because the friendly hardware folks have been busy mismatching this to their hearts' content. * We don't care about EL2 or EL3 (there are silly comments concerning the latter, so remove those) * EL1 support is gated by the ARM64_HAS_32BIT_EL1 capability and handled gracefully when a mismatch occurs * EL0 support is gated by the ARM64_HAS_32BIT_EL0 capability and handled gracefully when a mismatch occurs Relax the AArch32 checks to FTR_NONSTRICT. 
Tested-by: Sai Prakash Ranjan Reviewed-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20200421142922.18950-8-will@kernel.org Signed-off-by: Will Deacon Signed-off-by: Sasha Levin --- arch/arm64/kernel/cpufeature.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index ac3126aba036..095dec566275 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -155,11 +155,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), - /* Linux doesn't care about the EL3 */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY), + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY), ARM64_FTR_END, }; @@ -671,9 +670,6 @@ void update_cpu_features(int cpu, taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu, info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2); - /* - * EL3 is not our concern. 
- */ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu, info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0); taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu, -- GitLab From fa73de8793fbbace70281f6e40fdd1831d6e14b6 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 1 May 2020 15:35:34 +0200 Subject: [PATCH 1037/1304] dt-bindings: sound: wm8994: Correct required supplies based on actual implementation [ Upstream commit 8c149b7d75e53be47648742f40fc90d9fc6fa63a ] The required supplies in bindings were actually not matching implementation making the bindings incorrect and misleading. The Linux kernel driver requires all supplies to be present. Also for wlf,wm8994 uses just DBVDD-supply instead of DBVDDn-supply (n: <1,3>). Reported-by: Jonathan Bakker Signed-off-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20200501133534.6706-1-krzk@kernel.org Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- .../devicetree/bindings/sound/wm8994.txt | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt index 68cccc4653ba..367b58ce1bb9 100644 --- a/Documentation/devicetree/bindings/sound/wm8994.txt +++ b/Documentation/devicetree/bindings/sound/wm8994.txt @@ -14,9 +14,15 @@ Required properties: - #gpio-cells : Must be 2. The first cell is the pin number and the second cell is used to specify optional parameters (currently unused). 
- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply, - SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered - in Documentation/devicetree/bindings/regulator/regulator.txt + - power supplies for the device, as covered in + Documentation/devicetree/bindings/regulator/regulator.txt, depending + on compatible: + - for wlf,wm1811 and wlf,wm8958: + AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, + DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply + - for wlf,wm8994: + AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply, + SPKVDD1-supply, SPKVDD2-supply Optional properties: @@ -73,11 +79,11 @@ wm8994: codec@1a { lineout1-se; + AVDD1-supply = <®ulator>; AVDD2-supply = <®ulator>; CPVDD-supply = <®ulator>; - DBVDD1-supply = <®ulator>; - DBVDD2-supply = <®ulator>; - DBVDD3-supply = <®ulator>; + DBVDD-supply = <®ulator>; + DCVDD-supply = <®ulator>; SPKVDD1-supply = <®ulator>; SPKVDD2-supply = <®ulator>; }; -- GitLab From 68dc33fcf817eaf62cd8d4eabf28f03fb0432532 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 1 May 2020 11:11:09 -0700 Subject: [PATCH 1038/1304] atm: fix a memory leak of vcc->user_back [ Upstream commit 8d9f73c0ad2f20e9fed5380de0a3097825859d03 ] In lec_arp_clear_vccs() only entry->vcc is freed, but vcc could be installed on entry->recv_vcc too in lec_vcc_added(). This fixes the following memory leak: unreferenced object 0xffff8880d9266b90 (size 16): comm "atm2", pid 425, jiffies 4294907980 (age 23.488s) hex dump (first 16 bytes): 00 00 00 00 00 00 00 00 00 00 00 00 6b 6b 6b a5 ............kkk. 
backtrace: [<(____ptrval____)>] kmem_cache_alloc_trace+0x10e/0x151 [<(____ptrval____)>] lane_ioctl+0x4b3/0x569 [<(____ptrval____)>] do_vcc_ioctl+0x1ea/0x236 [<(____ptrval____)>] svc_ioctl+0x17d/0x198 [<(____ptrval____)>] sock_do_ioctl+0x47/0x12f [<(____ptrval____)>] sock_ioctl+0x2f9/0x322 [<(____ptrval____)>] vfs_ioctl+0x1e/0x2b [<(____ptrval____)>] ksys_ioctl+0x61/0x80 [<(____ptrval____)>] __x64_sys_ioctl+0x16/0x19 [<(____ptrval____)>] do_syscall_64+0x57/0x65 [<(____ptrval____)>] entry_SYSCALL_64_after_hwframe+0x49/0xb3 Cc: Gengming Liu Signed-off-by: Cong Wang Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/atm/lec.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/atm/lec.c b/net/atm/lec.c index ad4f829193f0..5a6186b80987 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -1270,6 +1270,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry) entry->vcc = NULL; } if (entry->recv_vcc) { + struct atm_vcc *vcc = entry->recv_vcc; + struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); + + kfree(vpriv); + vcc->user_back = NULL; + entry->recv_vcc->push = entry->old_recv_push; vcc_release_async(entry->recv_vcc, -EPIPE); entry->recv_vcc = NULL; -- GitLab From 318af7241223eea9fc16413b04a6915518ab1e9c Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Fri, 20 Mar 2020 11:23:47 -0700 Subject: [PATCH 1039/1304] perf mem2node: Avoid double free related to realloc [ Upstream commit 266150c94c69429cf6d18e130237224a047f5061 ] Realloc of size zero is a free not an error, avoid this causing a double free. 
Caught by clang's address sanitizer: ==2634==ERROR: AddressSanitizer: attempting double-free on 0x6020000015f0 in thread T0: #0 0x5649659297fd in free llvm/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp:123:3 #1 0x5649659e9251 in __zfree tools/lib/zalloc.c:13:2 #2 0x564965c0f92c in mem2node__exit tools/perf/util/mem2node.c:114:2 #3 0x564965a08b4c in perf_c2c__report tools/perf/builtin-c2c.c:2867:2 #4 0x564965a0616a in cmd_c2c tools/perf/builtin-c2c.c:2989:10 #5 0x564965944348 in run_builtin tools/perf/perf.c:312:11 #6 0x564965943235 in handle_internal_command tools/perf/perf.c:364:8 #7 0x5649659440c4 in run_argv tools/perf/perf.c:408:2 #8 0x564965942e41 in main tools/perf/perf.c:538:3 0x6020000015f0 is located 0 bytes inside of 1-byte region [0x6020000015f0,0x6020000015f1) freed by thread T0 here: #0 0x564965929da3 in realloc third_party/llvm/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp:164:3 #1 0x564965c0f55e in mem2node__init tools/perf/util/mem2node.c:97:16 #2 0x564965a08956 in perf_c2c__report tools/perf/builtin-c2c.c:2803:8 #3 0x564965a0616a in cmd_c2c tools/perf/builtin-c2c.c:2989:10 #4 0x564965944348 in run_builtin tools/perf/perf.c:312:11 #5 0x564965943235 in handle_internal_command tools/perf/perf.c:364:8 #6 0x5649659440c4 in run_argv tools/perf/perf.c:408:2 #7 0x564965942e41 in main tools/perf/perf.c:538:3 previously allocated by thread T0 here: #0 0x564965929c42 in calloc third_party/llvm/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp:154:3 #1 0x5649659e9220 in zalloc tools/lib/zalloc.c:8:9 #2 0x564965c0f32d in mem2node__init tools/perf/util/mem2node.c:61:12 #3 0x564965a08956 in perf_c2c__report tools/perf/builtin-c2c.c:2803:8 #4 0x564965a0616a in cmd_c2c tools/perf/builtin-c2c.c:2989:10 #5 0x564965944348 in run_builtin tools/perf/perf.c:312:11 #6 0x564965943235 in handle_internal_command tools/perf/perf.c:364:8 #7 0x5649659440c4 in run_argv tools/perf/perf.c:408:2 #8 0x564965942e41 in main tools/perf/perf.c:538:3 v2: add a 
WARN_ON_ONCE when the free condition arises. Signed-off-by: Ian Rogers Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: clang-built-linux@googlegroups.com Link: http://lore.kernel.org/lkml/20200320182347.87675-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/util/mem2node.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c index c6fd81c02586..81c5a2e438b7 100644 --- a/tools/perf/util/mem2node.c +++ b/tools/perf/util/mem2node.c @@ -1,5 +1,6 @@ #include #include +#include #include #include "mem2node.h" #include "util.h" @@ -92,7 +93,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env) /* Cut unused entries, due to merging. */ tmp_entries = realloc(entries, sizeof(*entries) * j); - if (tmp_entries) + if (tmp_entries || WARN_ON_ONCE(j == 0)) entries = tmp_entries; for (i = 0; i < j; i++) { -- GitLab From da95fdc3c756d48dc2a9ae2d32a191646b9c9813 Mon Sep 17 00:00:00 2001 From: Jonathan Bakker Date: Mon, 4 May 2020 15:12:58 -0700 Subject: [PATCH 1040/1304] power: supply: max17040: Correct voltage reading [ Upstream commit 0383024f811aa469df258039807810fc3793a105 ] According to the datasheet available at (1), the bottom four bits are always zero and the actual voltage is 1.25x this value in mV. Since the kernel API specifies that voltages should be in uV, it should report 1250x the shifted value. 
1) https://datasheets.maximintegrated.com/en/ds/MAX17040-MAX17041.pdf Signed-off-by: Jonathan Bakker Signed-off-by: Sebastian Reichel Signed-off-by: Sasha Levin --- drivers/power/supply/max17040_battery.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c index 33c40f79d23d..2c35c13ad546 100644 --- a/drivers/power/supply/max17040_battery.c +++ b/drivers/power/supply/max17040_battery.c @@ -109,7 +109,7 @@ static void max17040_get_vcell(struct i2c_client *client) vcell = max17040_read_reg(client, MAX17040_VCELL); - chip->vcell = vcell; + chip->vcell = (vcell >> 4) * 1250; } static void max17040_get_soc(struct i2c_client *client) -- GitLab From 82d16c23518f9c3c275bf2557cd057b0e3f14ce1 Mon Sep 17 00:00:00 2001 From: Jonathan Bakker Date: Sat, 25 Apr 2020 10:36:33 -0700 Subject: [PATCH 1041/1304] phy: samsung: s5pv210-usb2: Add delay after reset [ Upstream commit 05942b8c36c7eb5d3fc5e375d4b0d0c49562e85d ] The USB phy takes some time to reset, so make sure we give it to it. The delay length was taken from the 4x12 phy driver. This manifested in issues with the DWC2 driver since commit fe369e1826b3 ("usb: dwc2: Make dwc2_readl/writel functions endianness-agnostic.") where the endianness check would read the DWC ID as 0 due to the phy still resetting, resulting in the wrong endian mode being chosen. 
Signed-off-by: Jonathan Bakker Link: https://lore.kernel.org/r/BN6PR04MB06605D52502816E500683553A3D10@BN6PR04MB0660.namprd04.prod.outlook.com Signed-off-by: Kishon Vijay Abraham I Signed-off-by: Sasha Levin --- drivers/phy/samsung/phy-s5pv210-usb2.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c index f6f72339bbc3..bb7fdf491c1c 100644 --- a/drivers/phy/samsung/phy-s5pv210-usb2.c +++ b/drivers/phy/samsung/phy-s5pv210-usb2.c @@ -142,6 +142,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on) udelay(10); rst &= ~rstbits; writel(rst, drv->reg_phy + S5PV210_UPHYRST); + /* The following delay is necessary for the reset sequence to be + * completed + */ + udelay(80); } else { pwr = readl(drv->reg_phy + S5PV210_UPHYPWR); pwr |= phypwr; -- GitLab From f3fd4552a9bf7c14c8d9f4255cad5d8c78f24e63 Mon Sep 17 00:00:00 2001 From: Sonny Sasaka Date: Wed, 6 May 2020 12:55:03 -0700 Subject: [PATCH 1042/1304] Bluetooth: Handle Inquiry Cancel error after Inquiry Complete [ Upstream commit adf1d6926444029396861413aba8a0f2a805742a ] After sending Inquiry Cancel command to the controller, it is possible that Inquiry Complete event comes before Inquiry Cancel command complete event. In this case the Inquiry Cancel command will have status of Command Disallowed since there is no Inquiry session to be cancelled. This case should not be treated as error, otherwise we can reach an inconsistent state. 
Example of a btmon trace when this happened: < HCI Command: Inquiry Cancel (0x01|0x0002) plen 0 > HCI Event: Inquiry Complete (0x01) plen 1 Status: Success (0x00) > HCI Event: Command Complete (0x0e) plen 4 Inquiry Cancel (0x01|0x0002) ncmd 1 Status: Command Disallowed (0x0c) Signed-off-by: Sonny Sasaka Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin --- net/bluetooth/hci_event.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index ec6b3a87b3e7..310622086f74 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -41,12 +41,27 @@ /* Handle HCI Event packets */ -static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) +static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb, + u8 *new_status) { __u8 status = *((__u8 *) skb->data); BT_DBG("%s status 0x%2.2x", hdev->name, status); + /* It is possible that we receive Inquiry Complete event right + * before we receive Inquiry Cancel Command Complete event, in + * which case the latter event should have status of Command + * Disallowed (0x0c). This should not be treated as error, since + * we actually achieve what Inquiry Cancel wants to achieve, + * which is to end the last Inquiry session. 
+ */ + if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { + bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); + status = 0x00; + } + + *new_status = status; + if (status) return; @@ -3039,7 +3054,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb, switch (*opcode) { case HCI_OP_INQUIRY_CANCEL: - hci_cc_inquiry_cancel(hdev, skb); + hci_cc_inquiry_cancel(hdev, skb, status); break; case HCI_OP_PERIODIC_INQ: -- GitLab From c1705819f3cce9e2524e0ea03536372cd3ab974f Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Fri, 8 May 2020 19:43:05 +0800 Subject: [PATCH 1043/1304] USB: EHCI: ehci-mv: fix error handling in mv_ehci_probe() [ Upstream commit c856b4b0fdb5044bca4c0acf9a66f3b5cc01a37a ] If the function platform_get_irq() failed, the negative value returned will not be detected here. So fix error handling in mv_ehci_probe(). And when get irq failed, the function platform_get_irq() logs an error message, so remove redundant message here. Signed-off-by: Zhang Shengju Signed-off-by: Tang Bin Link: https://lore.kernel.org/r/20200508114305.15740-1-tangbin@cmss.chinamobile.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/host/ehci-mv.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index de764459e05a..4edcd7536a01 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -193,9 +193,8 @@ static int mv_ehci_probe(struct platform_device *pdev) hcd->regs = ehci_mv->op_regs; hcd->irq = platform_get_irq(pdev, 0); - if (!hcd->irq) { - dev_err(&pdev->dev, "Cannot get irq."); - retval = -ENODEV; + if (hcd->irq < 0) { + retval = hcd->irq; goto err_disable_clk; } -- GitLab From 6b3ea3aa6c675b65b6b068f5726c93abc8a4b460 Mon Sep 17 00:00:00 2001 From: Tuong Lien Date: Wed, 13 May 2020 19:33:17 +0700 Subject: [PATCH 1044/1304] tipc: fix memory leak in service subscripting [ Upstream commit 
0771d7df819284d46cf5cfb57698621b503ec17f ] Upon receipt of a service subscription request from user via a topology connection, one 'sub' object will be allocated in kernel, so it will be able to send an event of the service if any to the user correspondingly then. Also, in case of any failure, the connection will be shutdown and all the pertaining 'sub' objects will be freed. However, there is a race condition as follows resulting in memory leak: receive-work connection send-work | | | sub-1 |<------//-------| | sub-2 |<------//-------| | | |<---------------| evt for sub-x sub-3 |<------//-------| | : : : : : : | /--------| | | | * peer closed | | | | | | | |<-------X-------| evt for sub-y | | |<===============| sub-n |<------/ X shutdown | -> orphan | | That is, the 'receive-work' may get the last subscription request while the 'send-work' is shutting down the connection due to peer close. We had a 'lock' on the connection, so the two actions cannot be carried out simultaneously. If the last subscription is allocated e.g. 'sub-n', before the 'send-work' closes the connection, there will be no issue at all, the 'sub' objects will be freed. In contrast the last subscription will become orphan since the connection was closed, and we released all references. This commit fixes the issue by simply adding one test if the connection remains in 'connected' state right after we obtain the connection lock, then a subscription object can be created as usual, otherwise we ignore it. Acked-by: Ying Xue Acked-by: Jon Maloy Reported-by: Thang Ngo Signed-off-by: Tuong Lien Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- net/tipc/topsrv.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c index 41f4464ac6cc..ec9a7137d267 100644 --- a/net/tipc/topsrv.c +++ b/net/tipc/topsrv.c @@ -407,7 +407,9 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con) return -EWOULDBLOCK; if (ret == sizeof(s)) { read_lock_bh(&sk->sk_callback_lock); - ret = tipc_conn_rcv_sub(srv, con, &s); + /* RACE: the connection can be closed in the meantime */ + if (likely(connected(con))) + ret = tipc_conn_rcv_sub(srv, con, &s); read_unlock_bh(&sk->sk_callback_lock); if (!ret) return 0; -- GitLab From 08cfb31399683924d4e09fe39c74308477158f19 Mon Sep 17 00:00:00 2001 From: Jonathan Bakker Date: Fri, 8 May 2020 18:34:33 -0700 Subject: [PATCH 1045/1304] tty: serial: samsung: Correct clock selection logic [ Upstream commit 7d31676a8d91dd18e08853efd1cb26961a38c6a6 ] Some variants of the samsung tty driver can pick which clock to use for their baud rate generation. In the DT conversion, a default clock was selected to be used if a specific one wasn't assigned and then a comparison of which clock rate worked better was done. Unfortunately, the comparison was implemented in such a way that only the default clock was ever actually compared. Fix this by iterating through all possible clocks, except when a specific clock has already been picked via clk_sel (which is only possible via board files). 
Signed-off-by: Jonathan Bakker Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/BN6PR04MB06604E63833EA41837EBF77BA3A30@BN6PR04MB0660.namprd04.prod.outlook.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/serial/samsung.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index fcb89bf2524d..1528a7ba2bf4 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport, struct s3c24xx_uart_info *info = ourport->info; struct clk *clk; unsigned long rate; - unsigned int cnt, baud, quot, clk_sel, best_quot = 0; + unsigned int cnt, baud, quot, best_quot = 0; char clkname[MAX_CLK_NAME_LENGTH]; int calc_deviation, deviation = (1 << 30) - 1; - clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel : - ourport->info->def_clk_sel; for (cnt = 0; cnt < info->num_clks; cnt++) { - if (!(clk_sel & (1 << cnt))) + /* Keep selected clock if provided */ + if (ourport->cfg->clk_sel && + !(ourport->cfg->clk_sel & (1 << cnt))) continue; sprintf(clkname, "clk_uart_baud%d", cnt); -- GitLab From ac8196928f6a06c0788e981fab8772123eb3fa05 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sat, 16 May 2020 08:25:56 +0200 Subject: [PATCH 1046/1304] ALSA: hda: Fix potential race in unsol event handler [ Upstream commit c637fa151259c0f74665fde7cba5b7eac1417ae5 ] The unsol event handling code has a loop retrieving the read/write indices and the arrays without locking while the append to the array may happen concurrently. This may lead to some inconsistency. Although there hasn't been any proof of this bad results, it's still safer to protect the racy accesses. This patch adds the spinlock protection around the unsol handling loop for addressing it. Here we take bus->reg_lock as the writer side snd_hdac_bus_queue_event() is also protected by that lock. 
Link: https://lore.kernel.org/r/20200516062556.30951-1-tiwai@suse.de Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/hda/hdac_bus.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c index 714a51721a31..ab9236e4c157 100644 --- a/sound/hda/hdac_bus.c +++ b/sound/hda/hdac_bus.c @@ -155,6 +155,7 @@ static void process_unsol_events(struct work_struct *work) struct hdac_driver *drv; unsigned int rp, caddr, res; + spin_lock_irq(&bus->reg_lock); while (bus->unsol_rp != bus->unsol_wp) { rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE; bus->unsol_rp = rp; @@ -166,10 +167,13 @@ static void process_unsol_events(struct work_struct *work) codec = bus->caddr_tbl[caddr & 0x0f]; if (!codec || !codec->dev.driver) continue; + spin_unlock_irq(&bus->reg_lock); drv = drv_to_hdac_driver(codec->dev.driver); if (drv->unsol_event) drv->unsol_event(codec, res); + spin_lock_irq(&bus->reg_lock); } + spin_unlock_irq(&bus->reg_lock); } /** -- GitLab From 4b15f7b21cac8595216a69044876441fad3179b9 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 8 May 2020 14:34:07 +1000 Subject: [PATCH 1047/1304] powerpc/traps: Make unrecoverable NMIs die instead of panic [ Upstream commit 265d6e588d87194c2fe2d6c240247f0264e0c19b ] System Reset and Machine Check interrupts that are not recoverable due to being nested or interrupting when RI=0 currently panic. This is not necessary, and can often just kill the current context and recover. 
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Reviewed-by: Christophe Leroy Link: https://lore.kernel.org/r/20200508043408.886394-16-npiggin@gmail.com Signed-off-by: Sasha Levin --- arch/powerpc/kernel/traps.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index d5f351f02c15..7781f0168ce8 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -430,11 +430,11 @@ void system_reset_exception(struct pt_regs *regs) #ifdef CONFIG_PPC_BOOK3S_64 BUG_ON(get_paca()->in_nmi == 0); if (get_paca()->in_nmi > 1) - nmi_panic(regs, "Unrecoverable nested System Reset"); + die("Unrecoverable nested System Reset", regs, SIGABRT); #endif /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable System Reset"); + die("Unrecoverable System Reset", regs, SIGABRT); if (!nested) nmi_exit(); @@ -775,7 +775,7 @@ void machine_check_exception(struct pt_regs *regs) /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable Machine check"); + die("Unrecoverable Machine check", regs, SIGBUS); return; -- GitLab From 59da76a1713f7fd82d9c18ec72be99085b557027 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 19 May 2020 14:50:37 +0200 Subject: [PATCH 1048/1304] fuse: don't check refcount after stealing page [ Upstream commit 32f98877c57bee6bc27f443a96f49678a2cd6a50 ] page_count() is unstable. Unless there has been an RCU grace period between when the page was removed from the page cache and now, a speculative reference may exist from the page cache. 
Reported-by: Matthew Wilcox Signed-off-by: Miklos Szeredi Signed-off-by: Sasha Levin --- fs/fuse/dev.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 01e6ea11822b..c51c9a6881e4 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -831,7 +831,6 @@ static int fuse_check_page(struct page *page) { if (page_mapcount(page) || page->mapping != NULL || - page_count(page) != 1 || (page->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced | -- GitLab From d4a74f4399ccff99a69ff62b0b27d04f5c51aef9 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 15 May 2020 17:54:53 +0100 Subject: [PATCH 1049/1304] USB: EHCI: ehci-mv: fix less than zero comparison of an unsigned int [ Upstream commit a7f40c233a6b0540d28743267560df9cfb571ca9 ] The comparison of hcd->irq to less than zero for an error check will never be true because hcd->irq is an unsigned int. Fix this by assigning the int retval to the return of platform_get_irq and checking this for the -ve error condition and assigning hcd->irq to retval. 
Addresses-Coverity: ("Unsigned compared against 0") Fixes: c856b4b0fdb5 ("USB: EHCI: ehci-mv: fix error handling in mv_ehci_probe()") Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20200515165453.104028-1-colin.king@canonical.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/host/ehci-mv.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c index 4edcd7536a01..9d93e7441bbc 100644 --- a/drivers/usb/host/ehci-mv.c +++ b/drivers/usb/host/ehci-mv.c @@ -192,11 +192,10 @@ static int mv_ehci_probe(struct platform_device *pdev) hcd->rsrc_len = resource_size(r); hcd->regs = ehci_mv->op_regs; - hcd->irq = platform_get_irq(pdev, 0); - if (hcd->irq < 0) { - retval = hcd->irq; + retval = platform_get_irq(pdev, 0); + if (retval < 0) goto err_disable_clk; - } + hcd->irq = retval; ehci = hcd_to_ehci(hcd); ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs; -- GitLab From 6909507e9e98b9eed3ce9d6b4d7528aed106d0fb Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 28 Apr 2020 14:18:55 +0000 Subject: [PATCH 1050/1304] scsi: cxlflash: Fix error return code in cxlflash_probe() [ Upstream commit d0b1e4a638d670a09f42017a3e567dc846931ba8 ] Fix to return negative error code -ENOMEM from create_afu error handling case instead of 0, as done elsewhere in this function. Link: https://lore.kernel.org/r/20200428141855.88704-1-weiyongjun1@huawei.com Acked-by: Matthew R. Ochs Signed-off-by: Wei Yongjun Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/cxlflash/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index f987c40c47a1..443813feaef4 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3749,6 +3749,7 @@ static int cxlflash_probe(struct pci_dev *pdev, cfg->afu_cookie = cfg->ops->create_afu(pdev); if (unlikely(!cfg->afu_cookie)) { dev_err(dev, "%s: create_afu failed\n", __func__); + rc = -ENOMEM; goto out_remove; } -- GitLab From e682e0d53c390467100dadd0cebcf8f4f0b9498e Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Tue, 19 May 2020 15:10:39 +0530 Subject: [PATCH 1051/1304] arm64/cpufeature: Drop TraceFilt feature exposure from ID_DFR0 register [ Upstream commit 1ed1b90a0594c8c9d31e8bb8be25a2b37717dc9e ] ID_DFR0 based TraceFilt feature should not be exposed to guests. Hence lets drop it. Cc: Catalin Marinas Cc: Will Deacon Cc: Marc Zyngier Cc: Mark Rutland Cc: James Morse Cc: Suzuki K Poulose Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Suggested-by: Mark Rutland Signed-off-by: Anshuman Khandual Reviewed-by: Suzuki K Poulose Link: https://lore.kernel.org/r/1589881254-10082-3-git-send-email-anshuman.khandual@arm.com Signed-off-by: Will Deacon Signed-off-by: Sasha Levin --- arch/arm64/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 095dec566275..de6fa9b4abfa 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -300,7 +300,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = { }; static const struct arm64_ftr_bits ftr_id_dfr0[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), + /* [31:28] TraceFilt */ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), 
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0), -- GitLab From dc8ecb8017bfcf864c051ba7c022a82f36aa7700 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 17 Apr 2020 09:35:31 -0700 Subject: [PATCH 1052/1304] e1000: Do not perform reset in reset_task if we are already down [ Upstream commit 49ee3c2ab5234757bfb56a0b3a3cb422f427e3a3 ] We are seeing a deadlock in e1000 down when NAPI is being disabled. Looking over the kernel function trace of the system it appears that the interface is being closed and then a reset is hitting which deadlocks the interface as the NAPI interface is already disabled. To prevent this from happening I am disabling the reset task when __E1000_DOWN is already set. In addition code has been added so that we set the __E1000_DOWN while holding the __E1000_RESET flag in e1000_close in order to guarantee that the reset task will not run after we have started the close call. Signed-off-by: Alexander Duyck Tested-by: Maxim Zhukov Signed-off-by: Jeff Kirsher Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/e1000/e1000_main.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 47b867c64b14..195108858f38 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter) WARN_ON(in_interrupt()); while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); - e1000_down(adapter); - e1000_up(adapter); + + /* only run the task if not already down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) { + e1000_down(adapter); + e1000_up(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); } @@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; int count = E1000_CHECK_RESET_COUNT; - while 
(test_bit(__E1000_RESETTING, &adapter->flags) && count--) + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--) usleep_range(10000, 20000); - WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + WARN_ON(count < 0); + + /* signal that we're down so that the reset task will no longer run */ + set_bit(__E1000_DOWN, &adapter->flags); + clear_bit(__E1000_RESETTING, &adapter->flags); + e1000_down(adapter); e1000_power_down_phy(adapter); e1000_free_irq(adapter); -- GitLab From 8e0f8fe0103cc7db7b15eae3a05088570e641ca2 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Wed, 20 May 2020 18:14:53 +0800 Subject: [PATCH 1053/1304] drm/nouveau/debugfs: fix runtime pm imbalance on error [ Upstream commit 00583fbe8031f69bba8b0a9a861efb75fb7131af ] pm_runtime_get_sync() increments the runtime PM usage counter even the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced. Signed-off-by: Dinghao Liu Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/nouveau_debugfs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 9635704a1d86..4561a786fab0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -161,8 +161,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, } ret = pm_runtime_get_sync(drm->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(drm->dev); return ret; + } + ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); pm_runtime_put_autosuspend(drm->dev); if (ret < 0) -- GitLab From 12f61a929604a7e4aee4632ea73e81fab564f703 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Wed, 20 May 2020 18:25:49 +0800 Subject: [PATCH 1054/1304] drm/nouveau: fix runtime pm imbalance on error [ Upstream commit 
d7372dfb3f7f1602b87e0663e8b8646da23ebca7 ] pm_runtime_get_sync() increments the runtime PM usage counter even the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced. Signed-off-by: Dinghao Liu Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 791f970714ed..a98fccb0d32f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -82,8 +82,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) return ret; ret = pm_runtime_get_sync(dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(dev); goto out; + } ret = nouveau_vma_new(nvbo, &cli->vmm, &vma); pm_runtime_mark_last_busy(dev); -- GitLab From 39096c0f5c1ec9a3f9840ee315251493a607a57b Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Wed, 20 May 2020 18:47:48 +0800 Subject: [PATCH 1055/1304] drm/nouveau/dispnv50: fix runtime pm imbalance on error [ Upstream commit dc455f4c888365595c0a13da445e092422d55b8d ] pm_runtime_get_sync() increments the runtime PM usage counter even the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced. 
Signed-off-by: Dinghao Liu Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index e06ea8c8184c..1bb0a9f6fa73 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -909,8 +909,10 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; ret = pm_runtime_get_sync(connector->dev->dev); - if (ret < 0 && ret != -EACCES) + if (ret < 0 && ret != -EACCES) { + pm_runtime_put_autosuspend(connector->dev->dev); return connector_status_disconnected; + } conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port); -- GitLab From c6a9585611a538466c8ad2421035c0ffa7fabc77 Mon Sep 17 00:00:00 2001 From: Shreyas Joshi Date: Fri, 22 May 2020 16:53:06 +1000 Subject: [PATCH 1056/1304] printk: handle blank console arguments passed in. [ Upstream commit 48021f98130880dd74286459a1ef48b5e9bc374f ] If uboot passes a blank string to console_setup then it results in a trashed memory. Ultimately, the kernel crashes during freeing up the memory. This fix checks if there is a blank parameter being passed to console_setup from uboot. In case it detects that the console parameter is blank then it doesn't setup the serial device and it gracefully exits. Link: https://lore.kernel.org/r/20200522065306.83-1-shreyas.joshi@biamp.com Signed-off-by: Shreyas Joshi Acked-by: Sergey Senozhatsky [pmladek@suse.com: Better format the commit message and code, remove unnecessary brackets.] 
Signed-off-by: Petr Mladek Signed-off-by: Sasha Levin --- kernel/printk/printk.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 3cb0e5b479ff..cf272aba362b 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2148,6 +2148,9 @@ static int __init console_setup(char *str) char *s, *options, *brl_options = NULL; int idx; + if (str[0] == 0) + return 1; + if (_braille_console_setup(&str, &brl_options)) return 1; -- GitLab From 8201fdac15d67f692c99ac15d867a413a0d6553a Mon Sep 17 00:00:00 2001 From: Yu Chen Date: Thu, 21 May 2020 16:46:43 +0800 Subject: [PATCH 1057/1304] usb: dwc3: Increase timeout for CmdAct cleared by device controller [ Upstream commit 1c0e69ae1b9f9004fd72978612ae3463791edc56 ] If the SS PHY is in P3, there is no pipe_clk, HW may use suspend_clk for function, as suspend_clk is slow so EP command need more time to complete, e.g, imx8M suspend_clk is 32K, set ep configuration will take about 380us per below trace time stamp(44.286278 - 44.285897 = 0.000381): configfs_acm.sh-822 [000] d..1 44.285896: dwc3_writel: addr 000000006d59aae1 value 00000401 configfs_acm.sh-822 [000] d..1 44.285897: dwc3_readl: addr 000000006d59aae1 value 00000401 ... ... configfs_acm.sh-822 [000] d..1 44.286278: dwc3_readl: addr 000000006d59aae1 value 00000001 configfs_acm.sh-822 [000] d..1 44.286279: dwc3_gadget_ep_cmd: ep0out: cmd 'Set Endpoint Configuration' [401] params 00001000 00000500 00000000 --> status: Successful This was originally found on Hisilicon Kirin Soc that need more time for the device controller to clear the CmdAct of DEPCMD. 
Signed-off-by: Yu Chen Signed-off-by: John Stultz Signed-off-by: Li Jun Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin --- drivers/usb/dwc3/gadget.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 7bf2573dd459..37cc3fd7c3ca 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -270,7 +270,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, { const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; struct dwc3 *dwc = dep->dwc; - u32 timeout = 1000; + u32 timeout = 5000; u32 saved_config = 0; u32 reg; -- GitLab From 5915b8ecb176e48f9fbb91e7004053097fc93ec7 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 25 Feb 2020 15:05:53 +0100 Subject: [PATCH 1058/1304] btrfs: don't force read-only after error in drop snapshot [ Upstream commit 7c09c03091ac562ddca2b393e5d65c1d37da79f1 ] Deleting a subvolume on a full filesystem leads to ENOSPC followed by a forced read-only. This is not a transaction abort and the filesystem is otherwise ok, so the error should be just propagated to the callers. This is caused by unnecessary call to btrfs_handle_fs_error for all errors, except EAGAIN. This does not make sense as the standard transaction abort mechanism is in btrfs_drop_snapshot so all relevant failures are handled. Originally in commit cb1b69f4508a ("Btrfs: forced readonly when btrfs_drop_snapshot() fails") there was no return value at all, so the btrfs_std_error made some sense but once the error handling and propagation has been implemented we don't need it anymore. 
Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/extent-tree.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 319a89d4d073..ce5e0f6c6af4 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -9098,8 +9098,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root, */ if (!for_reloc && !root_dropped) btrfs_add_dead_root(root); - if (err && err != -EAGAIN) - btrfs_handle_fs_error(fs_info, err, NULL); return err; } -- GitLab From b7e24664cc816717ca2a45b773d950a9188fb5c1 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Mon, 11 May 2020 00:34:50 -0400 Subject: [PATCH 1059/1304] vfio/pci: fix memory leaks of eventfd ctx [ Upstream commit 1518ac272e789cae8c555d69951b032a275b7602 ] Finished a qemu-kvm (-device vfio-pci,host=0001:01:00.0) triggers a few memory leaks after a while because vfio_pci_set_ctx_trigger_single() calls eventfd_ctx_fdget() without the matching eventfd_ctx_put() later. Fix it by calling eventfd_ctx_put() for those memory in vfio_pci_release() before vfio_device_release(). unreferenced object 0xebff008981cc2b00 (size 128): comm "qemu-kvm", pid 4043, jiffies 4294994816 (age 9796.310s) hex dump (first 32 bytes): 01 00 00 00 6b 6b 6b 6b 00 00 00 00 ad 4e ad de ....kkkk.....N.. ff ff ff ff 6b 6b 6b 6b ff ff ff ff ff ff ff ff ....kkkk........ backtrace: [<00000000917e8f8d>] slab_post_alloc_hook+0x74/0x9c [<00000000df0f2aa2>] kmem_cache_alloc_trace+0x2b4/0x3d4 [<000000005fcec025>] do_eventfd+0x54/0x1ac [<0000000082791a69>] __arm64_sys_eventfd2+0x34/0x44 [<00000000b819758c>] do_el0_svc+0x128/0x1dc [<00000000b244e810>] el0_sync_handler+0xd0/0x268 [<00000000d495ef94>] el0_sync+0x164/0x180 unreferenced object 0x29ff008981cc4180 (size 128): comm "qemu-kvm", pid 4043, jiffies 4294994818 (age 9796.290s) hex dump (first 32 bytes): 01 00 00 00 6b 6b 6b 6b 00 00 00 00 ad 4e ad de ....kkkk.....N.. ff ff ff ff 6b 6b 6b 6b ff ff ff ff ff ff ff ff ....kkkk........ 
backtrace: [<00000000917e8f8d>] slab_post_alloc_hook+0x74/0x9c [<00000000df0f2aa2>] kmem_cache_alloc_trace+0x2b4/0x3d4 [<000000005fcec025>] do_eventfd+0x54/0x1ac [<0000000082791a69>] __arm64_sys_eventfd2+0x34/0x44 [<00000000b819758c>] do_el0_svc+0x128/0x1dc [<00000000b244e810>] el0_sync_handler+0xd0/0x268 [<00000000d495ef94>] el0_sync+0x164/0x180 Signed-off-by: Qian Cai Signed-off-by: Alex Williamson Signed-off-by: Sasha Levin --- drivers/vfio/pci/vfio_pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 9f72a6ee13b5..86cd8bdfa9f2 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -409,6 +409,10 @@ static void vfio_pci_release(void *device_data) if (!(--vdev->refcnt)) { vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); + if (vdev->err_trigger) + eventfd_ctx_put(vdev->err_trigger); + if (vdev->req_trigger) + eventfd_ctx_put(vdev->req_trigger); } mutex_unlock(&driver_lock); -- GitLab From 56540590ce7c316947d6740edc0403182a1e1ade Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 12 May 2020 16:59:18 -0700 Subject: [PATCH 1060/1304] perf evsel: Fix 2 memory leaks [ Upstream commit 3efc899d9afb3d03604f191a0be9669eabbfc4aa ] If allocated, perf_pkg_mask and metric_events need freeing. 
Signed-off-by: Ian Rogers Reviewed-by: Andi Kleen Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lore.kernel.org/lkml/20200512235918.10732-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/util/evsel.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 68c5ab0e1800..11a2aa80802d 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1291,6 +1291,8 @@ void perf_evsel__exit(struct perf_evsel *evsel) zfree(&evsel->group_name); zfree(&evsel->name); zfree(&evsel->pmu_name); + zfree(&evsel->per_pkg_mask); + zfree(&evsel->metric_events); perf_evsel__object.fini(evsel); } -- GitLab From aa0d162b9fae4cc8cb01a2f0326777dca59f9ec2 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 5 Mar 2020 23:11:10 -0800 Subject: [PATCH 1061/1304] perf trace: Fix the selection for architectures to generate the errno name tables [ Upstream commit 7597ce89b3ed239f7a3408b930d2a6c7a4c938a1 ] Make the architecture test directory agree with the code comment. Committer notes: This was split from a larger patch. The code was assuming the developer always worked from tools/perf/, so make sure we do the test -d having $toolsdir/perf/arch/$arch, to match the intent expressed in the comment, just above that loop. 
Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexios Zavras Cc: Andi Kleen Cc: Greg Kroah-Hartman Cc: Igor Lubashev Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Mathieu Poirier Cc: Namhyung Kim Cc: Nick Desaulniers Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Wei Li Link: http://lore.kernel.org/lkml/20200306071110.130202-4-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/trace/beauty/arch_errno_names.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh index 22c9fc900c84..f8c44a85650b 100755 --- a/tools/perf/trace/beauty/arch_errno_names.sh +++ b/tools/perf/trace/beauty/arch_errno_names.sh @@ -91,7 +91,7 @@ EoHEADER # in tools/perf/arch archlist="" for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do - test -d arch/$arch && archlist="$archlist $arch" + test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch" done for arch in x86 $archlist generic; do -- GitLab From d911653688c588c22bdbc83459f87961c9d4399e Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 18 May 2020 15:14:45 +0200 Subject: [PATCH 1062/1304] perf stat: Fix duration_time value for higher intervals [ Upstream commit ea9eb1f456a08c18feb485894185f7a4e31cc8a4 ] Joakim reported wrong duration_time value for interval bigger than 4000 [1]. The problem is in the interval value we pass to update_stats function, which is typed as 'unsigned int' and overflows when we get over 2^32 (happens between intervals 4000 and 5000). Retyping the passed value to unsigned long long. 
[1] https://www.spinics.net/lists/linux-perf-users/msg11777.html Fixes: b90f1333ef08 ("perf stat: Update walltime_nsecs_stats in interval mode") Reported-by: Joakim Zhang Signed-off-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: Michael Petlan Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/20200518131445.3745083-1-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/builtin-stat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 6aae10ff954c..adabe9d4dc86 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -422,7 +422,7 @@ static void process_interval(void) } init_stats(&walltime_nsecs_stats); - update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000); + update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); print_counters(&rs, 0, NULL); } -- GitLab From dd155a48a0c9b53404b30f6f92ccf9f8160378c1 Mon Sep 17 00:00:00 2001 From: Xie XiuQi Date: Thu, 21 May 2020 21:32:17 +0800 Subject: [PATCH 1063/1304] perf util: Fix memory leak of prefix_if_not_in [ Upstream commit 07e9a6f538cbeecaf5c55b6f2991416f873cdcbd ] Need to free "str" before return when asprintf() failed to avoid memory leak. 
Signed-off-by: Xie XiuQi Cc: Alexander Shishkin Cc: Hongbo Yao Cc: Jiri Olsa Cc: Li Bin Cc: Mark Rutland Cc: Namhyung Kim Link: http://lore.kernel.org/lkml/20200521133218.30150-4-liwei391@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/util/sort.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 46daa22b86e3..85ff4f68adc0 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2690,7 +2690,7 @@ static char *prefix_if_not_in(const char *pre, char *str) return str; if (asprintf(&n, "%s,%s", pre, str) < 0) - return NULL; + n = NULL; free(str); return n; -- GitLab From cc6ae85020035734eb13597fd6e8b0074897b837 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 7 May 2020 22:36:24 -0700 Subject: [PATCH 1064/1304] perf metricgroup: Free metric_events on error [ Upstream commit a159e2fe89b4d1f9fb54b0ae418b961e239bf617 ] Avoid a simple memory leak. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Andi Kleen Cc: Andrii Nakryiko Cc: Cong Wang Cc: Daniel Borkmann Cc: Jin Yao Cc: Jiri Olsa Cc: John Fastabend Cc: John Garry Cc: Kajol Jain Cc: Kan Liang Cc: Kim Phillips Cc: Mark Rutland Cc: Martin KaFai Lau Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Song Liu Cc: Stephane Eranian Cc: Vince Weaver Cc: Yonghong Song Cc: bpf@vger.kernel.org Cc: kp singh Cc: netdev@vger.kernel.org Link: http://lore.kernel.org/lkml/20200508053629.210324-10-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/util/metricgroup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 8b3dafe3fac3..6dcc6e1182a5 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -171,6 +171,7 @@ static int metricgroup__setup_events(struct list_head *groups, if (!evsel) { pr_debug("Cannot resolve %s: %s\n", eg->metric_name, 
eg->metric_expr); + free(metric_events); continue; } for (i = 0; i < eg->idnum; i++) @@ -178,11 +179,13 @@ static int metricgroup__setup_events(struct list_head *groups, me = metricgroup__lookup(metric_events_list, evsel, true); if (!me) { ret = -ENOMEM; + free(metric_events); break; } expr = malloc(sizeof(struct metric_expr)); if (!expr) { ret = -ENOMEM; + free(metric_events); break; } expr->metric_expr = eg->metric_expr; -- GitLab From a63689c06a6dd5c0cf2a9221927b9b1b2b2bb9c1 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 12 May 2020 15:19:16 +0300 Subject: [PATCH 1065/1304] perf kcore_copy: Fix module map when there are no modules loaded [ Upstream commit 61f82e3fb697a8e85f22fdec786528af73dc36d1 ] In the absence of any modules, no "modules" map is created, but there are other executable pages to map, due to eBPF JIT, kprobe or ftrace. Map them by recognizing that the first "module" symbol is not necessarily from a module, and adjust the map accordingly. Signed-off-by: Adrian Hunter Cc: Alexander Shishkin Cc: Borislav Petkov Cc: H. 
Peter Anvin Cc: Jiri Olsa Cc: Leo Yan Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Mathieu Poirier Cc: Peter Zijlstra Cc: Steven Rostedt (VMware) Cc: x86@kernel.org Link: http://lore.kernel.org/lkml/20200512121922.8997-10-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/util/symbol-elf.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index a701a8a48f00..166c621e0223 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1421,6 +1421,7 @@ struct kcore_copy_info { u64 first_symbol; u64 last_symbol; u64 first_module; + u64 first_module_symbol; u64 last_module_symbol; size_t phnum; struct list_head phdrs; @@ -1497,6 +1498,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, return 0; if (strchr(name, '[')) { + if (!kci->first_module_symbol || start < kci->first_module_symbol) + kci->first_module_symbol = start; if (start > kci->last_module_symbol) kci->last_module_symbol = start; return 0; @@ -1694,6 +1697,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, kci->etext += page_size; } + if (kci->first_module_symbol && + (!kci->first_module || kci->first_module_symbol < kci->first_module)) + kci->first_module = kci->first_module_symbol; + kci->first_module = round_down(kci->first_module, page_size); if (kci->last_module_symbol) { -- GitLab From fce356afe09a4f62ebcd7080c924828aac841b75 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Fri, 29 May 2020 09:22:28 +0800 Subject: [PATCH 1066/1304] ASoC: img-i2s-out: Fix runtime PM imbalance on error [ Upstream commit 65bd91dd6957390c42a0491b9622cf31a2cdb140 ] pm_runtime_get_sync() increments the runtime PM usage counter even the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced. 
Signed-off-by: Dinghao Liu Link: https://lore.kernel.org/r/20200529012230.5863-1-dinghao.liu@zju.edu.cn Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/img/img-i2s-out.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c index fc2d1dac6333..798ab579564c 100644 --- a/sound/soc/img/img-i2s-out.c +++ b/sound/soc/img/img-i2s-out.c @@ -350,8 +350,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK; ret = pm_runtime_get_sync(i2s->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(i2s->dev); return ret; + } img_i2s_out_disable(i2s); @@ -491,8 +493,10 @@ static int img_i2s_out_probe(struct platform_device *pdev) goto err_pm_disable; } ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto err_suspend; + } reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK; img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL); -- GitLab From 3ad6b023d0eef580c3183af75abfb9c2a2c46993 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Wed, 20 May 2020 20:42:38 +0800 Subject: [PATCH 1067/1304] wlcore: fix runtime pm imbalance in wl1271_tx_work [ Upstream commit 9604617e998b49f7695fea1479ed82421ef8c9f0 ] There are two error handling paths in this function. When wlcore_tx_work_locked() returns an error code, we should decrease the runtime PM usage counter the same way as the error handling path beginning from pm_runtime_get_sync(). 
Signed-off-by: Dinghao Liu Acked-by: Tony Lindgren Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200520124241.9931-1-dinghao.liu@zju.edu.cn Signed-off-by: Sasha Levin --- drivers/net/wireless/ti/wlcore/tx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c index b6e19c2d66b0..250bcbf4ea2f 100644 --- a/drivers/net/wireless/ti/wlcore/tx.c +++ b/drivers/net/wireless/ti/wlcore/tx.c @@ -877,6 +877,7 @@ void wl1271_tx_work(struct work_struct *work) ret = wlcore_tx_work_locked(wl); if (ret < 0) { + pm_runtime_put_noidle(wl->dev); wl12xx_queue_recovery_work(wl); goto out; } -- GitLab From 345d68b47a05e455de5048bd3ed8e9aa5abb430c Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Wed, 20 May 2020 20:46:47 +0800 Subject: [PATCH 1068/1304] wlcore: fix runtime pm imbalance in wlcore_regdomain_config [ Upstream commit 282a04bf1d8029eb98585cb5db3fd70fe8bc91f7 ] pm_runtime_get_sync() increments the runtime PM usage counter even when the call returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced. 
Signed-off-by: Dinghao Liu Acked-by: Tony Lindgren Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200520124649.10848-1-dinghao.liu@zju.edu.cn Signed-off-by: Sasha Levin --- drivers/net/wireless/ti/wlcore/main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 2ca5658bbc2a..43c7b37dec0c 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -3671,8 +3671,10 @@ void wlcore_regdomain_config(struct wl1271 *wl) goto out; ret = pm_runtime_get_sync(wl->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_autosuspend(wl->dev); goto out; + } ret = wlcore_cmd_regdomain_config_locked(wl); if (ret < 0) { -- GitLab From 2138dc84dfef7b0add7ccc034cdc4ad86754c715 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Fri, 22 May 2020 18:40:06 +0800 Subject: [PATCH 1069/1304] mtd: rawnand: omap_elm: Fix runtime PM imbalance on error [ Upstream commit 37f7212148cf1d796135cdf8d0c7fee13067674b ] pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced. 
Signed-off-by: Dinghao Liu Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20200522104008.28340-1-dinghao.liu@zju.edu.cn Signed-off-by: Sasha Levin --- drivers/mtd/nand/raw/omap_elm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index a3f32f939cc1..6736777a4156 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -421,6 +421,7 @@ static int elm_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (pm_runtime_get_sync(&pdev->dev) < 0) { ret = -EINVAL; + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); dev_err(&pdev->dev, "can't enable clock\n"); return ret; -- GitLab From 23c233c606c33251afcafc56ca35b307ed4f2e59 Mon Sep 17 00:00:00 2001 From: Dinghao Liu Date: Thu, 21 May 2020 10:47:09 +0800 Subject: [PATCH 1070/1304] PCI: tegra: Fix runtime PM imbalance on error [ Upstream commit fcee90cdf6f3a3a371add04d41528d5ba9c3b411 ] pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code. Thus a pairing decrement is needed on the error handling path to keep the counter balanced. Also, call pm_runtime_disable() when pm_runtime_get_sync() returns an error code. 
Link: https://lore.kernel.org/r/20200521024709.2368-1-dinghao.liu@zju.edu.cn Signed-off-by: Dinghao Liu Signed-off-by: Lorenzo Pieralisi Acked-by: Thierry Reding Signed-off-by: Sasha Levin --- drivers/pci/controller/pci-tegra.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 6f86583605a4..097c02197ec8 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2400,7 +2400,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) err = pm_runtime_get_sync(pcie->dev); if (err < 0) { dev_err(dev, "fail to enable pcie controller: %d\n", err); - goto teardown_msi; + goto pm_runtime_put; } err = tegra_pcie_request_resources(pcie); @@ -2440,7 +2440,6 @@ static int tegra_pcie_probe(struct platform_device *pdev) pm_runtime_put: pm_runtime_put_sync(pcie->dev); pm_runtime_disable(pcie->dev); -teardown_msi: tegra_pcie_msi_teardown(pcie); put_resources: tegra_pcie_put_resources(pcie); -- GitLab From c42c61e98251d5de390434181092ce9e7a9c5719 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 20 Mar 2020 16:45:45 -0400 Subject: [PATCH 1071/1304] ceph: fix potential race in ceph_check_caps [ Upstream commit dc3da0461cc4b76f2d0c5b12247fcb3b520edbbf ] Nothing ensures that session will still be valid by the time we dereference the pointer. Take and put a reference. In principle, we should always be able to get a reference here, but throw a warning if that's ever not the case. 
Signed-off-by: Jeff Layton Signed-off-by: Ilya Dryomov Signed-off-by: Sasha Levin --- fs/ceph/caps.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index a2d4eed27f80..c0dbf8b7762b 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2015,12 +2015,24 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, if (mutex_trylock(&session->s_mutex) == 0) { dout("inverting session/ino locks on %p\n", session); + session = ceph_get_mds_session(session); spin_unlock(&ci->i_ceph_lock); if (took_snap_rwsem) { up_read(&mdsc->snap_rwsem); took_snap_rwsem = 0; } - mutex_lock(&session->s_mutex); + if (session) { + mutex_lock(&session->s_mutex); + ceph_put_mds_session(session); + } else { + /* + * Because we take the reference while + * holding the i_ceph_lock, it should + * never be NULL. Throw a warning if it + * ever is. + */ + WARN_ON_ONCE(true); + } goto retry; } } -- GitLab From 8cc3afd53d8d967a3839b7f59e962e76cda2720b Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Mon, 1 Jun 2020 21:48:40 -0700 Subject: [PATCH 1072/1304] mm/swap_state: fix a data race in swapin_nr_pages [ Upstream commit d6c1f098f2a7ba62627c9bc17cda28f534ef9e4a ] "prev_offset" is a static variable in swapin_nr_pages() that can be accessed concurrently with only mmap_sem held in read mode as noticed by KCSAN, BUG: KCSAN: data-race in swap_cluster_readahead / swap_cluster_readahead write to 0xffffffff92763830 of 8 bytes by task 14795 on cpu 17: swap_cluster_readahead+0x2a6/0x5e0 swapin_readahead+0x92/0x8dc do_swap_page+0x49b/0xf20 __handle_mm_fault+0xcfb/0xd70 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x715 page_fault+0x34/0x40 1 lock held by (dnf)/14795: #0: ffff897bd2e98858 (&mm->mmap_sem#2){++++}-{3:3}, at: do_page_fault+0x143/0x715 do_user_addr_fault at arch/x86/mm/fault.c:1405 (inlined by) do_page_fault at arch/x86/mm/fault.c:1535 irq event stamp: 83493 count_memcg_event_mm+0x1a6/0x270 count_memcg_event_mm+0x119/0x270 
__do_softirq+0x365/0x589 irq_exit+0xa2/0xc0 read to 0xffffffff92763830 of 8 bytes by task 1 on cpu 22: swap_cluster_readahead+0xfd/0x5e0 swapin_readahead+0x92/0x8dc do_swap_page+0x49b/0xf20 __handle_mm_fault+0xcfb/0xd70 handle_mm_fault+0xfc/0x2f0 do_page_fault+0x263/0x715 page_fault+0x34/0x40 1 lock held by systemd/1: #0: ffff897c38f14858 (&mm->mmap_sem#2){++++}-{3:3}, at: do_page_fault+0x143/0x715 irq event stamp: 43530289 count_memcg_event_mm+0x1a6/0x270 count_memcg_event_mm+0x119/0x270 __do_softirq+0x365/0x589 irq_exit+0xa2/0xc0 Signed-off-by: Qian Cai Signed-off-by: Andrew Morton Cc: Marco Elver Cc: Hugh Dickins Link: http://lkml.kernel.org/r/20200402213748.2237-1-cai@lca.pw Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- mm/swap_state.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mm/swap_state.c b/mm/swap_state.c index 09731f4174c7..3febffe0fca4 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -537,10 +537,11 @@ static unsigned long swapin_nr_pages(unsigned long offset) return 1; hits = atomic_xchg(&swapin_readahead_hits, 0); - pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages, + pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits, + max_pages, atomic_read(&last_readahead_pages)); if (!hits) - prev_offset = offset; + WRITE_ONCE(prev_offset, offset); atomic_set(&last_readahead_pages, pages); return pages; -- GitLab From a44cb3037661fccc5f9f3a88953b35a7f2aad46d Mon Sep 17 00:00:00 2001 From: Madhuparna Bhowmik Date: Thu, 4 Jun 2020 16:51:21 -0700 Subject: [PATCH 1073/1304] rapidio: avoid data race between file operation callbacks and mport_cdev_add(). [ Upstream commit e1c3cdb26ab881b77486dc50370356a349077c74 ] Fields of md(mport_dev) are set after cdev_device_add(). However, the file operation callbacks can be called after cdev_device_add() and therefore accesses to fields of md in the callbacks can race with the rest of the mport_cdev_add() function. 
One such example is INIT_LIST_HEAD(&md->portwrites) in mport_cdev_add(), the list is initialised after cdev_device_add(). This can race with list_add_tail(&pw_filter->md_node,&md->portwrites) in rio_mport_add_pw_filter() which is called by unlocked_ioctl. To avoid such data races use cdev_device_add() after initializing md. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Madhuparna Bhowmik Signed-off-by: Andrew Morton Acked-by: Alexandre Bounine Cc: Matt Porter Cc: Dan Carpenter Cc: Mike Marshall Cc: Thomas Gleixner Cc: Ira Weiny Cc: Allison Randal Cc: Pavel Andrianov Link: http://lkml.kernel.org/r/20200426112950.1803-1-madhuparnabhowmik10@gmail.com Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- drivers/rapidio/devices/rio_mport_cdev.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 5940780648e0..f36a8a5261a1 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -2385,13 +2385,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) cdev_init(&md->cdev, &mport_fops); md->cdev.owner = THIS_MODULE; - ret = cdev_device_add(&md->cdev, &md->dev); - if (ret) { - rmcd_error("Failed to register mport %d (err=%d)", - mport->id, ret); - goto err_cdev; - } - INIT_LIST_HEAD(&md->doorbells); spin_lock_init(&md->db_lock); INIT_LIST_HEAD(&md->portwrites); @@ -2411,6 +2404,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) #else md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; #endif + + ret = cdev_device_add(&md->cdev, &md->dev); + if (ret) { + rmcd_error("Failed to register mport %d (err=%d)", + mport->id, ret); + goto err_cdev; + } ret = rio_query_mport(mport, &attr); if (!ret) { md->properties.flags = attr.flags; -- GitLab From 9a59dfddcbdb65824c7b528e1210b7335758fe49 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: 
Wed, 29 Apr 2020 09:53:47 -0700 Subject: [PATCH 1074/1304] mtd: parser: cmdline: Support MTD names containing one or more colons [ Upstream commit eb13fa0227417e84aecc3bd9c029d376e33474d3 ] Looks like some drivers define MTD names with a colon in it, thus making mtdpart= parsing impossible. Let's fix the parser to gracefully handle that case: the last ':' in a partition definition sequence is considered instead of the first one. Signed-off-by: Boris Brezillon Signed-off-by: Ron Minnich Tested-by: Ron Minnich Signed-off-by: Richard Weinberger Signed-off-by: Sasha Levin --- drivers/mtd/cmdlinepart.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index 3ea44cff9b75..c29205ee82e2 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c @@ -231,12 +231,29 @@ static int mtdpart_setup_real(char *s) struct cmdline_mtd_partition *this_mtd; struct mtd_partition *parts; int mtd_id_len, num_parts; - char *p, *mtd_id; + char *p, *mtd_id, *semicol; + + /* + * Replace the first ';' by a NULL char so strrchr can work + * properly. + */ + semicol = strchr(s, ';'); + if (semicol) + *semicol = '\0'; mtd_id = s; - /* fetch */ - p = strchr(s, ':'); + /* + * fetch . We use strrchr to ignore all ':' that could + * be present in the MTD name, only the last one is interpreted + * as an / separator. + */ + p = strrchr(s, ':'); + + /* Restore the ';' now. 
*/ + if (semicol) + *semicol = ';'; + if (!p) { pr_err("no mtd-id\n"); return -EINVAL; -- GitLab From f0e13175e6ea1f939f7d9f34a3774873bd711dfe Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 4 Mar 2020 12:49:18 +0100 Subject: [PATCH 1075/1304] x86/speculation/mds: Mark mds_user_clear_cpu_buffers() __always_inline [ Upstream commit a7ef9ba986b5fae9d80f8a7b31db0423687efe4e ] Prevent the compiler from uninlining and creating traceable/probable functions as this is invoked _after_ context tracking switched to CONTEXT_USER and rcu idle. Signed-off-by: Thomas Gleixner Reviewed-by: Alexandre Chartre Acked-by: Peter Zijlstra Link: https://lkml.kernel.org/r/20200505134340.902709267@linutronix.de Signed-off-by: Sasha Levin --- arch/x86/include/asm/nospec-branch.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index e3f70c60e8cc..62f9903544b5 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -330,7 +330,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear); * combination with microcode which triggers a CPU buffer flush when the * instruction is executed. 
*/ -static inline void mds_clear_cpu_buffers(void) +static __always_inline void mds_clear_cpu_buffers(void) { static const u16 ds = __KERNEL_DS; @@ -351,7 +351,7 @@ static inline void mds_clear_cpu_buffers(void) * * Clear CPU buffers if the corresponding static key is enabled */ -static inline void mds_user_clear_cpu_buffers(void) +static __always_inline void mds_user_clear_cpu_buffers(void) { if (static_branch_likely(&mds_user_clear)) mds_clear_cpu_buffers(); -- GitLab From 41a77298809e7be112f91972d794aa231fbe27aa Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Tue, 16 Jun 2020 15:26:36 -0600 Subject: [PATCH 1076/1304] vfio/pci: Clear error and request eventfd ctx after releasing [ Upstream commit 5c5866c593bbd444d0339ede6a8fb5f14ff66d72 ] The next use of the device will generate an underflow from the stale reference. Cc: Qian Cai Fixes: 1518ac272e78 ("vfio/pci: fix memory leaks of eventfd ctx") Reported-by: Daniel Wagner Reviewed-by: Cornelia Huck Tested-by: Daniel Wagner Signed-off-by: Alex Williamson Signed-off-by: Sasha Levin --- drivers/vfio/pci/vfio_pci.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 86cd8bdfa9f2..94fad366312f 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -409,10 +409,14 @@ static void vfio_pci_release(void *device_data) if (!(--vdev->refcnt)) { vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); - if (vdev->err_trigger) + if (vdev->err_trigger) { eventfd_ctx_put(vdev->err_trigger); - if (vdev->req_trigger) + vdev->err_trigger = NULL; + } + if (vdev->req_trigger) { eventfd_ctx_put(vdev->req_trigger); + vdev->req_trigger = NULL; + } } mutex_unlock(&driver_lock); -- GitLab From 5f7ca306c7db558fc81d9b1a45d59d5e1332a8a0 Mon Sep 17 00:00:00 2001 From: Zhang Xiaoxu Date: Mon, 22 Jun 2020 05:30:19 -0400 Subject: [PATCH 1077/1304] cifs: Fix double add page to memcg when cifs_readpages [ Upstream commit 
95a3d8f3af9b0d63b43f221b630beaab9739d13a ] When xfstests generic/451, there is an BUG at mm/memcontrol.c: page:ffffea000560f2c0 refcount:2 mapcount:0 mapping:000000008544e0ea index:0xf mapping->aops:cifs_addr_ops dentry name:"tst-aio-dio-cycle-write.451" flags: 0x2fffff80000001(locked) raw: 002fffff80000001 ffffc90002023c50 ffffea0005280088 ffff88815cda0210 raw: 000000000000000f 0000000000000000 00000002ffffffff ffff88817287d000 page dumped because: VM_BUG_ON_PAGE(page->mem_cgroup) page->mem_cgroup:ffff88817287d000 ------------[ cut here ]------------ kernel BUG at mm/memcontrol.c:2659! invalid opcode: 0000 [#1] SMP CPU: 2 PID: 2038 Comm: xfs_io Not tainted 5.8.0-rc1 #44 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20190727_ 073836-buildvm-ppc64le-16.ppc.4 RIP: 0010:commit_charge+0x35/0x50 Code: 0d 48 83 05 54 b2 02 05 01 48 89 77 38 c3 48 c7 c6 78 4a ea ba 48 83 05 38 b2 02 05 01 e8 63 0d9 RSP: 0018:ffffc90002023a50 EFLAGS: 00010202 RAX: 0000000000000000 RBX: ffff88817287d000 RCX: 0000000000000000 RDX: 0000000000000000 RSI: ffff88817ac97ea0 RDI: ffff88817ac97ea0 RBP: ffffea000560f2c0 R08: 0000000000000203 R09: 0000000000000005 R10: 0000000000000030 R11: ffffc900020237a8 R12: 0000000000000000 R13: 0000000000000001 R14: 0000000000000001 R15: ffff88815a1272c0 FS: 00007f5071ab0800(0000) GS:ffff88817ac80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000055efcd5ca000 CR3: 000000015d312000 CR4: 00000000000006e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: mem_cgroup_charge+0x166/0x4f0 __add_to_page_cache_locked+0x4a9/0x710 add_to_page_cache_locked+0x15/0x20 cifs_readpages+0x217/0x1270 read_pages+0x29a/0x670 page_cache_readahead_unbounded+0x24f/0x390 __do_page_cache_readahead+0x3f/0x60 ondemand_readahead+0x1f1/0x470 page_cache_async_readahead+0x14c/0x170 generic_file_buffered_read+0x5df/0x1100 
generic_file_read_iter+0x10c/0x1d0 cifs_strict_readv+0x139/0x170 new_sync_read+0x164/0x250 __vfs_read+0x39/0x60 vfs_read+0xb5/0x1e0 ksys_pread64+0x85/0xf0 __x64_sys_pread64+0x22/0x30 do_syscall_64+0x69/0x150 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7f5071fcb1af Code: Bad RIP value. RSP: 002b:00007ffde2cdb8e0 EFLAGS: 00000293 ORIG_RAX: 0000000000000011 RAX: ffffffffffffffda RBX: 00007ffde2cdb990 RCX: 00007f5071fcb1af RDX: 0000000000001000 RSI: 000055efcd5ca000 RDI: 0000000000000003 RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000001000 R11: 0000000000000293 R12: 0000000000000001 R13: 000000000009f000 R14: 0000000000000000 R15: 0000000000001000 Modules linked in: ---[ end trace 725fa14a3e1af65c ]--- Since commit 3fea5a499d57 ("mm: memcontrol: convert page cache to a new mem_cgroup_charge() API") not cancel the page charge, the pages maybe double add to pagecache: thread1 | thread2 cifs_readpages readpages_get_pages add_to_page_cache_locked(head,index=n)=0 | readpages_get_pages | add_to_page_cache_locked(head,index=n+1)=0 add_to_page_cache_locked(head, index=n+1)=-EEXIST then, will next loop with list head page's index=n+1 and the page->mapping not NULL readpages_get_pages add_to_page_cache_locked(head, index=n+1) commit_charge VM_BUG_ON_PAGE So, we should not do the next loop when any page add to page cache failed. 
Reported-by: Hulk Robot Signed-off-by: Zhang Xiaoxu Signed-off-by: Steve French Acked-by: Ronnie Sahlberg Signed-off-by: Sasha Levin --- fs/cifs/file.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e78b52c582f1..5cb15649adb0 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3804,7 +3804,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break; __SetPageLocked(page); - if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { + rc = add_to_page_cache_locked(page, mapping, page->index, gfp); + if (rc) { __ClearPageLocked(page); break; } @@ -3820,6 +3821,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, struct list_head *page_list, unsigned num_pages) { int rc; + int err = 0; struct list_head tmplist; struct cifsFileInfo *open_file = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); @@ -3860,7 +3862,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, * the order of declining indexes. When we put the pages in * the rdata->pages, then we want them in increasing order. 
*/ - while (!list_empty(page_list)) { + while (!list_empty(page_list) && !err) { unsigned int i, nr_pages, bytes, rsize; loff_t offset; struct page *page, *tpage; @@ -3883,9 +3885,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, return 0; } - rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, + nr_pages = 0; + err = readpages_get_pages(mapping, page_list, rsize, &tmplist, &nr_pages, &offset, &bytes); - if (rc) { + if (!nr_pages) { add_credits_and_wake_if(server, credits, 0); break; } -- GitLab From 03dfb191acea76e6f92379abdbb5335139b28ffa Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 24 Jun 2020 01:53:08 -0700 Subject: [PATCH 1078/1304] nvme: fix possible deadlock when I/O is blocked [ Upstream commit 3b4b19721ec652ad2c4fe51dfbe5124212b5f581 ] Revert fab7772bfbcf ("nvme-multipath: revalidate nvme_ns_head gendisk in nvme_validate_ns") When adding a new namespace to the head disk (via nvme_mpath_set_live) we will see partition scan which triggers I/O on the mpath device node. This process will usually be triggered from the scan_work which holds the scan_lock. If I/O blocks (if we got ana change currently have only available paths but none are accessible) this can deadlock on the head disk bd_mutex as both partition scan I/O takes it, and head disk revalidation takes it to check for resize (also triggered from scan_work on a different path). See trace [1]. The mpath disk revalidation was originally added to detect online disk size change, but this is no longer needed since commit cb224c3af4df ("nvme: Convert to use set_capacity_revalidate_and_notify") which already updates resize info without unnecessarily revalidating the disk (the mpath disk doesn't even implement .revalidate_disk fop). [1]: -- kernel: INFO: task kworker/u65:9:494 blocked for more than 241 seconds. kernel: Tainted: G OE 5.3.5-050305-generic #201910071830 kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. 
kernel: kworker/u65:9 D 0 494 2 0x80004000 kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core] kernel: Call Trace: kernel: __schedule+0x2b9/0x6c0 kernel: schedule+0x42/0xb0 kernel: schedule_preempt_disabled+0xe/0x10 kernel: __mutex_lock.isra.0+0x182/0x4f0 kernel: __mutex_lock_slowpath+0x13/0x20 kernel: mutex_lock+0x2e/0x40 kernel: revalidate_disk+0x63/0xa0 kernel: __nvme_revalidate_disk+0xfe/0x110 [nvme_core] kernel: nvme_revalidate_disk+0xa4/0x160 [nvme_core] kernel: ? evict+0x14c/0x1b0 kernel: revalidate_disk+0x2b/0xa0 kernel: nvme_validate_ns+0x49/0x940 [nvme_core] kernel: ? blk_mq_free_request+0xd2/0x100 kernel: ? __nvme_submit_sync_cmd+0xbe/0x1e0 [nvme_core] kernel: nvme_scan_work+0x24f/0x380 [nvme_core] kernel: process_one_work+0x1db/0x380 kernel: worker_thread+0x249/0x400 kernel: kthread+0x104/0x140 kernel: ? process_one_work+0x380/0x380 kernel: ? kthread_park+0x80/0x80 kernel: ret_from_fork+0x1f/0x40 ... kernel: INFO: task kworker/u65:1:2630 blocked for more than 241 seconds. kernel: Tainted: G OE 5.3.5-050305-generic #201910071830 kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. kernel: kworker/u65:1 D 0 2630 2 0x80004000 kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core] kernel: Call Trace: kernel: __schedule+0x2b9/0x6c0 kernel: schedule+0x42/0xb0 kernel: io_schedule+0x16/0x40 kernel: do_read_cache_page+0x438/0x830 kernel: ? __switch_to_asm+0x34/0x70 kernel: ? file_fdatawait_range+0x30/0x30 kernel: read_cache_page+0x12/0x20 kernel: read_dev_sector+0x27/0xc0 kernel: read_lba+0xc1/0x220 kernel: ? kmem_cache_alloc_trace+0x19c/0x230 kernel: efi_partition+0x1e6/0x708 kernel: ? vsnprintf+0x39e/0x4e0 kernel: ? 
snprintf+0x49/0x60 kernel: check_partition+0x154/0x244 kernel: rescan_partitions+0xae/0x280 kernel: __blkdev_get+0x40f/0x560 kernel: blkdev_get+0x3d/0x140 kernel: __device_add_disk+0x388/0x480 kernel: device_add_disk+0x13/0x20 kernel: nvme_mpath_set_live+0x119/0x140 [nvme_core] kernel: nvme_update_ns_ana_state+0x5c/0x60 [nvme_core] kernel: nvme_set_ns_ana_state+0x1e/0x30 [nvme_core] kernel: nvme_parse_ana_log+0xa1/0x180 [nvme_core] kernel: ? nvme_update_ns_ana_state+0x60/0x60 [nvme_core] kernel: nvme_mpath_add_disk+0x47/0x90 [nvme_core] kernel: nvme_validate_ns+0x396/0x940 [nvme_core] kernel: ? blk_mq_free_request+0xd2/0x100 kernel: nvme_scan_work+0x24f/0x380 [nvme_core] kernel: process_one_work+0x1db/0x380 kernel: worker_thread+0x249/0x400 kernel: kthread+0x104/0x140 kernel: ? process_one_work+0x380/0x380 kernel: ? kthread_park+0x80/0x80 kernel: ret_from_fork+0x1f/0x40 -- Fixes: fab7772bfbcf ("nvme-multipath: revalidate nvme_ns_head gendisk in nvme_validate_ns") Signed-off-by: Anton Eidelman Signed-off-by: Sagi Grimberg Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- drivers/nvme/host/core.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index faa7feebb609..84fcfcdb8ba5 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1599,7 +1599,6 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); blk_queue_stack_limits(ns->head->disk->queue, ns->queue); - revalidate_disk(ns->head->disk); } #endif } -- GitLab From 694ec54b7826da9043dd07cdfd80f7afcc926aed Mon Sep 17 00:00:00 2001 From: Javed Hasan Date: Mon, 22 Jun 2020 03:12:11 -0700 Subject: [PATCH 1079/1304] scsi: libfc: Handling of extra kref [ Upstream commit 71f2bf85e90d938d4a9ef9dd9bfa8d9b0b6a03f7 ] Handling of extra kref which is done by lookup table in case rdata is already present in list. This issue was leading to memory leak. 
Trace from KMEMLEAK tool: unreferenced object 0xffff8888259e8780 (size 512): comm "kworker/2:1", pid 182614, jiffies 4433237386 (age 113021.971s) hex dump (first 32 bytes): 58 0a ec cf 83 88 ff ff 00 00 00 00 00 00 00 00 01 00 00 00 08 00 00 00 13 7d f0 1e 0e 00 00 10 backtrace: [<000000006b25760f>] fc_rport_recv_req+0x3c6/0x18f0 [libfc] [<00000000f208d994>] fc_lport_recv_els_req+0x120/0x8a0 [libfc] [<00000000a9c437b8>] fc_lport_recv+0xb9/0x130 [libfc] [<00000000ad5be37b>] qedf_ll2_process_skb+0x73d/0xad0 [qedf] [<00000000e0eb6893>] process_one_work+0x382/0x6c0 [<000000002dfd9e21>] worker_thread+0x57/0x5c0 [<00000000b648204f>] kthread+0x1a0/0x1c0 [<0000000072f5ab20>] ret_from_fork+0x35/0x40 [<000000001d5c05d8>] 0xffffffffffffffff Below is the log sequence which leads to memory leak. Here we get the nested "Received PLOGI request" for same port and this request leads to call the fc_rport_create() twice for the same rport. kernel: host1: rport fffce5: Received PLOGI request kernel: host1: rport fffce5: Received PLOGI in INIT state kernel: host1: rport fffce5: Port is Ready kernel: host1: rport fffce5: Received PRLI request while in state Ready kernel: host1: rport fffce5: PRLI rspp type 8 active 1 passive 0 kernel: host1: rport fffce5: Received LOGO request while in state Ready kernel: host1: rport fffce5: Delete port kernel: host1: rport fffce5: Received PLOGI request kernel: host1: rport fffce5: Received PLOGI in state Delete - send busy Link: https://lore.kernel.org/r/20200622101212.3922-2-jhasan@marvell.com Reviewed-by: Girish Basrur Reviewed-by: Saurav Kashyap Reviewed-by: Shyam Sundar Signed-off-by: Javed Hasan Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/libfc/fc_rport.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 90a748551ede..f39d2d62b002 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -145,8 +145,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) lockdep_assert_held(&lport->disc.disc_mutex); rdata = fc_rport_lookup(lport, port_id); - if (rdata) + if (rdata) { + kref_put(&rdata->kref, fc_rport_destroy); return rdata; + } if (lport->rport_priv_size > 0) rport_priv_size = lport->rport_priv_size; -- GitLab From 4575845e9c91a3edb0d4d8cf93855f11ddf4ffce Mon Sep 17 00:00:00 2001 From: Javed Hasan Date: Fri, 26 Jun 2020 02:49:59 -0700 Subject: [PATCH 1080/1304] scsi: libfc: Skip additional kref updating work event [ Upstream commit 823a65409c8990f64c5693af98ce0e7819975cba ] When an rport event (RPORT_EV_READY) is updated without work being queued, avoid taking an additional reference. This issue was leading to memory leak. Trace from KMEMLEAK tool: unreferenced object 0xffff8888259e8780 (size 512): comm "kworker/2:1", jiffies 4433237386 (age 113021.971s) hex dump (first 32 bytes): 58 0a ec cf 83 88 ff ff 00 00 00 00 00 00 00 00 01 00 00 00 08 00 00 00 13 7d f0 1e 0e 00 00 10 backtrace: [<000000006b25760f>] fc_rport_recv_req+0x3c6/0x18f0 [libfc] [<00000000f208d994>] fc_lport_recv_els_req+0x120/0x8a0 [libfc] [<00000000a9c437b8>] fc_lport_recv+0xb9/0x130 [libfc] [<00000000a9c437b8>] fc_lport_recv+0xb9/0x130 [libfc] [<00000000ad5be37b>] qedf_ll2_process_skb+0x73d/0xad0 [qedf] [<00000000e0eb6893>] process_one_work+0x382/0x6c0 [<000000002dfd9e21>] worker_thread+0x57/0x5c0 [<00000000b648204f>] kthread+0x1a0/0x1c0 [<0000000072f5ab20>] ret_from_fork+0x35/0x40 [<000000001d5c05d8>] 0xffffffffffffffff Below is the log sequence which leads to memory leak. 
Here we get the RPORT_EV_READY and RPORT_EV_STOP back to back, which leads to the event RPORT_EV_READY being overwritten by the event RPORT_EV_STOP. Because of this, kref_count gets incremented by 1. kernel: host0: rport fffce5: Received PLOGI request kernel: host0: rport fffce5: Received PLOGI in INIT state kernel: host0: rport fffce5: Port is Ready kernel: host0: rport fffce5: Received PRLI request while in state Ready kernel: host0: rport fffce5: PRLI rspp type 8 active 1 passive 0 kernel: host0: rport fffce5: Received LOGO request while in state Ready kernel: host0: rport fffce5: Delete port kernel: host0: rport fffce5: Received PLOGI request kernel: host0: rport fffce5: Received PLOGI in state Delete - send busy kernel: host0: rport fffce5: work event 3 kernel: host0: rport fffce5: lld callback ev 3 kernel: host0: rport fffce5: work delete Link: https://lore.kernel.org/r/20200626094959.32151-1-jhasan@marvell.com Reviewed-by: Girish Basrur Reviewed-by: Saurav Kashyap Reviewed-by: Shyam Sundar Signed-off-by: Javed Hasan Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/libfc/fc_rport.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index f39d2d62b002..2b3239765c24 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -495,10 +495,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata, fc_rport_state_enter(rdata, RPORT_ST_DELETE); - kref_get(&rdata->kref); - if (rdata->event == RPORT_EV_NONE && - !queue_work(rport_event_queue, &rdata->event_work)) - kref_put(&rdata->kref, fc_rport_destroy); + if (rdata->event == RPORT_EV_NONE) { + kref_get(&rdata->kref); + if (!queue_work(rport_event_queue, &rdata->event_work)) + kref_put(&rdata->kref, fc_rport_destroy); + } rdata->event = event; } -- GitLab From 511a287cb62787fae3d343930078bee77e06cd05 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Fri, 26 Jun 2020 10:21:15 -0700 Subject: [PATCH 1081/1304] selftests/x86/syscall_nt: Clear weird flags after each test [ Upstream commit a61fa2799ef9bf6c4f54cf7295036577cececc72 ] Clear the weird flags before logging to improve strace output -- logging results while, say, TF is set does no one any favors. 
Signed-off-by: Andy Lutomirski Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/907bfa5a42d4475b8245e18b67a04b13ca51ffdb.1593191971.git.luto@kernel.org Signed-off-by: Sasha Levin --- tools/testing/selftests/x86/syscall_nt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c index 43fcab367fb0..74e6b3fc2d09 100644 --- a/tools/testing/selftests/x86/syscall_nt.c +++ b/tools/testing/selftests/x86/syscall_nt.c @@ -67,6 +67,7 @@ static void do_it(unsigned long extraflags) set_eflags(get_eflags() | extraflags); syscall(SYS_getpid); flags = get_eflags(); + set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED); if ((flags & extraflags) == extraflags) { printf("[OK]\tThe syscall worked and flags are still set\n"); } else { -- GitLab From 0d1682ca6d1314c27d07afacda4dd51baf5fcd94 Mon Sep 17 00:00:00 2001 From: Zeng Tao Date: Wed, 15 Jul 2020 15:34:41 +0800 Subject: [PATCH 1082/1304] vfio/pci: fix racy on error and request eventfd ctx [ Upstream commit b872d0640840018669032b20b6375a478ed1f923 ] The vfio_pci_release call will free and clear the error and request eventfd ctx while these ctx could be in use at the same time in the function like vfio_pci_request, and it's expected to protect them under the vdev->igate mutex, which is missing in vfio_pci_release. 
This issue is introduced since commit 1518ac272e78 ("vfio/pci: fix memory leaks of eventfd ctx"), and since commit 5c5866c593bb ("vfio/pci: Clear error and request eventfd ctx after releasing"), it's very easy to trigger the kernel panic like this: [ 9513.904346] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008 [ 9513.913091] Mem abort info: [ 9513.915871] ESR = 0x96000006 [ 9513.918912] EC = 0x25: DABT (current EL), IL = 32 bits [ 9513.924198] SET = 0, FnV = 0 [ 9513.927238] EA = 0, S1PTW = 0 [ 9513.930364] Data abort info: [ 9513.933231] ISV = 0, ISS = 0x00000006 [ 9513.937048] CM = 0, WnR = 0 [ 9513.940003] user pgtable: 4k pages, 48-bit VAs, pgdp=0000007ec7d12000 [ 9513.946414] [0000000000000008] pgd=0000007ec7d13003, p4d=0000007ec7d13003, pud=0000007ec728c003, pmd=0000000000000000 [ 9513.956975] Internal error: Oops: 96000006 [#1] PREEMPT SMP [ 9513.962521] Modules linked in: vfio_pci vfio_virqfd vfio_iommu_type1 vfio hclge hns3 hnae3 [last unloaded: vfio_pci] [ 9513.972998] CPU: 4 PID: 1327 Comm: bash Tainted: G W 5.8.0-rc4+ #3 [ 9513.980443] Hardware name: Huawei TaiShan 2280 V2/BC82AMDC, BIOS 2280-V2 CS V3.B270.01 05/08/2020 [ 9513.989274] pstate: 80400089 (Nzcv daIf +PAN -UAO BTYPE=--) [ 9513.994827] pc : _raw_spin_lock_irqsave+0x48/0x88 [ 9513.999515] lr : eventfd_signal+0x6c/0x1b0 [ 9514.003591] sp : ffff800038a0b960 [ 9514.006889] x29: ffff800038a0b960 x28: ffff007ef7f4da10 [ 9514.012175] x27: ffff207eefbbfc80 x26: ffffbb7903457000 [ 9514.017462] x25: ffffbb7912191000 x24: ffff007ef7f4d400 [ 9514.022747] x23: ffff20be6e0e4c00 x22: 0000000000000008 [ 9514.028033] x21: 0000000000000000 x20: 0000000000000000 [ 9514.033321] x19: 0000000000000008 x18: 0000000000000000 [ 9514.038606] x17: 0000000000000000 x16: ffffbb7910029328 [ 9514.043893] x15: 0000000000000000 x14: 0000000000000001 [ 9514.049179] x13: 0000000000000000 x12: 0000000000000002 [ 9514.054466] x11: 0000000000000000 x10: 0000000000000a00 [ 9514.059752] x9 : 
ffff800038a0b840 x8 : ffff007ef7f4de60 [ 9514.065038] x7 : ffff007fffc96690 x6 : fffffe01faffb748 [ 9514.070324] x5 : 0000000000000000 x4 : 0000000000000000 [ 9514.075609] x3 : 0000000000000000 x2 : 0000000000000001 [ 9514.080895] x1 : ffff007ef7f4d400 x0 : 0000000000000000 [ 9514.086181] Call trace: [ 9514.088618] _raw_spin_lock_irqsave+0x48/0x88 [ 9514.092954] eventfd_signal+0x6c/0x1b0 [ 9514.096691] vfio_pci_request+0x84/0xd0 [vfio_pci] [ 9514.101464] vfio_del_group_dev+0x150/0x290 [vfio] [ 9514.106234] vfio_pci_remove+0x30/0x128 [vfio_pci] [ 9514.111007] pci_device_remove+0x48/0x108 [ 9514.115001] device_release_driver_internal+0x100/0x1b8 [ 9514.120200] device_release_driver+0x28/0x38 [ 9514.124452] pci_stop_bus_device+0x68/0xa8 [ 9514.128528] pci_stop_and_remove_bus_device+0x20/0x38 [ 9514.133557] pci_iov_remove_virtfn+0xb4/0x128 [ 9514.137893] sriov_disable+0x3c/0x108 [ 9514.141538] pci_disable_sriov+0x28/0x38 [ 9514.145445] hns3_pci_sriov_configure+0x48/0xb8 [hns3] [ 9514.150558] sriov_numvfs_store+0x110/0x198 [ 9514.154724] dev_attr_store+0x44/0x60 [ 9514.158373] sysfs_kf_write+0x5c/0x78 [ 9514.162018] kernfs_fop_write+0x104/0x210 [ 9514.166010] __vfs_write+0x48/0x90 [ 9514.169395] vfs_write+0xbc/0x1c0 [ 9514.172694] ksys_write+0x74/0x100 [ 9514.176079] __arm64_sys_write+0x24/0x30 [ 9514.179987] el0_svc_common.constprop.4+0x110/0x200 [ 9514.184842] do_el0_svc+0x34/0x98 [ 9514.188144] el0_svc+0x14/0x40 [ 9514.191185] el0_sync_handler+0xb0/0x2d0 [ 9514.195088] el0_sync+0x140/0x180 [ 9514.198389] Code: b9001020 d2800000 52800022 f9800271 (885ffe61) [ 9514.204455] ---[ end trace 648de00c8406465f ]--- [ 9514.212308] note: bash[1327] exited with preempt_count 1 Cc: Qian Cai Cc: Alex Williamson Fixes: 1518ac272e78 ("vfio/pci: fix memory leaks of eventfd ctx") Signed-off-by: Zeng Tao Signed-off-by: Alex Williamson Signed-off-by: Sasha Levin --- drivers/vfio/pci/vfio_pci.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/vfio/pci/vfio_pci.c 
b/drivers/vfio/pci/vfio_pci.c index 94fad366312f..58e7336b2748 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -409,14 +409,19 @@ static void vfio_pci_release(void *device_data) if (!(--vdev->refcnt)) { vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); + mutex_lock(&vdev->igate); if (vdev->err_trigger) { eventfd_ctx_put(vdev->err_trigger); vdev->err_trigger = NULL; } + mutex_unlock(&vdev->igate); + + mutex_lock(&vdev->igate); if (vdev->req_trigger) { eventfd_ctx_put(vdev->req_trigger); vdev->req_trigger = NULL; } + mutex_unlock(&vdev->igate); } mutex_unlock(&driver_lock); -- GitLab From 803b2f2f9c3a22821b9328f930c09311da1b1ab3 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 17 Jul 2020 15:12:05 +0800 Subject: [PATCH 1083/1304] btrfs: qgroup: fix data leak caused by race between writeback and truncate [ Upstream commit fa91e4aa1716004ea8096d5185ec0451e206aea0 ] [BUG] When running tests like generic/013 on test device with btrfs quota enabled, it can normally lead to data leak, detected at unmount time: BTRFS warning (device dm-3): qgroup 0/5 has unreleased space, type 0 rsv 4096 ------------[ cut here ]------------ WARNING: CPU: 11 PID: 16386 at fs/btrfs/disk-io.c:4142 close_ctree+0x1dc/0x323 [btrfs] RIP: 0010:close_ctree+0x1dc/0x323 [btrfs] Call Trace: btrfs_put_super+0x15/0x17 [btrfs] generic_shutdown_super+0x72/0x110 kill_anon_super+0x18/0x30 btrfs_kill_super+0x17/0x30 [btrfs] deactivate_locked_super+0x3b/0xa0 deactivate_super+0x40/0x50 cleanup_mnt+0x135/0x190 __cleanup_mnt+0x12/0x20 task_work_run+0x64/0xb0 __prepare_exit_to_usermode+0x1bc/0x1c0 __syscall_return_slowpath+0x47/0x230 do_syscall_64+0x64/0xb0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 ---[ end trace caf08beafeca2392 ]--- BTRFS error (device dm-3): qgroup reserved space leaked [CAUSE] In the offending case, the offending operations are: 2/6: writev f2X[269 1 0 0 0 0] [1006997,67,288] 0 2/7: truncate f2X[269 1 0 0 48 1026293] 18388 0 The following sequence 
of events could happen after the writev(): CPU1 (writeback) | CPU2 (truncate) ----------------------------------------------------------------- btrfs_writepages() | |- extent_write_cache_pages() | |- Got page for 1003520 | | 1003520 is Dirty, no writeback | | So (!clear_page_dirty_for_io()) | | gets called for it | |- Now page 1003520 is Clean. | | | btrfs_setattr() | | |- btrfs_setsize() | | |- truncate_setsize() | | New i_size is 18388 |- __extent_writepage() | | |- page_offset() > i_size | |- btrfs_invalidatepage() | |- Page is clean, so no qgroup | callback executed This means, the qgroup reserved data space is not properly released in btrfs_invalidatepage() as the page is Clean. [FIX] Instead of checking the dirty bit of a page, call btrfs_qgroup_free_data() unconditionally in btrfs_invalidatepage(). As qgroup rsv are completely bound to the QGROUP_RESERVED bit of io_tree, not bound to page status, thus we won't cause double freeing anyway. Fixes: 0b34c261e235 ("btrfs: qgroup: Prevent qgroup->reserved from going subzero") CC: stable@vger.kernel.org # 4.14+ Reviewed-by: Josef Bacik Signed-off-by: Qu Wenruo Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/inode.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bdfe159a60da..64d459ca76d0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8913,20 +8913,17 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, /* * Qgroup reserved space handler * Page here will be either - * 1) Already written to disk - * In this case, its reserved space is released from data rsv map - * and will be freed by delayed_ref handler finally. - * So even we call qgroup_free_data(), it won't decrease reserved - * space. - * 2) Not written to disk - * This means the reserved space should be freed here. 
However, - * if a truncate invalidates the page (by clearing PageDirty) - * and the page is accounted for while allocating extent - * in btrfs_check_data_free_space() we let delayed_ref to - * free the entire extent. + * 1) Already written to disk or ordered extent already submitted + * Then its QGROUP_RESERVED bit in io_tree is already cleaned. + * Qgroup will be handled by its qgroup_record then. + * btrfs_qgroup_free_data() call will do nothing here. + * + * 2) Not written to disk yet + * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED + * bit of its io_tree, and free the qgroup reserved data space. + * Since the IO will never happen for this page. */ - if (PageDirty(page)) - btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); + btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE); if (!inode_evicting) { clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DIRTY | -- GitLab From 7d3d6fc18caeeef094d4417cbddf335b82b1c2c5 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Mon, 10 Feb 2020 21:26:34 +0800 Subject: [PATCH 1084/1304] ubi: fastmap: Free unused fastmap anchor peb during detach [ Upstream commit c16f39d14a7e0ec59881fbdb22ae494907534384 ] When CONFIG_MTD_UBI_FASTMAP is enabled, fm_anchor will be assigned a free PEB during ubi_wl_init() or ubi_update_fastmap(). However if fastmap is not used or disabled on the MTD device, ubi_wl_entry related with the PEB will not be freed during detach. So Fix it by freeing the unused fastmap anchor during detach. 
Fixes: f9c34bb52997 ("ubi: Fix producing anchor PEBs") Reported-by: syzbot+f317896aae32eb281a58@syzkaller.appspotmail.com Reviewed-by: Sascha Hauer Signed-off-by: Hou Tao Signed-off-by: Richard Weinberger Signed-off-by: Sasha Levin --- drivers/mtd/ubi/fastmap-wl.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 13efebb40022..e08f6b4637dd 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -48,6 +48,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root) return victim; } +static inline void return_unused_peb(struct ubi_device *ubi, + struct ubi_wl_entry *e) +{ + wl_tree_add(e, &ubi->free); + ubi->free_count++; +} + /** * return_unused_pool_pebs - returns unused PEB to the free tree. * @ubi: UBI device description object @@ -61,8 +68,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi, for (i = pool->used; i < pool->size; i++) { e = ubi->lookuptbl[pool->pebs[i]]; - wl_tree_add(e, &ubi->free); - ubi->free_count++; + return_unused_peb(ubi, e); } } @@ -370,6 +376,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi) return_unused_pool_pebs(ubi, &ubi->fm_pool); return_unused_pool_pebs(ubi, &ubi->fm_wl_pool); + if (ubi->fm_anchor) { + return_unused_peb(ubi, ubi->fm_anchor); + ubi->fm_anchor = NULL; + } + if (ubi->fm) { for (i = 0; i < ubi->fm->used_blocks; i++) kfree(ubi->fm->e[i]); -- GitLab From 31c5c44707d8eb6809100a512b0877da51f795c2 Mon Sep 17 00:00:00 2001 From: Jin Yao Date: Thu, 30 Apr 2020 08:36:18 +0800 Subject: [PATCH 1085/1304] perf parse-events: Use strcmp() to compare the PMU name [ Upstream commit 8510895bafdbf7c4dd24c22946d925691135c2b2 ] A big uncore event group is split into multiple small groups which only include the uncore events from the same PMU. This has been supported in the commit 3cdc5c2cb924a ("perf parse-events: Handle uncore event aliases in small groups properly"). 
If the event's PMU name starts to repeat, it must be a new event. That can be used to distinguish the leader from other members. But now it only compares the pointer of pmu_name (leader->pmu_name == evsel->pmu_name). If we use "perf stat -M LLC_MISSES.PCIE_WRITE -a" on cascadelakex, the event list is: evsel->name evsel->pmu_name --------------------------------------------------------------- unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_4 (as leader) unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_2 unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_0 unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_5 unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_3 unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_1 unc_iio_data_req_of_cpu.mem_write.part1 uncore_iio_4 ...... For the event "unc_iio_data_req_of_cpu.mem_write.part1" with "uncore_iio_4", it should be the event from PMU "uncore_iio_4". It's not a new leader for this PMU. But if we use "(leader->pmu_name == evsel->pmu_name)", the check would be failed and the event is stored to leaders[] as a new PMU leader. So this patch uses strcmp to compare the PMU name between events. Fixes: d4953f7ef1a2 ("perf parse-events: Fix 3 use after frees found with clang ASAN") Signed-off-by: Jin Yao Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jin Yao Cc: Kan Liang Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/20200430003618.17002-1-yao.jin@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/util/parse-events.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 6d087d9acd5e..0eff0c3ba9ee 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1421,12 +1421,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list, * event. 
That can be used to distinguish the leader from * other members, even they have the same event name. */ - if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) { + if ((leader != evsel) && + !strcmp(leader->pmu_name, evsel->pmu_name)) { is_leader = false; continue; } - /* The name is always alias name */ - WARN_ON(strcmp(leader->name, evsel->name)); /* Store the leader event for each PMU */ leaders[nr_pmu++] = (uintptr_t) evsel; -- GitLab From 1e6a4232befee0c3dbd201f8a50b5c333498f259 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Sat, 25 Apr 2020 11:39:48 +0800 Subject: [PATCH 1086/1304] net: openvswitch: use div_u64() for 64-by-32 divisions [ Upstream commit 659d4587fe7233bfdff303744b20d6f41ad04362 ] Compile the kernel for arm 32 platform, the build warning found. To fix that, should use div_u64() for divisions. | net/openvswitch/meter.c:396: undefined reference to `__udivdi3' [add more commit msg, change reported tag, and use div_u64 instead of do_div by Tonghao] Fixes: e57358873bb5d6ca ("net: openvswitch: use u64 for meter bucket") Reported-by: kbuild test robot Signed-off-by: Tonghao Zhang Tested-by: Tonghao Zhang Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/openvswitch/meter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index 6f5131d1074b..5ea2471ffc03 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -256,7 +256,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a) * Start with a full bucket. 
*/ band->bucket = (band->burst_size + band->rate) * 1000ULL; - band_max_delta_t = band->bucket / band->rate; + band_max_delta_t = div_u64(band->bucket, band->rate); if (band_max_delta_t > meter->max_delta_t) meter->max_delta_t = band_max_delta_t; band++; -- GitLab From 906c9129787bf890f3f1b562ddac45c3ec0965a8 Mon Sep 17 00:00:00 2001 From: Anthony Iliopoulos Date: Tue, 14 Jul 2020 13:11:59 +0200 Subject: [PATCH 1087/1304] nvme: explicitly update mpath disk capacity on revalidation [ Upstream commit 05b29021fba5e725dd385151ef00b6340229b500 ] Commit 3b4b19721ec652 ("nvme: fix possible deadlock when I/O is blocked") reverted multipath head disk revalidation due to deadlocks caused by holding the bd_mutex during revalidate. Updating the multipath disk blockdev size is still required though for userspace to be able to observe any resizing while the device is mounted. Directly update the bdev inode size to avoid unnecessarily holding the bdev->bd_mutex. Fixes: 3b4b19721ec652 ("nvme: fix possible deadlock when I/O is blocked") Signed-off-by: Anthony Iliopoulos Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- drivers/nvme/host/core.c | 1 + drivers/nvme/host/nvme.h | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 84fcfcdb8ba5..33dad9774da0 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1599,6 +1599,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) if (ns->head->disk) { nvme_update_disk_info(ns->head->disk, ns, id); blk_queue_stack_limits(ns->head->disk->queue, ns->queue); + nvme_mpath_update_disk_size(ns->head->disk); } #endif } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index a70b997060e6..9c2e7a151e40 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -504,6 +504,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) kblockd_schedule_work(&head->requeue_work); } 
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ + struct block_device *bdev = bdget_disk(disk, 0); + + if (bdev) { + bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT); + bdput(bdev); + } +} + extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; @@ -570,6 +580,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) { } +static inline void nvme_mpath_update_disk_size(struct gendisk *disk) +{ +} #endif /* CONFIG_NVME_MULTIPATH */ #ifdef CONFIG_NVM -- GitLab From 9688d3074108ece8d1e82b017216062731e8c8c8 Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: Thu, 27 Aug 2020 19:33:56 +0200 Subject: [PATCH 1088/1304] ASoC: wm8994: Skip setting of the WM8994_MICBIAS register for WM1811 [ Upstream commit 811c5494436789e7149487c06e0602b507ce274b ] The WM8994_MICBIAS register is not available in the WM1811 CODEC so skip initialization of that register for that device. 
This suppresses an error during boot: "wm8994-codec: ASoC: error at snd_soc_component_update_bits on wm8994-codec" Signed-off-by: Sylwester Nawrocki Acked-by: Krzysztof Kozlowski Acked-by: Charles Keepax Link: https://lore.kernel.org/r/20200827173357.31891-1-s.nawrocki@samsung.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/codecs/wm8994.c | 2 ++ sound/soc/codecs/wm_hubs.c | 3 +++ sound/soc/codecs/wm_hubs.h | 1 + 3 files changed, 6 insertions(+) diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 01acb8da2f48..cd089b414302 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -4051,11 +4051,13 @@ static int wm8994_component_probe(struct snd_soc_component *component) wm8994->hubs.dcs_readback_mode = 2; break; } + wm8994->hubs.micd_scthr = true; break; case WM8958: wm8994->hubs.dcs_readback_mode = 1; wm8994->hubs.hp_startup_mode = 1; + wm8994->hubs.micd_scthr = true; switch (control->revision) { case 0: diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index fed6ea9b019f..da7fa6f5459e 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -1227,6 +1227,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component, snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL, WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB); + if (!hubs->micd_scthr) + return 0; + snd_soc_component_update_bits(component, WM8993_MICBIAS, WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK | WM8993_MICB1_LVL | WM8993_MICB2_LVL, diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h index ee339ad8514d..1433d73e09bf 100644 --- a/sound/soc/codecs/wm_hubs.h +++ b/sound/soc/codecs/wm_hubs.h @@ -31,6 +31,7 @@ struct wm_hubs_data { int hp_startup_mode; int series_startup; int no_series_update; + bool micd_scthr; bool no_cache_dac_hp_direct; struct list_head dcs_cache; -- GitLab From 9af818a3b073eb39334318976feb30a492df8a16 Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: 
Thu, 27 Aug 2020 19:33:57 +0200 Subject: [PATCH 1089/1304] ASoC: wm8994: Ensure the device is resumed in wm89xx_mic_detect functions [ Upstream commit f5a2cda4f1db89776b64c4f0f2c2ac609527ac70 ] When the wm8958_mic_detect, wm8994_mic_detect functions get called from the machine driver, e.g. from the card's late_probe() callback, the CODEC device may be PM runtime suspended and any regmap writes have no effect. Add PM runtime calls to these functions to ensure the device registers are updated as expected. This suppresses an error during boot "wm8994-codec: ASoC: error at snd_soc_component_update_bits on wm8994-codec" caused by the regmap access error due to the cache_only flag being set. Signed-off-by: Sylwester Nawrocki Acked-by: Krzysztof Kozlowski Acked-by: Charles Keepax Link: https://lore.kernel.org/r/20200827173357.31891-2-s.nawrocki@samsung.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/codecs/wm8994.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index cd089b414302..e3e069277a3f 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -3376,6 +3376,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * return -EINVAL; } + pm_runtime_get_sync(component->dev); + switch (micbias) { case 1: micdet = &wm8994->micdet[0]; @@ -3423,6 +3425,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * snd_soc_dapm_sync(dapm); + pm_runtime_put(component->dev); + return 0; } EXPORT_SYMBOL_GPL(wm8994_mic_detect); @@ -3790,6 +3794,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * return -EINVAL; } + pm_runtime_get_sync(component->dev); + if (jack) { snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS"); snd_soc_dapm_sync(dapm); @@ -3858,6 +3864,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack * snd_soc_dapm_sync(dapm); } + 
pm_runtime_put(component->dev); + return 0; } EXPORT_SYMBOL_GPL(wm8958_mic_detect); -- GitLab From 66dc19456dc9bd2e4afc118c98c844f02ed3183c Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 1 Sep 2020 10:06:23 +0200 Subject: [PATCH 1090/1304] ASoC: Intel: bytcr_rt5640: Add quirk for MPMAN Converter9 2-in-1 [ Upstream commit 6a0137101f47301fff2da6ba4b9048383d569909 ] The MPMAN Converter9 2-in-1 almost fully works with out default settings. The only problem is that it has only 1 speaker so any sounds only playing on the right channel get lost. Add a quirk for this model using the default settings + MONO_SPEAKER. Signed-off-by: Hans de Goede Acked-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20200901080623.4987-1-hdegoede@redhat.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/intel/boards/bytcr_rt5640.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index 0dcd249877c5..ec630127ef2f 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -588,6 +588,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"), + DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* MPMAN MPWIN895CL */ .matches = { -- GitLab From f959196c828ec7a88f838f6888552dc4dee63c8b Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Mon, 24 Aug 2020 17:21:22 -0700 Subject: [PATCH 1091/1304] RISC-V: Take text_mutex in ftrace_init_nop() [ Upstream commit 66d18dbda8469a944dfec6c49d26d5946efba218 ] Without this we get lockdep failures. They're spurious failures as SMP isn't up when ftrace_init_nop() is called. 
As far as I can tell the easiest fix is to just take the lock, which also seems like the safest fix. Signed-off-by: Palmer Dabbelt Acked-by: Guo Ren Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- arch/riscv/include/asm/ftrace.h | 7 +++++++ arch/riscv/kernel/ftrace.c | 19 +++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index c6dcc5291f97..02fbc175142e 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -63,4 +63,11 @@ do { \ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here. */ #define MCOUNT_INSN_SIZE 8 + +#ifndef __ASSEMBLY__ +struct dyn_ftrace; +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); +#define ftrace_init_nop ftrace_init_nop +#endif + #endif diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 6d39f64e4dce..fa8530f05ed4 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, return __ftrace_modify_call(rec->ip, addr, false); } + +/* + * This is called early on, and isn't wrapped by + * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold + * text_mutex, which triggers a lockdep failure. SMP isn't running so we could + * just directly poke the text, but it's simpler to just take the lock + * ourselves. 
+ */ +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) +{ + int out; + + ftrace_arch_code_modify_prepare(); + out = ftrace_make_nop(mod, rec, MCOUNT_ADDR); + ftrace_arch_code_modify_post_process(); + + return out; +} + int ftrace_update_ftrace_func(ftrace_func_t func) { int ret = __ftrace_modify_call((unsigned long)&ftrace_call, -- GitLab From b08005625f251017fd5643927c4fdbee9fdb860c Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 9 Sep 2020 14:27:25 +0200 Subject: [PATCH 1092/1304] s390/init: add missing __init annotations [ Upstream commit fcb2b70cdb194157678fb1a75f9ff499aeba3d2a ] Add __init to reserve_memory_end, reserve_oldmem and remove_oldmem. Sometimes these functions are not inlined, and then the build complains about section mismatch. Signed-off-by: Ilya Leoshkevich Signed-off-by: Heiko Carstens Signed-off-by: Vasily Gorbik Signed-off-by: Sasha Levin --- arch/s390/kernel/setup.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 5f85e0dfa66d..4bda9055daef 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -537,7 +537,7 @@ static struct notifier_block kdump_mem_nb = { /* * Make sure that the area behind memory_end is protected */ -static void reserve_memory_end(void) +static void __init reserve_memory_end(void) { #ifdef CONFIG_CRASH_DUMP if (ipl_info.type == IPL_TYPE_FCP_DUMP && @@ -555,7 +555,7 @@ static void reserve_memory_end(void) /* * Make sure that oldmem, where the dump is stored, is protected */ -static void reserve_oldmem(void) +static void __init reserve_oldmem(void) { #ifdef CONFIG_CRASH_DUMP if (OLDMEM_BASE) @@ -567,7 +567,7 @@ static void reserve_oldmem(void) /* * Make sure that oldmem, where the dump is stored, is protected */ -static void remove_oldmem(void) +static void __init remove_oldmem(void) { #ifdef CONFIG_CRASH_DUMP if (OLDMEM_BASE) -- GitLab From aafa75ff39d05ad8011c1b8fa118c36acec9661a Mon Sep 17 00:00:00 
2001 From: Sven Schnelle Date: Thu, 10 Sep 2020 12:24:53 +0200 Subject: [PATCH 1093/1304] lockdep: fix order in trace_hardirqs_off_caller() [ Upstream commit 73ac74c7d489756d2313219a108809921dbfaea1 ] Switch order so that locking state is consistent even if the IRQ tracer calls into lockdep again. Acked-by: Peter Zijlstra Signed-off-by: Sven Schnelle Signed-off-by: Vasily Gorbik Signed-off-by: Sasha Levin --- kernel/trace/trace_preemptirq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c index 71f553cceb3c..0e373cb0106b 100644 --- a/kernel/trace/trace_preemptirq.c +++ b/kernel/trace/trace_preemptirq.c @@ -59,14 +59,14 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller); __visible void trace_hardirqs_off_caller(unsigned long caller_addr) { + lockdep_hardirqs_off(CALLER_ADDR0); + if (!this_cpu_read(tracing_irq_cpu)) { this_cpu_write(tracing_irq_cpu, 1); tracer_hardirqs_off(CALLER_ADDR0, caller_addr); if (!in_nmi()) trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr); } - - lockdep_hardirqs_off(CALLER_ADDR0); } EXPORT_SYMBOL(trace_hardirqs_off_caller); #endif /* CONFIG_TRACE_IRQFLAGS */ -- GitLab From ce81be26d33f32b5e6edf02abade4259165223c9 Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Wed, 2 Sep 2020 17:11:09 +0800 Subject: [PATCH 1094/1304] drm/amdkfd: fix a memory leak issue [ Upstream commit 087d764159996ae378b08c0fdd557537adfd6899 ] In the resume stage of GPU recovery, start_cpsch will call pm_init which set pm->allocated as false, cause the next pm_release_ib has no chance to release ib memory. Add pm_release_ib in stop_cpsch which will be called in the suspend stage of GPU recovery. 
Reviewed-by: Felix Kuehling Signed-off-by: Dennis Li Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 189212cb3547..bff39f561264 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1101,6 +1101,8 @@ static int stop_cpsch(struct device_queue_manager *dqm) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); dqm_unlock(dqm); + pm_release_ib(&dqm->packets); + kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); pm_uninit(&dqm->packets); -- GitLab From 8216a3852ae50f52e482c15b3a8fcfc4cb312f1e Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 9 Sep 2020 12:32:33 +0200 Subject: [PATCH 1095/1304] i2c: core: Call i2c_acpi_install_space_handler() before i2c_acpi_register_devices() [ Upstream commit 21653a4181ff292480599dad996a2b759ccf050f ] Some ACPI i2c-devices _STA method (which is used to detect if the device is present) use autodetection code which probes which device is present over i2c. This requires the I2C ACPI OpRegion handler to be registered before we enumerate i2c-clients under the i2c-adapter. This fixes the i2c touchpad on the Lenovo ThinkBook 14-IIL and ThinkBook 15 IIL not getting an i2c-client instantiated and thus not working. 
BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1842039 Signed-off-by: Hans de Goede Reviewed-by: Mika Westerberg Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/i2c-core-base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index f225bef1e043..41dd0a08a625 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -1292,8 +1292,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap) /* create pre-declared device nodes */ of_i2c_register_devices(adap); - i2c_acpi_register_devices(adap); i2c_acpi_install_space_handler(adap); + i2c_acpi_register_devices(adap); if (adap->nr < __i2c_first_dynamic_bus_num) i2c_scan_static_board_info(adap); -- GitLab From 8c821f4829eff2bf7f0beaf2471f49296d464c12 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 10 Sep 2020 10:24:57 -0500 Subject: [PATCH 1096/1304] objtool: Fix noreturn detection for ignored functions [ Upstream commit db6c6a0df840e3f52c84cc302cc1a08ba11a4416 ] When a function is annotated with STACK_FRAME_NON_STANDARD, objtool doesn't validate its code paths. It also skips sibling call detection within the function. But sibling call detection is actually needed for the case where the ignored function doesn't have any return instructions. Otherwise objtool naively marks the function as implicit static noreturn, which affects the reachability of its callers, resulting in "unreachable instruction" warnings. Fix it by just enabling sibling call detection for ignored functions. The 'insn->ignore' check in add_jump_destinations() is no longer needed after e6da9567959e ("objtool: Don't use ignore flag for fake jumps"). Fixes the following warning: arch/x86/kvm/vmx/vmx.o: warning: objtool: vmx_handle_exit_irqoff()+0x142: unreachable instruction which triggers on an allmodconfig with CONFIG_GCOV_KERNEL unset. 
Reported-by: Linus Torvalds Signed-off-by: Josh Poimboeuf Signed-off-by: Borislav Petkov Acked-by: Linus Torvalds Link: https://lkml.kernel.org/r/5b1e2536cdbaa5246b60d7791b76130a74082c62.1599751464.git.jpoimboe@redhat.com Signed-off-by: Sasha Levin --- tools/objtool/check.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index fd3071d83dea..c0ab27368a34 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -503,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file) insn->type != INSN_JUMP_UNCONDITIONAL) continue; - if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET) + if (insn->offset == FAKE_JUMP_OFFSET) continue; rela = find_rela_by_dest_range(insn->sec, insn->offset, -- GitLab From a24c2499cdcf12daa243ff0ac945932ad516593f Mon Sep 17 00:00:00 2001 From: Liu Jian Date: Mon, 20 Jul 2020 22:33:15 +0800 Subject: [PATCH 1097/1304] ieee802154: fix one possible memleak in ca8210_dev_com_init [ Upstream commit 88f46b3fe2ac41c381770ebad9f2ee49346b57a2 ] We should call destroy_workqueue to destroy mlme_workqueue in error branch. 
Fixes: ded845a781a5 ("ieee802154: Add CA8210 IEEE 802.15.4 device driver") Signed-off-by: Liu Jian Link: https://lore.kernel.org/r/20200720143315.40523-1-liujian56@huawei.com Signed-off-by: Stefan Schmidt Signed-off-by: Sasha Levin --- drivers/net/ieee802154/ca8210.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 38a41651e451..deace0aadad2 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2923,6 +2923,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv) ); if (!priv->irq_workqueue) { dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n"); + destroy_workqueue(priv->mlme_workqueue); return -ENOMEM; } -- GitLab From 0ad77d7dc50113065d218c5d951a79fc37cd6a79 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sun, 2 Aug 2020 07:23:39 -0700 Subject: [PATCH 1098/1304] ieee802154/adf7242: check status of adf7242_read_reg [ Upstream commit e3914ed6cf44bfe1f169e26241f8314556fd1ac1 ] Clang static analysis reports this error adf7242.c:887:6: warning: Assigned value is garbage or undefined len = len_u8; ^ ~~~~~~ len_u8 is set in adf7242_read_reg(lp, 0, &len_u8); When this call fails, len_u8 is not set. So check the return code. 
Fixes: 7302b9d90117 ("ieee802154/adf7242: Driver for ADF7242 MAC IEEE802154") Signed-off-by: Tom Rix Acked-by: Michael Hennerich Link: https://lore.kernel.org/r/20200802142339.21091-1-trix@redhat.com Signed-off-by: Stefan Schmidt Signed-off-by: Sasha Levin --- drivers/net/ieee802154/adf7242.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 71be8524cca8..a686926bba71 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -883,7 +883,9 @@ static int adf7242_rx(struct adf7242_local *lp) int ret; u8 lqi, len_u8, *data; - adf7242_read_reg(lp, 0, &len_u8); + ret = adf7242_read_reg(lp, 0, &len_u8); + if (ret) + return ret; len = len_u8; -- GitLab From 907a6ee8b0e5691abefcc599b27ca7edee00600a Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Sun, 2 Aug 2020 19:15:41 +0800 Subject: [PATCH 1099/1304] clocksource/drivers/h8300_timer8: Fix wrong return value in h8300_8timer_init() [ Upstream commit 400d033f5a599120089b5f0c54d14d198499af5a ] In the init function, if the call to of_iomap() fails, the return value is ENXIO instead of -ENXIO. Change to the right negative errno. 
Fixes: 691f8f878290f ("clocksource/drivers/h8300_timer8: Convert init function to return error") Cc: Daniel Lezcano Signed-off-by: Tianjia Zhang Signed-off-by: Daniel Lezcano Link: https://lore.kernel.org/r/20200802111541.5429-1-tianjia.zhang@linux.alibaba.com Signed-off-by: Sasha Levin --- drivers/clocksource/h8300_timer8.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c index 1d740a8c42ab..47114c2a7cb5 100644 --- a/drivers/clocksource/h8300_timer8.c +++ b/drivers/clocksource/h8300_timer8.c @@ -169,7 +169,7 @@ static int __init h8300_8timer_init(struct device_node *node) return PTR_ERR(clk); } - ret = ENXIO; + ret = -ENXIO; base = of_iomap(node, 0); if (!base) { pr_err("failed to map registers for clockevent\n"); -- GitLab From 9f59089ee02d932b486a0b57dc5f53d682dbb575 Mon Sep 17 00:00:00 2001 From: Maximilian Luz Date: Tue, 25 Aug 2020 17:38:29 +0200 Subject: [PATCH 1100/1304] mwifiex: Increase AES key storage size to 256 bits [ Upstream commit 4afc850e2e9e781976fb2c7852ce7bac374af938 ] Following commit e18696786548 ("mwifiex: Prevent memory corruption handling keys") the mwifiex driver fails to authenticate with certain networks, specifically networks with 256 bit keys, and repeatedly asks for the password. The kernel log repeats the following lines (id and bssid redacted): mwifiex_pcie 0000:01:00.0: info: trying to associate to '' bssid mwifiex_pcie 0000:01:00.0: info: associated to bssid successfully mwifiex_pcie 0000:01:00.0: crypto keys added mwifiex_pcie 0000:01:00.0: info: successfully disconnected from : reason code 3 Tracking down this problem lead to the overflow check introduced by the aforementioned commit into mwifiex_ret_802_11_key_material_v2(). This check fails on networks with 256 bit keys due to the current storage size for AES keys in struct mwifiex_aes_param being only 128 bit. To fix this issue, increase the storage size for AES keys to 256 bit. 
Fixes: e18696786548 ("mwifiex: Prevent memory corruption handling keys") Signed-off-by: Maximilian Luz Reported-by: Kaloyan Nikolov Tested-by: Kaloyan Nikolov Reviewed-by: Dan Carpenter Reviewed-by: Brian Norris Tested-by: Brian Norris Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200825153829.38043-1-luzmaximilian@gmail.com Signed-off-by: Sasha Levin --- drivers/net/wireless/marvell/mwifiex/fw.h | 2 +- drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 1fb76d2f5d3f..8b9d0809daf6 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -953,7 +953,7 @@ struct mwifiex_tkip_param { struct mwifiex_aes_param { u8 pn[WPA_PN_SIZE]; __le16 key_len; - u8 key[WLAN_KEY_LEN_CCMP]; + u8 key[WLAN_KEY_LEN_CCMP_256]; } __packed; struct mwifiex_wapi_param { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 797c2e978394..7003767eef42 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -620,7 +620,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, key_v2 = &resp->params.key_material_v2; len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len); - if (len > WLAN_KEY_LEN_CCMP) + if (len > sizeof(key_v2->key_param_set.key_params.aes.key)) return -EINVAL; if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { @@ -636,7 +636,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, return 0; memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0, - WLAN_KEY_LEN_CCMP); + sizeof(key_v2->key_param_set.key_params.aes.key)); priv->aes_key_v2.key_param_set.key_params.aes.key_len = cpu_to_le16(len); memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key, -- 
GitLab From 8d6cd745526a5e15c80211a2ba4114150dad2f27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Thu, 27 Aug 2020 17:34:48 +0200 Subject: [PATCH 1101/1304] batman-adv: bla: fix type misuse for backbone_gw hash indexing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 097930e85f90f252c44dc0d084598265dd44ca48 ] It seems that due to a copy & paste error the void pointer in batadv_choose_backbone_gw() is cast to the wrong type. Fixing this by using "struct batadv_bla_backbone_gw" instead of "struct batadv_bla_claim" which better matches the caller's side. For now it seems that we were lucky because the two structs both have their orig/vid and addr/vid in the beginning. However I stumbled over this issue when I was trying to add some debug variables in front of "orig" in batadv_backbone_gw, which caused hash lookups to fail. Fixes: 07568d0369f9 ("batman-adv: don't rely on positions in struct for hashing") Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Sasha Levin --- net/batman-adv/bridge_loop_avoidance.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 9b8bf06ccb61..e71a35a3950d 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -96,11 +96,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size) */ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size) { - const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data; + const struct batadv_bla_backbone_gw *gw; u32 hash = 0; - hash = jhash(&claim->addr, sizeof(claim->addr), hash); - hash = jhash(&claim->vid, sizeof(claim->vid), hash); + gw = (struct batadv_bla_backbone_gw *)data; + hash = jhash(&gw->orig, sizeof(gw->orig), hash); + hash = jhash(&gw->vid, sizeof(gw->vid), hash); return hash % size; } -- GitLab From 
48fb5d1e39bcbeb397c09fe246cb092592678af9 Mon Sep 17 00:00:00 2001 From: Jing Xiangfeng Date: Fri, 4 Sep 2020 10:51:03 +0800 Subject: [PATCH 1102/1304] atm: eni: fix the missed pci_disable_device() for eni_init_one() [ Upstream commit c2b947879ca320ac5505c6c29a731ff17da5e805 ] eni_init_one() misses to call pci_disable_device() in an error path. Jump to err_disable to fix it. Fixes: ede58ef28e10 ("atm: remove deprecated use of pci api") Signed-off-by: Jing Xiangfeng Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/atm/eni.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 7323e9210f4b..38fec976e62d 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2243,7 +2243,7 @@ static int eni_init_one(struct pci_dev *pci_dev, rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); if (rc < 0) - goto out; + goto err_disable; rc = -ENOMEM; eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL); -- GitLab From e63e927da2df208304725fbceb6f585eb47ddfdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Fri, 4 Sep 2020 20:28:00 +0200 Subject: [PATCH 1103/1304] batman-adv: mcast/TT: fix wrongly dropped or rerouted packets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 7dda5b3384121181c4e79f6eaeac2b94c0622c8d ] The unicast packet rerouting code makes several assumptions. For instance it assumes that there is always exactly one destination in the TT. This breaks for multicast frames in a unicast packets in several ways: For one thing if there is actually no TT entry and the destination node was selected due to the multicast tvlv flags it announced. Then an intermediate node will wrongly drop the packet. 
For another thing if there is a TT entry but the TTVN of this entry is newer than the originally addressed destination node: Then the intermediate node will wrongly redirect the packet, leading to duplicated multicast packets at a multicast listener and missing packets at other multicast listeners or multicast routers. Fixing this by not applying the unicast packet rerouting to batman-adv unicast packets with a multicast payload. We are not able to detect a roaming multicast listener at the moment and will just continue to send the multicast frame to both the new and old destination for a while in case of such a roaming multicast listener. Fixes: a73105b8d4c7 ("batman-adv: improved client announcement mechanism") Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Sasha Levin --- net/batman-adv/routing.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index cc3ed93a6d51..98af41e3810d 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -838,6 +838,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, vid = batadv_get_vid(skb, hdr_len); ethhdr = (struct ethhdr *)(skb->data + hdr_len); + /* do not reroute multicast frames in a unicast header */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + return true; + /* check if the destination client was served by this node and it is now * roaming. In this case, it means that the node has got a ROAM_ADV * message and that it knows the new destination in the mesh to re-route -- GitLab From 788a00c1f837544bf4622ebb14d15506b4a1151d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 8 Sep 2020 03:40:25 -0700 Subject: [PATCH 1104/1304] mac802154: tx: fix use-after-free [ Upstream commit 0ff4628f4c6c1ab87eef9f16b25355cadc426d64 ] syzbot reported a bug in ieee802154_tx() [1] A similar issue in ieee802154_xmit_worker() is also fixed in this patch. 
[1] BUG: KASAN: use-after-free in ieee802154_tx+0x3d2/0x480 net/mac802154/tx.c:88 Read of size 4 at addr ffff8880251a8c70 by task syz-executor.3/928 CPU: 0 PID: 928 Comm: syz-executor.3 Not tainted 5.9.0-rc3-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x198/0x1fd lib/dump_stack.c:118 print_address_description.constprop.0.cold+0xae/0x497 mm/kasan/report.c:383 __kasan_report mm/kasan/report.c:513 [inline] kasan_report.cold+0x1f/0x37 mm/kasan/report.c:530 ieee802154_tx+0x3d2/0x480 net/mac802154/tx.c:88 ieee802154_subif_start_xmit+0xbe/0xe4 net/mac802154/tx.c:130 __netdev_start_xmit include/linux/netdevice.h:4634 [inline] netdev_start_xmit include/linux/netdevice.h:4648 [inline] dev_direct_xmit+0x4e9/0x6e0 net/core/dev.c:4203 packet_snd net/packet/af_packet.c:2989 [inline] packet_sendmsg+0x2413/0x5290 net/packet/af_packet.c:3014 sock_sendmsg_nosec net/socket.c:651 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:671 ____sys_sendmsg+0x6e8/0x810 net/socket.c:2353 ___sys_sendmsg+0xf3/0x170 net/socket.c:2407 __sys_sendmsg+0xe5/0x1b0 net/socket.c:2440 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x45d5b9 Code: 5d b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 2b b4 fb ff c3 66 2e 0f 1f 84 00 00 00 00 RSP: 002b:00007fc98e749c78 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 000000000002ccc0 RCX: 000000000045d5b9 RDX: 0000000000000000 RSI: 0000000020007780 RDI: 000000000000000b RBP: 000000000118d020 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 000000000118cfec R13: 00007fff690c720f R14: 00007fc98e74a9c0 R15: 000000000118cfec Allocated by task 928: kasan_save_stack+0x1b/0x40 mm/kasan/common.c:48 kasan_set_track mm/kasan/common.c:56 
[inline] __kasan_kmalloc.constprop.0+0xbf/0xd0 mm/kasan/common.c:461 slab_post_alloc_hook mm/slab.h:518 [inline] slab_alloc_node mm/slab.c:3254 [inline] kmem_cache_alloc_node+0x136/0x3e0 mm/slab.c:3574 __alloc_skb+0x71/0x550 net/core/skbuff.c:198 alloc_skb include/linux/skbuff.h:1094 [inline] alloc_skb_with_frags+0x92/0x570 net/core/skbuff.c:5771 sock_alloc_send_pskb+0x72a/0x880 net/core/sock.c:2348 packet_alloc_skb net/packet/af_packet.c:2837 [inline] packet_snd net/packet/af_packet.c:2932 [inline] packet_sendmsg+0x19fb/0x5290 net/packet/af_packet.c:3014 sock_sendmsg_nosec net/socket.c:651 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:671 ____sys_sendmsg+0x6e8/0x810 net/socket.c:2353 ___sys_sendmsg+0xf3/0x170 net/socket.c:2407 __sys_sendmsg+0xe5/0x1b0 net/socket.c:2440 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Freed by task 928: kasan_save_stack+0x1b/0x40 mm/kasan/common.c:48 kasan_set_track+0x1c/0x30 mm/kasan/common.c:56 kasan_set_free_info+0x1b/0x30 mm/kasan/generic.c:355 __kasan_slab_free+0xd8/0x120 mm/kasan/common.c:422 __cache_free mm/slab.c:3418 [inline] kmem_cache_free.part.0+0x74/0x1e0 mm/slab.c:3693 kfree_skbmem+0xef/0x1b0 net/core/skbuff.c:622 __kfree_skb net/core/skbuff.c:679 [inline] consume_skb net/core/skbuff.c:838 [inline] consume_skb+0xcf/0x160 net/core/skbuff.c:832 __dev_kfree_skb_any+0x9c/0xc0 net/core/dev.c:3107 fakelb_hw_xmit+0x20e/0x2a0 drivers/net/ieee802154/fakelb.c:81 drv_xmit_async net/mac802154/driver-ops.h:16 [inline] ieee802154_tx+0x282/0x480 net/mac802154/tx.c:81 ieee802154_subif_start_xmit+0xbe/0xe4 net/mac802154/tx.c:130 __netdev_start_xmit include/linux/netdevice.h:4634 [inline] netdev_start_xmit include/linux/netdevice.h:4648 [inline] dev_direct_xmit+0x4e9/0x6e0 net/core/dev.c:4203 packet_snd net/packet/af_packet.c:2989 [inline] packet_sendmsg+0x2413/0x5290 net/packet/af_packet.c:3014 sock_sendmsg_nosec net/socket.c:651 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:671 
____sys_sendmsg+0x6e8/0x810 net/socket.c:2353 ___sys_sendmsg+0xf3/0x170 net/socket.c:2407 __sys_sendmsg+0xe5/0x1b0 net/socket.c:2440 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 The buggy address belongs to the object at ffff8880251a8c00 which belongs to the cache skbuff_head_cache of size 224 The buggy address is located 112 bytes inside of 224-byte region [ffff8880251a8c00, ffff8880251a8ce0) The buggy address belongs to the page: page:0000000062b6a4f1 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x251a8 flags: 0xfffe0000000200(slab) raw: 00fffe0000000200 ffffea0000435c88 ffffea00028b6c08 ffff8880a9055d00 raw: 0000000000000000 ffff8880251a80c0 000000010000000c 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff8880251a8b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff8880251a8b80: fb fb fb fb fc fc fc fc fc fc fc fc fc fc fc fc >ffff8880251a8c00: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff8880251a8c80: fb fb fb fb fb fb fb fb fb fb fb fb fc fc fc fc ffff8880251a8d00: fc fc fc fc fc fc fc fc fa fb fb fb fb fb fb fb Fixes: 409c3b0c5f03 ("mac802154: tx: move stats tx increment") Signed-off-by: Eric Dumazet Reported-by: syzbot Cc: Alexander Aring Cc: Stefan Schmidt Cc: linux-wpan@vger.kernel.org Link: https://lore.kernel.org/r/20200908104025.4009085-1-edumazet@google.com Signed-off-by: Stefan Schmidt Signed-off-by: Sasha Levin --- net/mac802154/tx.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index bcd1a5e6ebf4..2f873a0dc583 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -42,11 +42,11 @@ void ieee802154_xmit_worker(struct work_struct *work) if (res) goto err_tx; - ieee802154_xmit_complete(&local->hw, skb, false); - dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; + ieee802154_xmit_complete(&local->hw, skb, false); + return; err_tx: @@ -86,6 
+86,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) /* async is priority, otherwise sync is fallback */ if (local->ops->xmit_async) { + unsigned int len = skb->len; + ret = drv_xmit_async(local, skb); if (ret) { ieee802154_wake_queue(&local->hw); @@ -93,7 +95,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) } dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += len; } else { local->tx_skb = skb; queue_work(local->workqueue, &local->tx_work); -- GitLab From 87f947e2bb5a11dad396a64505f30c647d5ed0ed Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 8 Sep 2020 00:04:10 +0200 Subject: [PATCH 1105/1304] bpf: Fix clobbering of r2 in bpf_gen_ld_abs [ Upstream commit e6a18d36118bea3bf497c9df4d9988b6df120689 ] Bryce reported that he saw the following with: 0: r6 = r1 1: r1 = 12 2: r0 = *(u16 *)skb[r1] The xlated sequence was incorrectly clobbering r2 with pointer value of r6 ... 0: (bf) r6 = r1 1: (b7) r1 = 12 2: (bf) r1 = r6 3: (bf) r2 = r1 4: (85) call bpf_skb_load_helper_16_no_cache#7692160 ... and hence call to the load helper never succeeded given the offset was too high. Fix it by reordering the load of r6 to r1. Other than that the insn has similar calling convention than BPF helpers, that is, r0 - r5 are scratch regs, so nothing else affected after the insn. 
Fixes: e0cea7ce988c ("bpf: implement ld_abs/ld_ind in native bpf") Reported-by: Bryce Kahle Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/cace836e4d07bb63b1a53e49c5dfb238a040c298.1599512096.git.daniel@iogearbox.net Signed-off-by: Sasha Levin --- net/core/filter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 25a2c3186e14..557bd5cc8f94 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5418,8 +5418,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig, bool indirect = BPF_MODE(orig->code) == BPF_IND; struct bpf_insn *insn = insn_buf; - /* We're guaranteed here that CTX is in R6. */ - *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); if (!indirect) { *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); } else { @@ -5427,6 +5425,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig, if (orig->imm) *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); } + /* We're guaranteed here that CTX is in R6. */ + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); switch (BPF_SIZE(orig->code)) { case BPF_B: -- GitLab From 71d4d527a17419d16360f3860fc60c23e34e7e0e Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Wed, 1 Jul 2020 09:39:49 +0200 Subject: [PATCH 1106/1304] drm/vc4/vc4_hdmi: fill ASoC card owner [ Upstream commit ec653df2a0cbc306a4bfcb0e3484d318fa779002 ] card->owner is a required property and since commit 81033c6b584b ("ALSA: core: Warn on empty module") a warning is issued if it is empty. Fix lack of it. 
This fixes following warning observed on RaspberryPi 3B board with ARM 32bit kernel and multi_v7_defconfig: ------------[ cut here ]------------ WARNING: CPU: 1 PID: 210 at sound/core/init.c:207 snd_card_new+0x378/0x398 [snd] Modules linked in: vc4(+) snd_soc_core ac97_bus snd_pcm_dmaengine bluetooth snd_pcm snd_timer crc32_arm_ce raspberrypi_hwmon snd soundcore ecdh_generic ecc bcm2835_thermal phy_generic CPU: 1 PID: 210 Comm: systemd-udevd Not tainted 5.8.0-rc1-00027-g81033c6b584b #1087 Hardware name: BCM2835 [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [] (show_stack) from [] (dump_stack+0xd4/0xe8) [] (dump_stack) from [] (__warn+0xdc/0xf4) [] (__warn) from [] (warn_slowpath_fmt+0xb0/0xb8) [] (warn_slowpath_fmt) from [] (snd_card_new+0x378/0x398 [snd]) [] (snd_card_new [snd]) from [] (snd_soc_bind_card+0x280/0x99c [snd_soc_core]) [] (snd_soc_bind_card [snd_soc_core]) from [] (devm_snd_soc_register_card+0x34/0x6c [snd_soc_core]) [] (devm_snd_soc_register_card [snd_soc_core]) from [] (vc4_hdmi_bind+0x43c/0x5f4 [vc4]) [] (vc4_hdmi_bind [vc4]) from [] (component_bind_all+0xec/0x24c) [] (component_bind_all) from [] (vc4_drm_bind+0xd4/0x174 [vc4]) [] (vc4_drm_bind [vc4]) from [] (try_to_bring_up_master+0x160/0x1b0) [] (try_to_bring_up_master) from [] (component_master_add_with_match+0xd0/0x104) [] (component_master_add_with_match) from [] (vc4_platform_drm_probe+0x9c/0xbc [vc4]) [] (vc4_platform_drm_probe [vc4]) from [] (platform_drv_probe+0x6c/0xa4) [] (platform_drv_probe) from [] (really_probe+0x210/0x350) [] (really_probe) from [] (driver_probe_device+0x5c/0xb4) [] (driver_probe_device) from [] (device_driver_attach+0x58/0x60) [] (device_driver_attach) from [] (__driver_attach+0x80/0xbc) [] (__driver_attach) from [] (bus_for_each_dev+0x68/0xb4) [] (bus_for_each_dev) from [] (bus_add_driver+0x130/0x1e8) [] (bus_add_driver) from [] (driver_register+0x78/0x110) [] (driver_register) from [] (do_one_initcall+0x50/0x220) [] (do_one_initcall) from [] 
(do_init_module+0x60/0x210) [] (do_init_module) from [] (load_module+0x1e34/0x2338) [] (load_module) from [] (sys_finit_module+0xac/0xbc) [] (sys_finit_module) from [] (ret_fast_syscall+0x0/0x54) Exception stack(0xeded9fa8 to 0xeded9ff0) ... ---[ end trace 6414689569c2bc08 ]--- Fixes: bb7d78568814 ("drm/vc4: Add HDMI audio support") Suggested-by: Takashi Iwai Signed-off-by: Marek Szyprowski Tested-by: Stefan Wahren Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20200701073949.28941-1-m.szyprowski@samsung.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/vc4/vc4_hdmi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 86b98856756d..116166266457 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1134,6 +1134,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) card->num_links = 1; card->name = "vc4-hdmi"; card->dev = dev; + card->owner = THIS_MODULE; /* * Be careful, snd_soc_register_card() calls dev_set_drvdata() and -- GitLab From 9349fed2312da12209413401d62a78f12950ea2d Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Wed, 9 Sep 2020 20:43:10 +0300 Subject: [PATCH 1107/1304] net: qed: RDMA personality shouldn't fail VF load [ Upstream commit ce1cf9e5025f4e2d2198728391f1847b3e168bc6 ] Fix the assert during VF driver installation when the personality is iWARP Fixes: 1fe614d10f45 ("qed: Relax VF firmware requirements") Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: Dmitry Bogdanov Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 71a7af134dd8..886c7aae662f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -96,6 +96,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) p_ramrod->personality = PERSONALITY_ETH; break; case QED_PCI_ETH_ROCE: + case QED_PCI_ETH_IWARP: p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; break; default: -- GitLab From 1ed9a527e6220fac25f8992941569d9f08b98b44 Mon Sep 17 00:00:00 2001 From: Martin Cerveny Date: Sun, 6 Sep 2020 18:21:39 +0200 Subject: [PATCH 1108/1304] drm/sun4i: sun8i-csc: Secondary CSC register correction [ Upstream commit cab4c03b4ba54c8d9378298cacb8bc0fd74ceece ] "Allwinner V3s" has secondary video layer (VI). Decoded video is displayed in wrong colors until secondary CSC registers are programmed correctly. 
Fixes: 883029390550 ("drm/sun4i: Add DE2 CSC library") Signed-off-by: Martin Cerveny Reviewed-by: Jernej Skrabec Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20200906162140.5584-2-m.cerveny@computer.org Signed-off-by: Sasha Levin --- drivers/gpu/drm/sun4i/sun8i_csc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h index 880e8fbb0855..242752b2d328 100644 --- a/drivers/gpu/drm/sun4i/sun8i_csc.h +++ b/drivers/gpu/drm/sun4i/sun8i_csc.h @@ -14,7 +14,7 @@ struct sun8i_mixer; /* VI channel CSC units offsets */ #define CCSC00_OFFSET 0xAA050 -#define CCSC01_OFFSET 0xFA000 +#define CCSC01_OFFSET 0xFA050 #define CCSC10_OFFSET 0xA0000 #define CCSC11_OFFSET 0xF0000 -- GitLab From 14d60e8488156da66cbd210219bcae2b3aa6b14f Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 14 Sep 2020 13:58:16 +0200 Subject: [PATCH 1109/1304] batman-adv: Add missing include for in_interrupt() [ Upstream commit 4bba9dab86b6ac15ca560ef1f2b5aa4529cbf784 ] The fix for receiving (internally generated) bla packets outside the interrupt context introduced the usage of in_interrupt(). But this functionality is only defined in linux/preempt.h which was not included with the same patch. 
Fixes: 279e89b2281a ("batman-adv: bla: use netif_rx_ni when not in interrupt context") Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Sasha Levin --- net/batman-adv/bridge_loop_avoidance.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index e71a35a3950d..557d7fdf0b8d 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include -- GitLab From 5ccdc2780653f87de601770f3b53ec3f37bb7942 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Tue, 15 Sep 2020 09:54:09 +0200 Subject: [PATCH 1110/1304] batman-adv: mcast: fix duplicate mcast packets in BLA backbone from mesh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 74c09b7275126da1b642b90c9cdc3ae8b729ad4b ] Scenario: * Multicast frame send from mesh to a BLA backbone (multiple nodes with their bat0 bridged together, with BLA enabled) Issue: * BLA backbone nodes receive the frame multiple times on bat0, once from mesh->bat0 and once from each backbone_gw from LAN For unicast, a node will send only to the best backbone gateway according to the TQ. However for multicast we currently cannot determine if multiple destination nodes share the same backbone if they don't share the same backbone with us. So we need to keep sending the unicasts to all backbone gateways and let the backbone gateways decide which one will forward the frame. We can use the CLAIM mechanism to make this decision. One catch: The batman-adv gateway feature for DHCP packets potentially sends multicast packets in the same batman-adv unicast header as the multicast optimizations code. And we are not allowed to drop those even if we did not claim the source address of the sender, as for such packets there is only this one multicast-in-unicast packet. 
How can we distinguish the two cases? The gateway feature uses a batman-adv unicast 4 address header. While the multicast-to-unicasts feature uses a simple, 3 address batman-adv unicast header. So let's use this to distinguish. Fixes: fe2da6ff27c7 ("batman-adv: check incoming packet type for bla") Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Sasha Levin --- net/batman-adv/bridge_loop_avoidance.c | 34 +++++++++++++++++++------- net/batman-adv/bridge_loop_avoidance.h | 4 +-- net/batman-adv/soft-interface.c | 6 ++--- 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 557d7fdf0b8d..3f76872d411b 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1827,7 +1827,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame - * @is_bcast: the packet came in a broadcast packet type. + * @packet_type: the batman packet type this frame came in * * batadv_bla_rx avoidance checks if: * * we have to race for a claim @@ -1839,7 +1839,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * further process the skb. 
*/ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast) + unsigned short vid, int packet_type) { struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; @@ -1861,9 +1861,24 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto handled; if (unlikely(atomic_read(&bat_priv->bla.num_requests))) - /* don't allow broadcasts while requests are in flight */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) - goto handled; + /* don't allow multicast packets while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + /* Both broadcast flooding or multicast-via-unicasts + * delivery might send to multiple backbone gateways + * sharing the same LAN and therefore need to coordinate + * which backbone gateway forwards into the LAN, + * by claiming the payload source address. + * + * Broadcast flooding and multicast-via-unicasts + * delivery use the following two batman packet types. + * Note: explicitly exclude BATADV_UNICAST_4ADDR, + * as the DHCP gateway feature will send explicitly + * to only one BLA gateway, so the claiming process + * should be avoided there. + */ + if (packet_type == BATADV_BCAST || + packet_type == BATADV_UNICAST) + goto handled; ether_addr_copy(search_claim.addr, ethhdr->h_source); search_claim.vid = vid; @@ -1898,13 +1913,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto allow; } - /* if it is a broadcast ... */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) { + /* if it is a multicast ... */ + if (is_multicast_ether_addr(ethhdr->h_dest) && + (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) { /* ... drop it. the responsible gateway is in charge. * - * We need to check is_bcast because with the gateway + * We need to check packet type because with the gateway * feature, broadcasts (like DHCP requests) may be sent - * using a unicast packet type. 
+ * using a unicast 4 address packet type. See comment above. */ goto handled; } else { diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 71f95a3e4d3f..af28fdb01467 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -48,7 +48,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac) #ifdef CONFIG_BATMAN_ADV_BLA bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast); + unsigned short vid, int packet_type); bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid); bool batadv_bla_is_backbone_gw(struct sk_buff *skb, @@ -79,7 +79,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr, static inline bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, - bool is_bcast) + int packet_type) { return false; } diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index a2976adeeedc..6ff78080ec7f 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -426,10 +426,10 @@ void batadv_interface_rx(struct net_device *soft_iface, struct vlan_ethhdr *vhdr; struct ethhdr *ethhdr; unsigned short vid; - bool is_bcast; + int packet_type; batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; - is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); + packet_type = batadv_bcast_packet->packet_type; skb_pull_rcsum(skb, hdr_size); skb_reset_mac_header(skb); @@ -472,7 +472,7 @@ void batadv_interface_rx(struct net_device *soft_iface, /* Let the bridge loop avoidance check the packet. If will * not handle it, we can safely push it up. 
*/ - if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) + if (batadv_bla_rx(bat_priv, skb, vid, packet_type)) goto out; if (orig_node) -- GitLab From 41f5e62866f0ceb31a825dc91f0440727dbb9495 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Tue, 15 Sep 2020 09:54:10 +0200 Subject: [PATCH 1111/1304] batman-adv: mcast: fix duplicate mcast packets from BLA backbone to mesh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 2369e827046920ef0599e6a36b975ac5c0a359c2 ] Scenario: * Multicast frame send from BLA backbone gateways (multiple nodes with their bat0 bridged together, with BLA enabled) sharing the same LAN to nodes in the mesh Issue: * Nodes receive the frame multiple times on bat0 from the mesh, once from each foreign BLA backbone gateway which shares the same LAN with another For multicast frames via batman-adv broadcast packets coming from the same BLA backbone but from different backbone gateways duplicates are currently detected via a CRC history of previously received packets. However this CRC so far was not performed for multicast frames received via batman-adv unicast packets. Fixing this by appyling the same check for such packets, too. Room for improvements in the future: Ideally we would introduce the possibility to not only claim a client, but a complete originator, too. This would allow us to only send a multicast-in-unicast packet from a BLA backbone gateway claiming the node and by that avoid potential redundant transmissions in the first place. 
Fixes: 279e89b2281a ("batman-adv: add broadcast duplicate check") Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Sasha Levin --- net/batman-adv/bridge_loop_avoidance.c | 103 +++++++++++++++++++++---- 1 file changed, 87 insertions(+), 16 deletions(-) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 3f76872d411b..1401031f4bb4 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1594,13 +1594,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv) } /** - * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. + * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup. * @bat_priv: the bat priv with all the soft interface information - * @skb: contains the bcast_packet to be checked + * @skb: contains the multicast packet to be checked + * @payload_ptr: pointer to position inside the head buffer of the skb + * marking the start of the data to be CRC'ed + * @orig: originator mac address, NULL if unknown * - * check if it is on our broadcast list. Another gateway might - * have sent the same packet because it is connected to the same backbone, - * so we have to remove this duplicate. + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. * * This is performed by checking the CRC, which will tell us * with a good chance that it is the same packet. If it is furthermore @@ -1609,19 +1612,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv) * * Return: true if a packet is in the duplicate list, false otherwise. 
*/ -bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, - struct sk_buff *skb) +static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb, u8 *payload_ptr, + const u8 *orig) { - int i, curr; - __be32 crc; - struct batadv_bcast_packet *bcast_packet; struct batadv_bcast_duplist_entry *entry; bool ret = false; - - bcast_packet = (struct batadv_bcast_packet *)skb->data; + int i, curr; + __be32 crc; /* calculate the crc ... */ - crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1)); + crc = batadv_skb_crc32(skb, payload_ptr); spin_lock_bh(&bat_priv->bla.bcast_duplist_lock); @@ -1640,8 +1641,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, if (entry->crc != crc) continue; - if (batadv_compare_eth(entry->orig, bcast_packet->orig)) - continue; + /* are the originators both known and not anonymous? */ + if (orig && !is_zero_ether_addr(orig) && + !is_zero_ether_addr(entry->orig)) { + /* If known, check if the new frame came from + * the same originator: + * We are safe to take identical frames from the + * same orig, if known, as multiplications in + * the mesh are detected via the (orig, seqno) pair. + * So we can be a bit more liberal here and allow + * identical frames from the same orig which the source + * host might have sent multiple times on purpose. + */ + if (batadv_compare_eth(entry->orig, orig)) + continue; + } /* this entry seems to match: same crc, not too old, * and from another gw. therefore return true to forbid it. 
@@ -1657,7 +1671,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, entry = &bat_priv->bla.bcast_duplist[curr]; entry->crc = crc; entry->entrytime = jiffies; - ether_addr_copy(entry->orig, bcast_packet->orig); + + /* known originator */ + if (orig) + ether_addr_copy(entry->orig, orig); + /* anonymous originator */ + else + eth_zero_addr(entry->orig); + bat_priv->bla.bcast_duplist_curr = curr; out: @@ -1666,6 +1687,48 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, return ret; } +/** + * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup. + * @bat_priv: the bat priv with all the soft interface information + * @skb: contains the multicast packet to be checked, decapsulated from a + * unicast_packet + * + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. + * + * Return: true if a packet is in the duplicate list, false otherwise. + */ +static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) +{ + return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL); +} + +/** + * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. + * @bat_priv: the bat priv with all the soft interface information + * @skb: contains the bcast_packet to be checked + * + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. + * + * Return: true if a packet is in the duplicate list, false otherwise. 
+ */ +bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) +{ + struct batadv_bcast_packet *bcast_packet; + u8 *payload_ptr; + + bcast_packet = (struct batadv_bcast_packet *)skb->data; + payload_ptr = (u8 *)(bcast_packet + 1); + + return batadv_bla_check_duplist(bat_priv, skb, payload_ptr, + bcast_packet->orig); +} + /** * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for * the VLAN identified by vid. @@ -1880,6 +1943,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, packet_type == BATADV_UNICAST) goto handled; + /* potential duplicates from foreign BLA backbone gateways via + * multicast-in-unicast packets + */ + if (is_multicast_ether_addr(ethhdr->h_dest) && + packet_type == BATADV_UNICAST && + batadv_bla_check_ucast_duplist(bat_priv, skb)) + goto handled; + ether_addr_copy(search_claim.addr, ethhdr->h_source); search_claim.vid = vid; claim = batadv_claim_hash_find(bat_priv, &search_claim); -- GitLab From e1a75e94a3acf78e6afdd548a5d504fc29cbc953 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 15 Sep 2020 17:44:01 -0700 Subject: [PATCH 1112/1304] bpf: Fix a rcu warning for bpffs map pretty-print [ Upstream commit ce880cb825fcc22d4e39046a6c3a3a7f6603883d ] Running selftest ./btf_btf -p the kernel had the following warning: [ 51.528185] WARNING: CPU: 3 PID: 1756 at kernel/bpf/hashtab.c:717 htab_map_get_next_key+0x2eb/0x300 [ 51.529217] Modules linked in: [ 51.529583] CPU: 3 PID: 1756 Comm: test_btf Not tainted 5.9.0-rc1+ #878 [ 51.530346] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.9.3-1.el7.centos 04/01/2014 [ 51.531410] RIP: 0010:htab_map_get_next_key+0x2eb/0x300 ... 
[ 51.542826] Call Trace: [ 51.543119] map_seq_next+0x53/0x80 [ 51.543528] seq_read+0x263/0x400 [ 51.543932] vfs_read+0xad/0x1c0 [ 51.544311] ksys_read+0x5f/0xe0 [ 51.544689] do_syscall_64+0x33/0x40 [ 51.545116] entry_SYSCALL_64_after_hwframe+0x44/0xa9 The related source code in kernel/bpf/hashtab.c: 709 static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) 710 { 711 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 712 struct hlist_nulls_head *head; 713 struct htab_elem *l, *next_l; 714 u32 hash, key_size; 715 int i = 0; 716 717 WARN_ON_ONCE(!rcu_read_lock_held()); In kernel/bpf/inode.c, bpffs map pretty print calls map->ops->map_get_next_key() without holding a rcu_read_lock(), hence causing the above warning. To fix the issue, just surrounding map->ops->map_get_next_key() with rcu read lock. Fixes: a26ca7c982cb ("bpf: btf: Add pretty print support to the basic arraymap") Reported-by: Alexei Starovoitov Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Cc: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200916004401.146277-1-yhs@fb.com Signed-off-by: Sasha Levin --- kernel/bpf/inode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index c04815bb15cc..11fade89c1f3 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -207,10 +207,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos) else prev_key = key; + rcu_read_lock(); if (map->ops->map_get_next_key(map, prev_key, key)) { map_iter(m)->done = true; - return NULL; + key = NULL; } + rcu_read_unlock(); return key; } -- GitLab From f37ace9a29866ead7785f2c75f70f3840e774540 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sun, 13 Sep 2020 09:52:30 -0700 Subject: [PATCH 1113/1304] ALSA: asihpi: fix iounmap in error handler [ Upstream commit 472eb39103e885f302fd8fd6eff104fcf5503f1b ] clang static analysis flags this problem hpioctl.c:513:7: warning: 
Branch condition evaluates to a garbage value if (pci.ap_mem_base[idx]) { ^~~~~~~~~~~~~~~~~~~~ If there is a failure in the middle of the memory space loop, only some of the memory spaces need to be cleaned up. At the error handler, idx holds the number of successful memory spaces mapped. So rework the handler loop to use the old idx. There is a second problem, the memory space loop conditionally iomaps()/sets the mem_base so it is necessay to initize pci. Fixes: 719f82d3987a ("ALSA: Add support of AudioScience ASI boards") Signed-off-by: Tom Rix Link: https://lore.kernel.org/r/20200913165230.17166-1-trix@redhat.com Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/pci/asihpi/hpioctl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 7d049569012c..3f06986fbecf 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c @@ -350,7 +350,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, struct hpi_message hm; struct hpi_response hr; struct hpi_adapter adapter; - struct hpi_pci pci; + struct hpi_pci pci = { 0 }; memset(&adapter, 0, sizeof(adapter)); @@ -506,7 +506,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, return 0; err: - for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) { + while (--idx >= 0) { if (pci.ap_mem_base[idx]) { iounmap(pci.ap_mem_base[idx]); pci.ap_mem_base[idx] = NULL; -- GitLab From 7b038e4deb458b977a15ab68923e0483778ebcb8 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Thu, 17 Sep 2020 18:34:04 +0300 Subject: [PATCH 1114/1304] regmap: fix page selection for noinc reads [ Upstream commit 4003324856311faebb46cbd56a1616bd3f3b67c2 ] Non-incrementing reads can fail if register + length crosses page border. However for non-incrementing reads we should not check for page border crossing. Fix this by passing additional flag to _regmap_raw_read and passing length to _regmap_select_page basing on the flag. 
Signed-off-by: Dmitry Baryshkov Fixes: 74fe7b551f33 ("regmap: Add regmap_noinc_read API") Link: https://lore.kernel.org/r/20200917153405.3139200-1-dmitry.baryshkov@linaro.org Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/base/regmap/regmap.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index d26b485ccc7d..e8b3353c18eb 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2367,7 +2367,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg, EXPORT_SYMBOL_GPL(regmap_raw_write_async); static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, - unsigned int val_len) + unsigned int val_len, bool noinc) { struct regmap_range_node *range; int ret; @@ -2380,7 +2380,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, range = _regmap_range_lookup(map, reg); if (range) { ret = _regmap_select_page(map, ®, range, - val_len / map->format.val_bytes); + noinc ? 
1 : val_len / map->format.val_bytes); if (ret != 0) return ret; } @@ -2418,7 +2418,7 @@ static int _regmap_bus_read(void *context, unsigned int reg, if (!map->format.parse_val) return -EINVAL; - ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes); + ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false); if (ret == 0) *val = map->format.parse_val(work_val); @@ -2536,7 +2536,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, /* Read bytes that fit into whole chunks */ for (i = 0; i < chunk_count; i++) { - ret = _regmap_raw_read(map, reg, val, chunk_bytes); + ret = _regmap_raw_read(map, reg, val, chunk_bytes, false); if (ret != 0) goto out; @@ -2547,7 +2547,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, /* Read remaining bytes */ if (val_len) { - ret = _regmap_raw_read(map, reg, val, val_len); + ret = _regmap_raw_read(map, reg, val, val_len, false); if (ret != 0) goto out; } @@ -2622,7 +2622,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg, read_len = map->max_raw_read; else read_len = val_len; - ret = _regmap_raw_read(map, reg, val, read_len); + ret = _regmap_raw_read(map, reg, val, read_len, true); if (ret) goto out_unlock; val = ((u8 *)val) + read_len; -- GitLab From 81998b8fc6a5d13b5ff4130ff0fde2e91f1fc3a6 Mon Sep 17 00:00:00 2001 From: Wei Li Date: Wed, 23 Sep 2020 14:53:12 +0800 Subject: [PATCH 1115/1304] MIPS: Add the missing 'CPU_1074K' into __get_cpu_type() [ Upstream commit e393fbe6fa27af23f78df6e16a8fd2963578a8c4 ] Commit 442e14a2c55e ("MIPS: Add 1074K CPU support explicitly.") split 1074K from the 74K as an unique CPU type, while it missed to add the 'CPU_1074K' in __get_cpu_type(). So let's add it back. 
Fixes: 442e14a2c55e ("MIPS: Add 1074K CPU support explicitly.") Signed-off-by: Wei Li Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/include/asm/cpu-type.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index a45af3de075d..d43e4ab20b23 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_34K: case CPU_1004K: case CPU_74K: + case CPU_1074K: case CPU_M14KC: case CPU_M14KEC: case CPU_INTERAPTIV: -- GitLab From cc868976fbfd60805f8ed9b67fba9ec1ac5226f0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 23 Sep 2020 14:53:52 -0700 Subject: [PATCH 1116/1304] KVM: x86: Reset MMU context if guest toggles CR4.SMAP or CR4.PKE [ Upstream commit 8d214c481611b29458a57913bd786f0ac06f0605 ] Reset the MMU context during kvm_set_cr4() if SMAP or PKE is toggled. Recent commits to (correctly) not reload PDPTRs when SMAP/PKE are toggled inadvertantly skipped the MMU context reset due to the mask of bits that triggers PDPTR loads also being used to trigger MMU context resets. 
Fixes: 427890aff855 ("kvm: x86: Toggling CR4.SMAP does not load PDPTEs in PAE mode") Fixes: cb957adb4ea4 ("kvm: x86: Toggling CR4.PKE does not load PDPTEs in PAE mode") Cc: Jim Mattson Cc: Peter Shier Cc: Oliver Upton Signed-off-by: Sean Christopherson Message-Id: <20200923215352.17756-1-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- arch/x86/kvm/x86.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 620ed1fa3511..dd182228be71 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -858,6 +858,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP; + unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE; if (kvm_valid_cr4(vcpu, cr4)) return 1; @@ -885,7 +886,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; - if (((cr4 ^ old_cr4) & pdptr_bits) || + if (((cr4 ^ old_cr4) & mmu_role_bits) || (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_mmu_reset_context(vcpu); -- GitLab From e794df7b5426c031d07e362d14c5785ced2c1ef3 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 24 Sep 2020 13:41:57 -0500 Subject: [PATCH 1117/1304] KVM: SVM: Add a dedicated INVD intercept routine [ Upstream commit 4bb05f30483fd21ea5413eaf1182768f251cf625 ] The INVD instruction intercept performs emulation. Emulation can't be done on an SEV guest because the guest memory is encrypted. Provide a dedicated intercept routine for the INVD intercept. And since the instruction is emulated as a NOP, just skip it instead. 
Fixes: 1654efcbc431 ("KVM: SVM: Add KVM_SEV_INIT command") Signed-off-by: Tom Lendacky Message-Id: Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- arch/x86/kvm/svm.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 2aafb6c79134..cb09a0ec8750 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3942,6 +3942,12 @@ static int iret_interception(struct vcpu_svm *svm) return 1; } +static int invd_interception(struct vcpu_svm *svm) +{ + /* Treat an INVD instruction as a NOP and just skip it. */ + return kvm_skip_emulated_instruction(&svm->vcpu); +} + static int invlpg_interception(struct vcpu_svm *svm) { if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) @@ -4831,7 +4837,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_RDPMC] = rdpmc_interception, [SVM_EXIT_CPUID] = cpuid_interception, [SVM_EXIT_IRET] = iret_interception, - [SVM_EXIT_INVD] = emulate_on_interception, + [SVM_EXIT_INVD] = invd_interception, [SVM_EXIT_PAUSE] = pause_interception, [SVM_EXIT_HLT] = halt_interception, [SVM_EXIT_INVLPG] = invlpg_interception, -- GitLab From 240dd5118a9e0454f280ffeae63f22bd14735733 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 7 Sep 2020 06:58:45 -0700 Subject: [PATCH 1118/1304] tracing: fix double free commit 46bbe5c671e06f070428b9be142cc4ee5cedebac upstream. clang static analyzer reports this problem trace_events_hist.c:3824:3: warning: Attempt to free released memory kfree(hist_data->attrs->var_defs.name[i]); In parse_var_defs() if there is a problem allocating var_defs.expr, the earlier var_defs.name is freed. This free is duplicated by free_var_defs() which frees the rest of the list. Because free_var_defs() has to run anyway, remove the second free fom parse_var_defs(). 
Link: https://lkml.kernel.org/r/20200907135845.15804-1-trix@redhat.com Cc: stable@vger.kernel.org Fixes: 30350d65ac56 ("tracing: Add variable support to hist triggers") Reviewed-by: Tom Zanussi Signed-off-by: Tom Rix Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events_hist.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index dbd3c97d1501..3ed2d7f7e571 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -4225,7 +4225,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data) s = kstrdup(field_str, GFP_KERNEL); if (!s) { - kfree(hist_data->attrs->var_defs.name[n_vars]); ret = -ENOMEM; goto free; } -- GitLab From 9ab4bc95f4de9419639972aa9709b81945137669 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20H=C3=B6ppner?= Date: Mon, 14 Sep 2020 13:56:47 +0200 Subject: [PATCH 1119/1304] s390/dasd: Fix zero write for FBA devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 709192d531e5b0a91f20aa14abfe2fc27ddd47af upstream. A discard request that writes zeros using the global kernel internal ZERO_PAGE will fail for machines with more than 2GB of memory due to the location of the ZERO_PAGE. Fix this by using a driver owned global zero page allocated with GFP_DMA flag set. 
Fixes: 28b841b3a7cb ("s390/dasd: Add discard support for FBA devices") Signed-off-by: Jan Höppner Reviewed-by: Stefan Haberland Cc: # 4.14+ Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/s390/block/dasd_fba.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 56007a3e7f11..fab09455ba94 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -40,6 +40,7 @@ MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_fba_discipline; +static void *dasd_fba_zero_page; struct dasd_fba_private { struct dasd_fba_characteristics rdc_data; @@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count) ccw->cmd_code = DASD_FBA_CCW_WRITE; ccw->flags |= CCW_FLAG_SLI; ccw->count = count; - ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0)); + ccw->cda = (__u32) (addr_t) dasd_fba_zero_page; } /* @@ -811,6 +812,11 @@ dasd_fba_init(void) int ret; ASCEBC(dasd_fba_discipline.ebcname, 4); + + dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!dasd_fba_zero_page) + return -ENOMEM; + ret = ccw_driver_register(&dasd_fba_driver); if (!ret) wait_for_device_probe(); @@ -822,6 +828,7 @@ static void __exit dasd_fba_cleanup(void) { ccw_driver_unregister(&dasd_fba_driver); + free_page((unsigned long)dasd_fba_zero_page); } module_init(dasd_fba_init); -- GitLab From ce7ff920092130f249b75f9fe177edb3362fefe8 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 1 Sep 2020 00:12:07 +0900 Subject: [PATCH 1120/1304] kprobes: Fix to check probe enabled before disarm_kprobe_ftrace() commit 3031313eb3d549b7ad6f9fbcc52ba04412e3eb9e upstream. Commit 0cb2f1372baa ("kprobes: Fix NULL pointer dereference at kprobe_ftrace_handler") fixed one bug but not completely fixed yet. If we run a kprobe_module.tc of ftracetest, kernel showed a warning as below. 
# ./ftracetest test.d/kprobe/kprobe_module.tc === Ftrace unit tests === [1] Kprobe dynamic event - probing module ... [ 22.400215] ------------[ cut here ]------------ [ 22.400962] Failed to disarm kprobe-ftrace at trace_printk_irq_work+0x0/0x7e [trace_printk] (-2) [ 22.402139] WARNING: CPU: 7 PID: 200 at kernel/kprobes.c:1091 __disarm_kprobe_ftrace.isra.0+0x7e/0xa0 [ 22.403358] Modules linked in: trace_printk(-) [ 22.404028] CPU: 7 PID: 200 Comm: rmmod Not tainted 5.9.0-rc2+ #66 [ 22.404870] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1 04/01/2014 [ 22.406139] RIP: 0010:__disarm_kprobe_ftrace.isra.0+0x7e/0xa0 [ 22.406947] Code: 30 8b 03 eb c9 80 3d e5 09 1f 01 00 75 dc 49 8b 34 24 89 c2 48 c7 c7 a0 c2 05 82 89 45 e4 c6 05 cc 09 1f 01 01 e8 a9 c7 f0 ff <0f> 0b 8b 45 e4 eb b9 89 c6 48 c7 c7 70 c2 05 82 89 45 e4 e8 91 c7 [ 22.409544] RSP: 0018:ffffc90000237df0 EFLAGS: 00010286 [ 22.410385] RAX: 0000000000000000 RBX: ffffffff83066024 RCX: 0000000000000000 [ 22.411434] RDX: 0000000000000001 RSI: ffffffff810de8d3 RDI: ffffffff810de8d3 [ 22.412687] RBP: ffffc90000237e10 R08: 0000000000000001 R09: 0000000000000001 [ 22.413762] R10: 0000000000000000 R11: 0000000000000001 R12: ffff88807c478640 [ 22.414852] R13: ffffffff8235ebc0 R14: ffffffffa00060c0 R15: 0000000000000000 [ 22.415941] FS: 00000000019d48c0(0000) GS:ffff88807d7c0000(0000) knlGS:0000000000000000 [ 22.417264] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 22.418176] CR2: 00000000005bb7e3 CR3: 0000000078f7a000 CR4: 00000000000006a0 [ 22.419309] Call Trace: [ 22.419990] kill_kprobe+0x94/0x160 [ 22.420652] kprobes_module_callback+0x64/0x230 [ 22.421470] notifier_call_chain+0x4f/0x70 [ 22.422184] blocking_notifier_call_chain+0x49/0x70 [ 22.422979] __x64_sys_delete_module+0x1ac/0x240 [ 22.423733] do_syscall_64+0x38/0x50 [ 22.424366] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 22.425176] RIP: 0033:0x4bb81d [ 22.425741] Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 
48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 e0 ff ff ff f7 d8 64 89 01 48 [ 22.428726] RSP: 002b:00007ffc70fef008 EFLAGS: 00000246 ORIG_RAX: 00000000000000b0 [ 22.430169] RAX: ffffffffffffffda RBX: 00000000019d48a0 RCX: 00000000004bb81d [ 22.431375] RDX: 0000000000000000 RSI: 0000000000000880 RDI: 00007ffc70fef028 [ 22.432543] RBP: 0000000000000880 R08: 00000000ffffffff R09: 00007ffc70fef320 [ 22.433692] R10: 0000000000656300 R11: 0000000000000246 R12: 00007ffc70fef028 [ 22.434635] R13: 0000000000000000 R14: 0000000000000002 R15: 0000000000000000 [ 22.435682] irq event stamp: 1169 [ 22.436240] hardirqs last enabled at (1179): [] console_unlock+0x422/0x580 [ 22.437466] hardirqs last disabled at (1188): [] console_unlock+0x7b/0x580 [ 22.438608] softirqs last enabled at (866): [] __do_softirq+0x38e/0x490 [ 22.439637] softirqs last disabled at (859): [] asm_call_on_stack+0x12/0x20 [ 22.440690] ---[ end trace 1e7ce7e1e4567276 ]--- [ 22.472832] trace_kprobe: This probe might be able to register after target module is loaded. Continue. This is because the kill_kprobe() calls disarm_kprobe_ftrace() even if the given probe is not enabled. In that case, ftrace_set_filter_ip() fails because the given probe point is not registered to ftrace. Fix to check the given (going) probe is enabled before invoking disarm_kprobe_ftrace(). Link: https://lkml.kernel.org/r/159888672694.1411785.5987998076694782591.stgit@devnote2 Fixes: 0cb2f1372baa ("kprobes: Fix NULL pointer dereference at kprobe_ftrace_handler") Cc: Ingo Molnar Cc: "Naveen N . 
Rao" Cc: Anil S Keshavamurthy Cc: David Miller Cc: Muchun Song Cc: Chengming Zhou Cc: stable@vger.kernel.org Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/kprobes.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 230d9d599b5a..915c2ce474fd 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2083,9 +2083,10 @@ static void kill_kprobe(struct kprobe *p) /* * The module is going away. We should disarm the kprobe which - * is using ftrace. + * is using ftrace, because ftrace framework is still available at + * MODULE_STATE_GOING notification. */ - if (kprobe_ftrace(p)) + if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) disarm_kprobe_ftrace(p); } -- GitLab From f3e8ed3d33fa963f1b6827977696235852cdd8d9 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 25 Sep 2020 21:19:01 -0700 Subject: [PATCH 1121/1304] mm, THP, swap: fix allocating cluster for swapfile by mistake commit 41663430588c737dd735bad5a0d1ba325dcabd59 upstream. SWP_FS is used to make swap_{read,write}page() go through the filesystem, and it's only used for swap files over NFS. So, !SWP_FS means non NFS for now, it could be either file backed or device backed. Something similar goes with legacy SWP_FILE. So in order to achieve the goal of the original patch, SWP_BLKDEV should be used instead. FS corruption can be observed with SSD device + XFS + fragmented swapfile due to CONFIG_THP_SWAP=y. 
I reproduced the issue with the following details: Environment: QEMU + upstream kernel + buildroot + NVMe (2 GB) Kernel config: CONFIG_BLK_DEV_NVME=y CONFIG_THP_SWAP=y Some reproducible steps: mkfs.xfs -f /dev/nvme0n1 mkdir /tmp/mnt mount /dev/nvme0n1 /tmp/mnt bs="32k" sz="1024m" # doesn't matter too much, I also tried 16m xfs_io -f -c "pwrite -R -b $bs 0 $sz" -c "fdatasync" /tmp/mnt/sw xfs_io -f -c "pwrite -R -b $bs 0 $sz" -c "fdatasync" /tmp/mnt/sw xfs_io -f -c "pwrite -R -b $bs 0 $sz" -c "fdatasync" /tmp/mnt/sw xfs_io -f -c "pwrite -F -S 0 -b $bs 0 $sz" -c "fdatasync" /tmp/mnt/sw xfs_io -f -c "pwrite -R -b $bs 0 $sz" -c "fsync" /tmp/mnt/sw mkswap /tmp/mnt/sw swapon /tmp/mnt/sw stress --vm 2 --vm-bytes 600M # doesn't matter too much as well Symptoms: - FS corruption (e.g. checksum failure) - memory corruption at: 0xd2808010 - segfault Fixes: f0eea189e8e9 ("mm, THP, swap: Don't allocate huge cluster for file backed swap device") Fixes: 38d8b4e6bdc8 ("mm, THP, swap: delay splitting THP during swap out") Signed-off-by: Gao Xiang Signed-off-by: Andrew Morton Reviewed-by: "Huang, Ying" Reviewed-by: Yang Shi Acked-by: Rafael Aquini Cc: Matthew Wilcox Cc: Carlos Maiolino Cc: Eric Sandeen Cc: Dave Chinner Cc: Link: https://lkml.kernel.org/r/20200820045323.7809-1-hsiangkao@redhat.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/swapfile.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index c3684cfa9534..adeb49fcad23 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -998,7 +998,7 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size) goto nextsi; } if (size == SWAPFILE_CLUSTER) { - if (!(si->flags & SWP_FILE)) + if (si->flags & SWP_BLKDEV) n_ret = swap_alloc_cluster(si, swp_entries); } else n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, -- GitLab From 1c10b4b35a9faa1fc5373c00862aea6142cce437 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 21 Sep 2020 12:48:36 
+0200 Subject: [PATCH 1122/1304] s390/zcrypt: Fix ZCRYPT_PERDEV_REQCNT ioctl commit f7e80983f0cf470bb82036e73bff4d5a7daf8fc2 upstream. reqcnt is an u32 pointer but we do copy sizeof(reqcnt) which is the size of the pointer. This means we only copy 8 byte. Let us copy the full monty. Signed-off-by: Christian Borntraeger Cc: Harald Freudenberger Cc: stable@vger.kernel.org Fixes: af4a72276d49 ("s390/zcrypt: Support up to 256 crypto adapters.") Reviewed-by: Harald Freudenberger Signed-off-by: Vasily Gorbik Signed-off-by: Greg Kroah-Hartman --- drivers/s390/crypto/zcrypt_api.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 23c24a699cef..b7cb897cd83e 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -915,7 +915,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, if (!reqcnt) return -ENOMEM; zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); - if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) + if (copy_to_user((int __user *) arg, reqcnt, + sizeof(u32) * AP_DEVICES)) rc = -EFAULT; kfree(reqcnt); return rc; -- GitLab From b0c985d6ae87cbf78e7950abcb48f897fcc1fdf0 Mon Sep 17 00:00:00 2001 From: Muchun Song Date: Thu, 6 Aug 2020 01:20:46 +0800 Subject: [PATCH 1123/1304] kprobes: Fix compiler warning for !CONFIG_KPROBES_ON_FTRACE commit 10de795a5addd1962406796a6e13ba6cc0fc6bee upstream. Fix compiler warning(as show below) for !CONFIG_KPROBES_ON_FTRACE. 
kernel/kprobes.c: In function 'kill_kprobe': kernel/kprobes.c:1116:33: warning: statement with no effect [-Wunused-value] 1116 | #define disarm_kprobe_ftrace(p) (-ENODEV) | ^ kernel/kprobes.c:2154:3: note: in expansion of macro 'disarm_kprobe_ftrace' 2154 | disarm_kprobe_ftrace(p); Link: https://lore.kernel.org/r/20200805142136.0331f7ea@canb.auug.org.au Link: https://lkml.kernel.org/r/20200805172046.19066-1-songmuchun@bytedance.com Reported-by: Stephen Rothwell Fixes: 0cb2f1372baa ("kprobes: Fix NULL pointer dereference at kprobe_ftrace_handler") Acked-by: Masami Hiramatsu Acked-by: John Fastabend Signed-off-by: Muchun Song Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/kprobes.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 915c2ce474fd..2161f519d481 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1065,9 +1065,20 @@ static int disarm_kprobe_ftrace(struct kprobe *p) return ret; } #else /* !CONFIG_KPROBES_ON_FTRACE */ -#define prepare_kprobe(p) arch_prepare_kprobe(p) -#define arm_kprobe_ftrace(p) (-ENODEV) -#define disarm_kprobe_ftrace(p) (-ENODEV) +static inline int prepare_kprobe(struct kprobe *p) +{ + return arch_prepare_kprobe(p); +} + +static inline int arm_kprobe_ftrace(struct kprobe *p) +{ + return -ENODEV; +} + +static inline int disarm_kprobe_ftrace(struct kprobe *p) +{ + return -ENODEV; +} #endif /* Arm a kprobe with text_mutex */ -- GitLab From a34e3ce81dc8061f322454341a91b300fcb463f8 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Thu, 31 Oct 2019 10:59:44 +0100 Subject: [PATCH 1124/1304] ata: define AC_ERR_OK commit 25937580a5065d6fbd92d9c8ebd47145ad80052e upstream. Since we will return enum ata_completion_errors from qc_prep in the next patch, let's define AC_ERR_OK to mark the OK status. 
Signed-off-by: Jiri Slaby Cc: Jens Axboe Cc: linux-ide@vger.kernel.org Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- include/linux/libata.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/libata.h b/include/linux/libata.h index afc1d72161ba..a682e85188db 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -503,6 +503,7 @@ enum hsm_task_states { }; enum ata_completion_errors { + AC_ERR_OK = 0, /* no error */ AC_ERR_DEV = (1 << 0), /* device reported error */ AC_ERR_HSM = (1 << 1), /* host state machine violation */ AC_ERR_TIMEOUT = (1 << 2), /* timeout */ -- GitLab From c9a512f8fa91aeee122ded2374d6061b73dd9536 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Thu, 31 Oct 2019 10:59:45 +0100 Subject: [PATCH 1125/1304] ata: make qc_prep return ata_completion_errors commit 95364f36701e62dd50eee91e1303187fd1a9f567 upstream. In case a driver wants to return an error from qc_prep, return enum ata_completion_errors. sata_mv is one of those drivers -- see the next patch. Other drivers return the newly defined AC_ERR_OK. [v2] use enum ata_completion_errors and AC_ERR_OK. 
Signed-off-by: Jiri Slaby Cc: Jens Axboe Cc: linux-ide@vger.kernel.org Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- Documentation/driver-api/libata.rst | 2 +- drivers/ata/acard-ahci.c | 6 ++++-- drivers/ata/libahci.c | 6 ++++-- drivers/ata/libata-core.c | 9 +++++++-- drivers/ata/libata-sff.c | 12 ++++++++---- drivers/ata/pata_macio.c | 6 ++++-- drivers/ata/pata_pxa.c | 8 +++++--- drivers/ata/pdc_adma.c | 7 ++++--- drivers/ata/sata_fsl.c | 4 +++- drivers/ata/sata_inic162x.c | 4 +++- drivers/ata/sata_mv.c | 26 +++++++++++++++----------- drivers/ata/sata_nv.c | 18 +++++++++++------- drivers/ata/sata_promise.c | 6 ++++-- drivers/ata/sata_qstor.c | 8 +++++--- drivers/ata/sata_rcar.c | 6 ++++-- drivers/ata/sata_sil.c | 8 +++++--- drivers/ata/sata_sil24.c | 6 ++++-- drivers/ata/sata_sx4.c | 6 ++++-- include/linux/libata.h | 12 ++++++------ 19 files changed, 101 insertions(+), 59 deletions(-) diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst index 70e180e6b93d..9f3e5dc31184 100644 --- a/Documentation/driver-api/libata.rst +++ b/Documentation/driver-api/libata.rst @@ -250,7 +250,7 @@ High-level taskfile hooks :: - void (*qc_prep) (struct ata_queued_cmd *qc); + enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc); int (*qc_issue) (struct ata_queued_cmd *qc); diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 583e366be7e2..505f8c316818 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -72,7 +72,7 @@ struct acard_sg { __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */ }; -static void acard_ahci_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc); static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc); static int acard_ahci_port_start(struct ata_port *ap); static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -257,7 +257,7 @@ static unsigned int 
acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl) return si; } -static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ahci_port_priv *pp = ap->private_data; @@ -295,6 +295,8 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; ahci_fill_cmd_slot(pp, qc->hw_tag, opts); + + return AC_ERR_OK; } static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 2bdb250a2142..f1153e7ba3b3 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -73,7 +73,7 @@ static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); static int ahci_port_start(struct ata_port *ap); static void ahci_port_stop(struct ata_port *ap); -static void ahci_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc); static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); static void ahci_freeze(struct ata_port *ap); static void ahci_thaw(struct ata_port *ap); @@ -1640,7 +1640,7 @@ static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc) return sata_pmp_qc_defer_cmd_switch(qc); } -static void ahci_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ahci_port_priv *pp = ap->private_data; @@ -1676,6 +1676,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; ahci_fill_cmd_slot(pp, qc->hw_tag, opts); + + return AC_ERR_OK; } static void ahci_fbs_dec_intr(struct ata_port *ap) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index fead7243930c..db1d86af21b4 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4996,7 +4996,10 @@ int 
ata_std_qc_defer(struct ata_queued_cmd *qc) return ATA_DEFER_LINK; } -void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } +enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) +{ + return AC_ERR_OK; +} /** * ata_sg_init - Associate command with scatter-gather table. @@ -5483,7 +5486,9 @@ void ata_qc_issue(struct ata_queued_cmd *qc) return; } - ap->ops->qc_prep(qc); + qc->err_mask |= ap->ops->qc_prep(qc); + if (unlikely(qc->err_mask)) + goto err; trace_ata_qc_issue(qc); qc->err_mask |= ap->ops->qc_issue(qc); if (unlikely(qc->err_mask)) diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 873cc0906055..7484ffdabd54 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -2695,12 +2695,14 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) * LOCKING: * spin_lock_irqsave(host lock) */ -void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) +enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; ata_bmdma_fill_sg(qc); + + return AC_ERR_OK; } EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); @@ -2713,12 +2715,14 @@ EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); * LOCKING: * spin_lock_irqsave(host lock) */ -void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) +enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; ata_bmdma_fill_sg_dumb(qc); + + return AC_ERR_OK; } EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 9588e685d994..765b99319d3c 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -507,7 +507,7 @@ static int pata_macio_cable_detect(struct ata_port *ap) return ATA_CBL_PATA40; } -static void pata_macio_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) { unsigned int write = (qc->tf.flags & 
ATA_TFLAG_WRITE); struct ata_port *ap = qc->ap; @@ -520,7 +520,7 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) __func__, qc, qc->flags, write, qc->dev->devno); if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; table = (struct dbdma_cmd *) priv->dma_table_cpu; @@ -565,6 +565,8 @@ static void pata_macio_qc_prep(struct ata_queued_cmd *qc) table->command = cpu_to_le16(DBDMA_STOP); dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi); + + return AC_ERR_OK; } diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c index e8b6a2e464c9..5b1458ca986b 100644 --- a/drivers/ata/pata_pxa.c +++ b/drivers/ata/pata_pxa.c @@ -58,25 +58,27 @@ static void pxa_ata_dma_irq(void *d) /* * Prepare taskfile for submission. */ -static void pxa_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc) { struct pata_pxa_data *pd = qc->ap->private_data; struct dma_async_tx_descriptor *tx; enum dma_transfer_direction dir; if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; dir = (qc->dma_dir == DMA_TO_DEVICE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir, DMA_PREP_INTERRUPT); if (!tx) { ata_dev_err(qc->dev, "prep_slave_sg() failed\n"); - return; + return AC_ERR_OK; } tx->callback = pxa_ata_dma_irq; tx->callback_param = pd; pd->dma_cookie = dmaengine_submit(tx); + + return AC_ERR_OK; } /* diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c index f1e873a37465..096b4771b19d 100644 --- a/drivers/ata/pdc_adma.c +++ b/drivers/ata/pdc_adma.c @@ -132,7 +132,7 @@ static int adma_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int adma_port_start(struct ata_port *ap); static void adma_port_stop(struct ata_port *ap); -static void adma_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); static int adma_check_atapi_dma(struct ata_queued_cmd *qc); static void adma_freeze(struct ata_port *ap); @@ -311,7 +311,7 @@ static int adma_fill_sg(struct ata_queued_cmd *qc) return i; } -static void adma_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc) { struct adma_port_priv *pp = qc->ap->private_data; u8 *buf = pp->pkt; @@ -322,7 +322,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) adma_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) - return; + return AC_ERR_OK; buf[i++] = 0; /* Response flags */ buf[i++] = 0; /* reserved */ @@ -387,6 +387,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc) printk("%s\n", obuf); } #endif + return AC_ERR_OK; } static inline void adma_packet_start(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index ae52a45fab5f..8b3be0ff91cb 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -507,7 +507,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, return num_prde; } -static void 
sata_fsl_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct sata_fsl_port_priv *pp = ap->private_data; @@ -553,6 +553,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n", desc_info, ttl_dwords, num_prde); + + return AC_ERR_OK; } static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 9b6d7930d1c7..6c7ddc037fce 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -472,7 +472,7 @@ static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc) prd[-1].flags |= PRD_END; } -static void inic_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc) { struct inic_port_priv *pp = qc->ap->private_data; struct inic_pkt *pkt = pp->pkt; @@ -532,6 +532,8 @@ static void inic_qc_prep(struct ata_queued_cmd *qc) inic_fill_sg(prd, qc); pp->cpb_tbl[0] = pp->pkt_dma; + + return AC_ERR_OK; } static unsigned int inic_qc_issue(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index ab2e9f62ddc1..11511037b4e3 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -605,8 +605,8 @@ static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) static int mv_port_start(struct ata_port *ap); static void mv_port_stop(struct ata_port *ap); static int mv_qc_defer(struct ata_queued_cmd *qc); -static void mv_qc_prep(struct ata_queued_cmd *qc); -static void mv_qc_prep_iie(struct ata_queued_cmd *qc); +static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc); static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); static int mv_hardreset(struct ata_link *link, unsigned int *class, unsigned 
long deadline); @@ -2044,7 +2044,7 @@ static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) * LOCKING: * Inherited from caller. */ -static void mv_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; @@ -2056,15 +2056,15 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) switch (tf->protocol) { case ATA_PROT_DMA: if (tf->command == ATA_CMD_DSM) - return; + return AC_ERR_OK; /* fall-thru */ case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO: mv_rw_multi_errata_sata24(qc); - return; + return AC_ERR_OK; default: - return; + return AC_ERR_OK; } /* Fill in command request block @@ -2129,8 +2129,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; mv_fill_sg(qc); + + return AC_ERR_OK; } /** @@ -2145,7 +2147,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) * LOCKING: * Inherited from caller. 
*/ -static void mv_qc_prep_iie(struct ata_queued_cmd *qc) +static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; @@ -2156,9 +2158,9 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) if ((tf->protocol != ATA_PROT_DMA) && (tf->protocol != ATA_PROT_NCQ)) - return; + return AC_ERR_OK; if (tf->command == ATA_CMD_DSM) - return; /* use bmdma for this */ + return AC_ERR_OK; /* use bmdma for this */ /* Fill in Gen IIE command request block */ if (!(tf->flags & ATA_TFLAG_WRITE)) @@ -2199,8 +2201,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) ); if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; mv_fill_sg(qc); + + return AC_ERR_OK; } /** diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 761577d57ff3..798d549435cc 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -313,7 +313,7 @@ static void nv_ck804_freeze(struct ata_port *ap); static void nv_ck804_thaw(struct ata_port *ap); static int nv_adma_slave_config(struct scsi_device *sdev); static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); -static void nv_adma_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc); static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); static void nv_adma_irq_clear(struct ata_port *ap); @@ -335,7 +335,7 @@ static void nv_mcp55_freeze(struct ata_port *ap); static void nv_swncq_error_handler(struct ata_port *ap); static int nv_swncq_slave_config(struct scsi_device *sdev); static int nv_swncq_port_start(struct ata_port *ap); -static void nv_swncq_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc); static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc); static void 
nv_swncq_irq_clear(struct ata_port *ap, u16 fis); @@ -1365,7 +1365,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) return 1; } -static void nv_adma_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) { struct nv_adma_port_priv *pp = qc->ap->private_data; struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; @@ -1377,7 +1377,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) (qc->flags & ATA_QCFLAG_DMAMAP)); nv_adma_register_mode(qc->ap); ata_bmdma_qc_prep(qc); - return; + return AC_ERR_OK; } cpb->resp_flags = NV_CPB_RESP_DONE; @@ -1409,6 +1409,8 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) cpb->ctl_flags = ctl_flags; wmb(); cpb->resp_flags = 0; + + return AC_ERR_OK; } static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) @@ -1972,17 +1974,19 @@ static int nv_swncq_port_start(struct ata_port *ap) return 0; } -static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc) { if (qc->tf.protocol != ATA_PROT_NCQ) { ata_bmdma_qc_prep(qc); - return; + return AC_ERR_OK; } if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; nv_swncq_fill_sg(qc); + + return AC_ERR_OK; } static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c index d032bf657f70..29d2bb465f60 100644 --- a/drivers/ata/sata_promise.c +++ b/drivers/ata/sata_promise.c @@ -155,7 +155,7 @@ static int pdc_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 va static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int pdc_common_port_start(struct ata_port *ap); static int pdc_sata_port_start(struct ata_port *ap); -static void pdc_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile 
*tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); @@ -649,7 +649,7 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc) prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); } -static void pdc_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) { struct pdc_port_priv *pp = qc->ap->private_data; unsigned int i; @@ -681,6 +681,8 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) default: break; } + + return AC_ERR_OK; } static int pdc_is_sataii_tx4(unsigned long flags) diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c index 1fe941688e95..a66d10628c18 100644 --- a/drivers/ata/sata_qstor.c +++ b/drivers/ata/sata_qstor.c @@ -116,7 +116,7 @@ static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static int qs_port_start(struct ata_port *ap); static void qs_host_stop(struct ata_host *host); -static void qs_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc); static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); static int qs_check_atapi_dma(struct ata_queued_cmd *qc); static void qs_freeze(struct ata_port *ap); @@ -276,7 +276,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) return si; } -static void qs_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc) { struct qs_port_priv *pp = qc->ap->private_data; u8 dflags = QS_DF_PORD, *buf = pp->pkt; @@ -288,7 +288,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) qs_enter_reg_mode(qc->ap); if (qc->tf.protocol != ATA_PROT_DMA) - return; + return AC_ERR_OK; nelem = qs_fill_sg(qc); @@ -311,6 +311,8 @@ static void qs_qc_prep(struct ata_queued_cmd *qc) /* frame information structure (FIS) */ ata_tf_to_fis(&qc->tf, 0, 1, 
&buf[32]); + + return AC_ERR_OK; } static inline void qs_packet_start(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 50ebd779d975..8323f88d17a5 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c @@ -554,12 +554,14 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc) prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND); } -static void sata_rcar_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; sata_rcar_bmdma_fill_sg(qc); + + return AC_ERR_OK; } static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index ed76f070d21e..82adaf02887f 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -119,7 +119,7 @@ static void sil_dev_config(struct ata_device *dev); static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); -static void sil_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc); static void sil_bmdma_setup(struct ata_queued_cmd *qc); static void sil_bmdma_start(struct ata_queued_cmd *qc); static void sil_bmdma_stop(struct ata_queued_cmd *qc); @@ -333,12 +333,14 @@ static void sil_fill_sg(struct ata_queued_cmd *qc) last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT); } -static void sil_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc) { if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; + return AC_ERR_OK; sil_fill_sg(qc); + + return AC_ERR_OK; } static unsigned char sil_get_device_cache_line(struct pci_dev *pdev) diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 
319f517137cd..7a8ca81e52bf 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -336,7 +336,7 @@ static void sil24_dev_config(struct ata_device *dev); static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val); static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val); static int sil24_qc_defer(struct ata_queued_cmd *qc); -static void sil24_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc); static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); static void sil24_pmp_attach(struct ata_port *ap); @@ -840,7 +840,7 @@ static int sil24_qc_defer(struct ata_queued_cmd *qc) return ata_std_qc_defer(qc); } -static void sil24_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct sil24_port_priv *pp = ap->private_data; @@ -884,6 +884,8 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) if (qc->flags & ATA_QCFLAG_DMAMAP) sil24_fill_sg(qc, sge); + + return AC_ERR_OK; } static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index 405e606a234d..0d742457925e 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -218,7 +218,7 @@ static void pdc_error_handler(struct ata_port *ap); static void pdc_freeze(struct ata_port *ap); static void pdc_thaw(struct ata_port *ap); static int pdc_port_start(struct ata_port *ap); -static void pdc20621_qc_prep(struct ata_queued_cmd *qc); +static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc); static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static unsigned int pdc20621_dimm_init(struct ata_host *host); @@ -546,7 +546,7 @@ static void 
pdc20621_nodata_prep(struct ata_queued_cmd *qc) VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); } -static void pdc20621_qc_prep(struct ata_queued_cmd *qc) +static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc) { switch (qc->tf.protocol) { case ATA_PROT_DMA: @@ -558,6 +558,8 @@ static void pdc20621_qc_prep(struct ata_queued_cmd *qc) default: break; } + + return AC_ERR_OK; } static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, diff --git a/include/linux/libata.h b/include/linux/libata.h index a682e85188db..3d076aca7ac2 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -913,9 +913,9 @@ struct ata_port_operations { /* * Command execution */ - int (*qc_defer)(struct ata_queued_cmd *qc); - int (*check_atapi_dma)(struct ata_queued_cmd *qc); - void (*qc_prep)(struct ata_queued_cmd *qc); + int (*qc_defer)(struct ata_queued_cmd *qc); + int (*check_atapi_dma)(struct ata_queued_cmd *qc); + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); unsigned int (*qc_issue)(struct ata_queued_cmd *qc); bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); @@ -1182,7 +1182,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode); extern const char *ata_mode_string(unsigned long xfer_mask); extern unsigned long ata_id_xfermask(const u16 *id); extern int ata_std_qc_defer(struct ata_queued_cmd *qc); -extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem); extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); @@ -1917,9 +1917,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops; .sg_tablesize = LIBATA_MAX_PRD, \ .dma_boundary = ATA_DMA_BOUNDARY -extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_qc_issue(struct 
ata_queued_cmd *qc); -extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); +extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc); extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance); -- GitLab From 3b69fe0d6d0f760f6faba1e5e11cfacd35df8d75 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Thu, 31 Oct 2019 10:59:46 +0100 Subject: [PATCH 1126/1304] ata: sata_mv, avoid trigerrable BUG_ON commit e9f691d899188679746eeb96e6cb520459eda9b4 upstream. There are several reports that the BUG_ON on unsupported command in mv_qc_prep can be triggered under some circumstances: https://bugzilla.suse.com/show_bug.cgi?id=1110252 https://serverfault.com/questions/888897/raid-problems-after-power-outage https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1652185 https://bugs.centos.org/view.php?id=14998 Let sata_mv handle the failure gracefully: warn about that incl. the failed command number and return an AC_ERR_INVALID error. We can do that now thanks to the previous patch. Remove also the long-standing FIXME. [v2] use %.2x as commands are defined as hexa. Signed-off-by: Jiri Slaby Cc: Jens Axboe Cc: linux-ide@vger.kernel.org Cc: Sergei Shtylyov Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/ata/sata_mv.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 11511037b4e3..2910b22fac11 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2111,12 +2111,10 @@ static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none * of which are defined/used by Linux. If we get here, this * driver needs work. - * - * FIXME: modify libata to give qc_prep a return value and - * return error here. 
*/ - BUG_ON(tf->command); - break; + ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__, + tf->command); + return AC_ERR_INVALID; } mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); -- GitLab From 1fa2c32e375e87834a9a2af93210201039f7e1ee Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 15 Sep 2020 11:42:17 +0100 Subject: [PATCH 1127/1304] KVM: arm64: Assume write fault on S1PTW permission fault on instruction fetch commit c4ad98e4b72cb5be30ea282fce935248f2300e62 upstream. KVM currently assumes that an instruction abort can never be a write. This is in general true, except when the abort is triggered by a S1PTW on instruction fetch that tries to update the S1 page tables (to set AF, for example). This can happen if the page tables have been paged out and brought back in without seeing a direct write to them (they are thus marked read only), and the fault handling code will make the PT executable(!) instead of writable. The guest gets stuck forever. In these conditions, the permission fault must be considered as a write so that the Stage-1 update can take place. This is essentially the I-side equivalent of the problem fixed by 60e21a0ef54c ("arm64: KVM: Take S1 walks into account when determining S2 write faults"). Update kvm_is_write_fault() to return true on IABT+S1PTW, and introduce kvm_vcpu_trap_is_exec_fault() that only return true when no faulting on a S1 fault. Additionally, kvm_vcpu_dabt_iss1tw() is renamed to kvm_vcpu_abt_iss1tw(), as the above makes it plain that it isn't specific to data abort. 
Signed-off-by: Marc Zyngier Reviewed-by: Will Deacon Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200915104218.1284701-2-maz@kernel.org Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/kvm_emulate.h | 11 ++++++++--- arch/arm64/include/asm/kvm_emulate.h | 9 +++++++-- arch/arm64/kvm/hyp/switch.c | 2 +- virt/kvm/arm/mmio.c | 2 +- virt/kvm/arm/mmu.c | 5 ++++- 5 files changed, 21 insertions(+), 8 deletions(-) diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 7d2ca035d6c8..11d4ff9f3e4d 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -216,7 +216,7 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu) return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; } @@ -248,16 +248,21 @@ static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_IL; } -static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu) +static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT; } -static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) { return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT; } +static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu); +} + static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_FSC; diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 778cb4f868d9..669c960dd069 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -303,7 +303,7 @@ static inline int 
kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } -static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) +static inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); } @@ -311,7 +311,7 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) || - kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */ + kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */ } static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) @@ -340,6 +340,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; } +static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu); +} + static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC; diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index f146bff53edf..15312e429b7d 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -430,7 +430,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && kvm_vcpu_dabt_isvalid(vcpu) && !kvm_vcpu_dabt_isextabt(vcpu) && - !kvm_vcpu_dabt_iss1tw(vcpu); + !kvm_vcpu_abt_iss1tw(vcpu); if (valid) { int ret = __vgic_v2_perform_cpuif_access(vcpu); diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c index 878e0edb2e1b..ff0a1c608371 100644 --- a/virt/kvm/arm/mmio.c +++ b/virt/kvm/arm/mmio.c @@ -142,7 +142,7 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len) bool sign_extend; bool sixty_four; - if (kvm_vcpu_dabt_iss1tw(vcpu)) { + if (kvm_vcpu_abt_iss1tw(vcpu)) { /* page table 
accesses IO mem: tell guest to fix its TTBR */ kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); return 1; diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index 41d6285c3da9..787f7329d1b7 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c @@ -1282,6 +1282,9 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap) static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) { + if (kvm_vcpu_abt_iss1tw(vcpu)) + return true; + if (kvm_vcpu_trap_is_iabt(vcpu)) return false; @@ -1496,7 +1499,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, unsigned long flags = 0; write_fault = kvm_is_write_fault(vcpu); - exec_fault = kvm_vcpu_trap_is_iabt(vcpu); + exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); VM_BUG_ON(write_fault && exec_fault); if (fault_status == FSC_PERM && !write_fault && !exec_fault) { -- GitLab From b09c34517e1ac4018e3bb75ed5c8610a8a1f486b Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 1 Oct 2020 13:14:54 +0200 Subject: [PATCH 1128/1304] Linux 4.19.149 Tested-by: Jon Hunter Tested-by: Shuah Khan Tested-by: Linux Kernel Functional Testing Link: https://lore.kernel.org/r/20200929142826.951084251@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3ffd5b03e6dd..3ff5cf33ef55 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 148 +SUBLEVEL = 149 EXTRAVERSION = NAME = "People's Front" -- GitLab From b03349cb585a41994683b29f03dfa70ac3cd7637 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Tue, 29 Sep 2020 17:50:50 -0700 Subject: [PATCH 1129/1304] ANDROID: build.config.common: enable LLVM=1 This moves builds to all use LLVM=1 argument to make, rather than CC=clang NM=llvm-nm OBJCOPY=llvm-objcopy ... (see also https://www.kernel.org/doc/html/latest/kbuild/llvm.html#llvm-utilities). 
Step 3 of: https://android.googlesource.com/platform/prebuilts/clang/host/linux-x86/+/master/BINUTILS_KERNEL_DEPRECATION.md Bug: 65987925 Bug: 141693040 Signed-off-by: Nick Desaulniers Change-Id: I0d1a4e322a76a91746f2d0888bbba1eee54a7926 --- build.config.common | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/build.config.common b/build.config.common index 1454d4c3c7c7..1b0034f31452 100644 --- a/build.config.common +++ b/build.config.common @@ -1,10 +1,7 @@ BRANCH=android-4.19-stable KMI_GENERATION=0 -CC=clang -LD=ld.lld -NM=llvm-nm -OBJCOPY=llvm-objcopy +LLVM=1 DEPMOD=depmod CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r383902/bin BUILDTOOLS_PREBUILT_BIN=build/build-tools/path/linux-x86 -- GitLab From 141de28f9519d42ab95ca93cb39acc685a353f36 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Thu, 1 Oct 2020 17:09:28 -0700 Subject: [PATCH 1130/1304] ANDROID: use arm-linux-androidkernel- for CROSS_COMPILE_COMPAT While ultimately resulting in the same binary being used, this allows the arm64 compat vdso to use the same target triple as an ARCH=arm build for Android. This means that arm-linux-androideabi-elfedit doesn't need to be created in: https://android.googlesource.com/platform/prebuilts/gas/linux-x86/. 
Bug: 141693040 Test: BUILD_CONFIG=common/build.config.gki.aarch64 ./build/build.sh Signed-off-by: Nick Desaulniers Change-Id: Iecebbfe2588615399c73255fd89d0c5ce42db9c0 --- build.config.aarch64 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.config.aarch64 b/build.config.aarch64 index 7eabc9652bf5..ce1709ac1812 100644 --- a/build.config.aarch64 +++ b/build.config.aarch64 @@ -2,7 +2,7 @@ ARCH=arm64 CLANG_TRIPLE=aarch64-linux-gnu- CROSS_COMPILE=aarch64-linux-androidkernel- -CROSS_COMPILE_COMPAT=arm-linux-androideabi- +CROSS_COMPILE_COMPAT=arm-linux-androidkernel- LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin LINUX_GCC_CROSS_COMPILE_COMPAT_PREBUILTS_BIN=prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/ -- GitLab From a9ac777f96d955c9512498b10387a9d25e42de97 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 27 Sep 2020 12:48:21 +0200 Subject: [PATCH 1131/1304] mmc: sdhci: Workaround broken command queuing on Intel GLK based IRBIS models commit afd7f30886b0b445a4240a99020458a9772f2b89 upstream. Commit bedf9fc01ff1 ("mmc: sdhci: Workaround broken command queuing on Intel GLK"), disabled command-queuing on Intel GLK based LENOVO models because of it being broken due to what is believed to be a bug in the BIOS. It seems that the BIOS of some IRBIS models, including the IRBIS NB111 model has the same issue, so disable command queuing there too. 
Fixes: bedf9fc01ff1 ("mmc: sdhci: Workaround broken command queuing on Intel GLK") BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=209397 Reported-and-tested-by: RussianNeuroMancer Signed-off-by: Hans de Goede Acked-by: Adrian Hunter Link: https://lore.kernel.org/r/20200927104821.5676-1-hdegoede@redhat.com Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci-pci-core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 35168b47afe6..a411300f9d6d 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -739,7 +739,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) { return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && - dmi_match(DMI_BIOS_VENDOR, "LENOVO"); + (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || + dmi_match(DMI_SYS_VENDOR, "IRBIS")); } static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) -- GitLab From 69868141bcf701a7d5b4c1999dda13eb84e0ff56 Mon Sep 17 00:00:00 2001 From: Bryan O'Donoghue Date: Sun, 20 Sep 2020 18:01:58 +0100 Subject: [PATCH 1132/1304] USB: gadget: f_ncm: Fix NDP16 datagram validation commit 2b405533c2560d7878199c57d95a39151351df72 upstream. commit 2b74b0a04d3e ("USB: gadget: f_ncm: add bounds checks to ncm_unwrap_ntb()") adds important bounds checking however it unfortunately also introduces a bug with respect to section 3.3.1 of the NCM specification. wDatagramIndex[1] : "Byte index, in little endian, of the second datagram described by this NDP16. If zero, then this marks the end of the sequence of datagrams in this NDP16." wDatagramLength[1]: "Byte length, in little endian, of the second datagram described by this NDP16. If zero, then this marks the end of the sequence of datagrams in this NDP16." 
wDatagramIndex[1] and wDatagramLength[1] respectively then may be zero but that does not mean we should throw away the data referenced by wDatagramIndex[0] and wDatagramLength[0] as is currently the case. Breaking the loop on (index2 == 0 || dg_len2 == 0) should come at the end as was previously the case and checks for index2 and dg_len2 should be removed since zero is valid. I'm not sure how much testing the above patch received but for me right now after enumeration ping doesn't work. Reverting the commit restores ping, scp, etc. The extra validation associated with wDatagramIndex[0] and wDatagramLength[0] appears to be valid so, this change removes the incorrect restriction on wDatagramIndex[1] and wDatagramLength[1] restoring data processing between host and device. Fixes: 2b74b0a04d3e ("USB: gadget: f_ncm: add bounds checks to ncm_unwrap_ntb()") Cc: Ilja Van Sprundel Cc: Brooke Basile Cc: stable Signed-off-by: Bryan O'Donoghue Link: https://lore.kernel.org/r/20200920170158.1217068-1-bryan.odonoghue@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_ncm.c | 30 ++--------------------------- 1 file changed, 2 insertions(+), 28 deletions(-) diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 8d8c81d43069..e2eefdd8bf78 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1192,7 +1192,6 @@ static int ncm_unwrap_ntb(struct gether *port, const struct ndp_parser_opts *opts = ncm->parser_opts; unsigned crc_len = ncm->is_crc ? 
sizeof(uint32_t) : 0; int dgram_counter; - bool ndp_after_header; /* dwSignature */ if (get_unaligned_le32(tmp) != opts->nth_sign) { @@ -1219,7 +1218,6 @@ static int ncm_unwrap_ntb(struct gether *port, } ndp_index = get_ncm(&tmp, opts->ndp_index); - ndp_after_header = false; /* Run through all the NDP's in the NTB */ do { @@ -1235,8 +1233,6 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_index); goto err; } - if (ndp_index == opts->nth_size) - ndp_after_header = true; /* * walk through NDP @@ -1315,37 +1311,13 @@ static int ncm_unwrap_ntb(struct gether *port, index2 = get_ncm(&tmp, opts->dgram_item_len); dg_len2 = get_ncm(&tmp, opts->dgram_item_len); - if (index2 == 0 || dg_len2 == 0) - break; - /* wDatagramIndex[1] */ - if (ndp_after_header) { - if (index2 < opts->nth_size + opts->ndp_size) { - INFO(port->func.config->cdev, - "Bad index: %#X\n", index2); - goto err; - } - } else { - if (index2 < opts->nth_size + opts->dpe_size) { - INFO(port->func.config->cdev, - "Bad index: %#X\n", index2); - goto err; - } - } if (index2 > block_len - opts->dpe_size) { INFO(port->func.config->cdev, "Bad index: %#X\n", index2); goto err; } - /* wDatagramLength[1] */ - if ((dg_len2 < 14 + crc_len) || - (dg_len2 > frame_max)) { - INFO(port->func.config->cdev, - "Bad dgram length: %#X\n", dg_len); - goto err; - } - /* * Copy the data into a new skb. * This ensures the truesize is correct @@ -1362,6 +1334,8 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len -= 2 * (opts->dgram_item_len * 2); dgram_counter++; + if (index2 == 0 || dg_len2 == 0) + break; } while (ndp_len > 2 * (opts->dgram_item_len * 2)); } while (ndp_index); -- GitLab From cc8df1d63eb64fae9b4871a9602a66dc3f0095ba Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 8 Sep 2020 15:07:49 +0200 Subject: [PATCH 1133/1304] gpio: mockup: fix resource leak in error path commit 1b02d9e770cd7087f34c743f85ccf5ea8372b047 upstream. 
If the module init function fails after creating the debugs directory, it's never removed. Add proper cleanup calls to avoid this resource leak. Fixes: 9202ba2397d1 ("gpio: mockup: implement event injecting over debugfs") Cc: Signed-off-by: Bartosz Golaszewski Reviewed-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/gpio/gpio-mockup.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 945bd13e5e79..cab324eb7df2 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -367,6 +367,7 @@ static int __init gpio_mockup_init(void) err = platform_driver_register(&gpio_mockup_driver); if (err) { gpio_mockup_err("error registering platform driver\n"); + debugfs_remove_recursive(gpio_mockup_dbg_dir); return err; } @@ -386,6 +387,7 @@ static int __init gpio_mockup_init(void) gpio_mockup_err("error registering device"); platform_driver_unregister(&gpio_mockup_driver); gpio_mockup_unregister_pdevs(); + debugfs_remove_recursive(gpio_mockup_dbg_dir); return PTR_ERR(pdev); } -- GitLab From 735cbbcddd83087dca4f66d835af1936f8ab4fdc Mon Sep 17 00:00:00 2001 From: dillon min Date: Thu, 3 Sep 2020 15:30:21 +0800 Subject: [PATCH 1134/1304] gpio: tc35894: fix up tc35894 interrupt configuration commit 214b0e1ad01abf4c1f6d8d28fa096bf167e47cef upstream. The offset of regmap is incorrect, j * 8 is move to the wrong register. for example: asume i = 0, j = 1. we want to set KPY5 as interrupt falling edge mode, regmap[0][1] should be TC3589x_GPIOIBE1 0xcd but, regmap[i] + j * 8 = TC3589x_GPIOIBE0 + 8 ,point to 0xd4, this is TC3589x_GPIOIE2 not TC3589x_GPIOIBE1. 
Fixes: d88b25be3584 ("gpio: Add TC35892 GPIO driver") Cc: Cc: stable@vger.kernel.org Signed-off-by: dillon min Signed-off-by: Bartosz Golaszewski Signed-off-by: Greg Kroah-Hartman --- drivers/gpio/gpio-tc3589x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 91a8ef8e7f3f..1436098b1614 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -209,7 +209,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) continue; tc3589x_gpio->oldregs[i][j] = new; - tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new); + tc3589x_reg_write(tc3589x, regmap[i] + j, new); } } -- GitLab From b4b27faf3ed987a8eb02f944276796335145a6d4 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Mon, 31 Aug 2020 15:26:57 -0500 Subject: [PATCH 1135/1304] clk: socfpga: stratix10: fix the divider for the emac_ptp_free_clk commit b02cf0c4736c65c6667f396efaae6b5521e82abf upstream. The fixed divider the emac_ptp_free_clk should be 2, not 4. 
Fixes: 07afb8db7340 ("clk: socfpga: stratix10: add clock driver for Stratix10 platform") Cc: stable@vger.kernel.org Signed-off-by: Dinh Nguyen Link: https://lore.kernel.org/r/20200831202657.8224-1-dinguyen@kernel.org Signed-off-by: Stephen Boyd Signed-off-by: Greg Kroah-Hartman --- drivers/clk/socfpga/clk-s10.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 5bed36e12951..7327e90735c8 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -107,7 +107,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = { { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux), 0, 0, 2, 0xB0, 1}, { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux, - ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2}, + ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2}, { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3}, { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux, -- GitLab From 1fa81b7cadc479924412de67e5737d45b00e01ac Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Fri, 5 Jul 2019 13:04:52 +0200 Subject: [PATCH 1136/1304] vsock/virtio: use RCU to avoid use-after-free on the_virtio_vsock [ Upstream commit 9c7a5582f5d720dc35cfcc42ccaded69f0642e4a ] Some callbacks used by the upper layers can run while we are in the .remove(). A potential use-after-free can happen, because we free the_virtio_vsock without knowing if the callbacks are over or not. To solve this issue we move the assignment of the_virtio_vsock at the end of .probe(), when we finished all the initialization, and at the beginning of .remove(), before to release resources. For the same reason, we do the same also for the vdev->priv. We use RCU to be sure that all callbacks that use the_virtio_vsock ended before freeing it. 
This is not required for callbacks that use vdev->priv, because after the vdev->config->del_vqs() we are sure that they are ended and will no longer be invoked. We also take the mutex during the .remove() to avoid that .probe() can run while we are resetting the device. Signed-off-by: Stefano Garzarella Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/vmw_vsock/virtio_transport.c | 70 +++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 24 deletions(-) diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 96ab344f17bb..68186419c445 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -66,19 +66,22 @@ struct virtio_vsock { u32 guest_cid; }; -static struct virtio_vsock *virtio_vsock_get(void) -{ - return the_virtio_vsock; -} - static u32 virtio_transport_get_local_cid(void) { - struct virtio_vsock *vsock = virtio_vsock_get(); + struct virtio_vsock *vsock; + u32 ret; - if (!vsock) - return VMADDR_CID_ANY; + rcu_read_lock(); + vsock = rcu_dereference(the_virtio_vsock); + if (!vsock) { + ret = VMADDR_CID_ANY; + goto out_rcu; + } - return vsock->guest_cid; + ret = vsock->guest_cid; +out_rcu: + rcu_read_unlock(); + return ret; } static void virtio_transport_loopback_work(struct work_struct *work) @@ -198,14 +201,18 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt) struct virtio_vsock *vsock; int len = pkt->len; - vsock = virtio_vsock_get(); + rcu_read_lock(); + vsock = rcu_dereference(the_virtio_vsock); if (!vsock) { virtio_transport_free_pkt(pkt); - return -ENODEV; + len = -ENODEV; + goto out_rcu; } - if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) - return virtio_transport_send_pkt_loopback(vsock, pkt); + if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) { + len = virtio_transport_send_pkt_loopback(vsock, pkt); + goto out_rcu; + } if (pkt->reply) atomic_inc(&vsock->queued_replies); @@ -215,6 +222,9 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt 
*pkt) spin_unlock_bh(&vsock->send_pkt_list_lock); queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work); + +out_rcu: + rcu_read_unlock(); return len; } @@ -223,12 +233,14 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk) { struct virtio_vsock *vsock; struct virtio_vsock_pkt *pkt, *n; - int cnt = 0; + int cnt = 0, ret; LIST_HEAD(freeme); - vsock = virtio_vsock_get(); + rcu_read_lock(); + vsock = rcu_dereference(the_virtio_vsock); if (!vsock) { - return -ENODEV; + ret = -ENODEV; + goto out_rcu; } spin_lock_bh(&vsock->send_pkt_list_lock); @@ -256,7 +268,11 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk) queue_work(virtio_vsock_workqueue, &vsock->rx_work); } - return 0; + ret = 0; + +out_rcu: + rcu_read_unlock(); + return ret; } static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) @@ -566,7 +582,8 @@ static int virtio_vsock_probe(struct virtio_device *vdev) return ret; /* Only one virtio-vsock device per guest is supported */ - if (the_virtio_vsock) { + if (rcu_dereference_protected(the_virtio_vsock, + lockdep_is_held(&the_virtio_vsock_mutex))) { ret = -EBUSY; goto out; } @@ -591,8 +608,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) vsock->rx_buf_max_nr = 0; atomic_set(&vsock->queued_replies, 0); - vdev->priv = vsock; - the_virtio_vsock = vsock; mutex_init(&vsock->tx_lock); mutex_init(&vsock->rx_lock); mutex_init(&vsock->event_lock); @@ -614,6 +629,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev) virtio_vsock_event_fill(vsock); mutex_unlock(&vsock->event_lock); + vdev->priv = vsock; + rcu_assign_pointer(the_virtio_vsock, vsock); + mutex_unlock(&the_virtio_vsock_mutex); return 0; @@ -628,6 +646,12 @@ static void virtio_vsock_remove(struct virtio_device *vdev) struct virtio_vsock *vsock = vdev->priv; struct virtio_vsock_pkt *pkt; + mutex_lock(&the_virtio_vsock_mutex); + + vdev->priv = NULL; + rcu_assign_pointer(the_virtio_vsock, NULL); + synchronize_rcu(); + flush_work(&vsock->loopback_work); 
flush_work(&vsock->rx_work); flush_work(&vsock->tx_work); @@ -667,12 +691,10 @@ static void virtio_vsock_remove(struct virtio_device *vdev) } spin_unlock_bh(&vsock->loopback_list_lock); - mutex_lock(&the_virtio_vsock_mutex); - the_virtio_vsock = NULL; - mutex_unlock(&the_virtio_vsock_mutex); - vdev->config->del_vqs(vdev); + mutex_unlock(&the_virtio_vsock_mutex); + kfree(vsock); } -- GitLab From 6a5a7a88cc6f587e2119bbfbd8299ba31e5ac9d8 Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Fri, 5 Jul 2019 13:04:53 +0200 Subject: [PATCH 1137/1304] vsock/virtio: stop workers during the .remove() [ Upstream commit 17dd1367389cfe7f150790c83247b68e0c19d106 ] Before to call vdev->config->reset(vdev) we need to be sure that no one is accessing the device, for this reason, we add new variables in the struct virtio_vsock to stop the workers during the .remove(). This patch also add few comments before vdev->config->reset(vdev) and vdev->config->del_vqs(vdev). Suggested-by: Stefan Hajnoczi Suggested-by: Michael S. Tsirkin Signed-off-by: Stefano Garzarella Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/vmw_vsock/virtio_transport.c | 51 +++++++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 68186419c445..4bc217ef5694 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -39,6 +39,7 @@ struct virtio_vsock { * must be accessed with tx_lock held. */ struct mutex tx_lock; + bool tx_run; struct work_struct send_pkt_work; spinlock_t send_pkt_list_lock; @@ -54,6 +55,7 @@ struct virtio_vsock { * must be accessed with rx_lock held. */ struct mutex rx_lock; + bool rx_run; int rx_buf_nr; int rx_buf_max_nr; @@ -61,6 +63,7 @@ struct virtio_vsock { * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held. 
*/ struct mutex event_lock; + bool event_run; struct virtio_vsock_event event_list[8]; u32 guest_cid; @@ -95,6 +98,10 @@ static void virtio_transport_loopback_work(struct work_struct *work) spin_unlock_bh(&vsock->loopback_list_lock); mutex_lock(&vsock->rx_lock); + + if (!vsock->rx_run) + goto out; + while (!list_empty(&pkts)) { struct virtio_vsock_pkt *pkt; @@ -103,6 +110,7 @@ static void virtio_transport_loopback_work(struct work_struct *work) virtio_transport_recv_pkt(pkt); } +out: mutex_unlock(&vsock->rx_lock); } @@ -131,6 +139,9 @@ virtio_transport_send_pkt_work(struct work_struct *work) mutex_lock(&vsock->tx_lock); + if (!vsock->tx_run) + goto out; + vq = vsock->vqs[VSOCK_VQ_TX]; for (;;) { @@ -189,6 +200,7 @@ virtio_transport_send_pkt_work(struct work_struct *work) if (added) virtqueue_kick(vq); +out: mutex_unlock(&vsock->tx_lock); if (restart_rx) @@ -324,6 +336,10 @@ static void virtio_transport_tx_work(struct work_struct *work) vq = vsock->vqs[VSOCK_VQ_TX]; mutex_lock(&vsock->tx_lock); + + if (!vsock->tx_run) + goto out; + do { struct virtio_vsock_pkt *pkt; unsigned int len; @@ -334,6 +350,8 @@ static void virtio_transport_tx_work(struct work_struct *work) added = true; } } while (!virtqueue_enable_cb(vq)); + +out: mutex_unlock(&vsock->tx_lock); if (added) @@ -362,6 +380,9 @@ static void virtio_transport_rx_work(struct work_struct *work) mutex_lock(&vsock->rx_lock); + if (!vsock->rx_run) + goto out; + do { virtqueue_disable_cb(vq); for (;;) { @@ -471,6 +492,9 @@ static void virtio_transport_event_work(struct work_struct *work) mutex_lock(&vsock->event_lock); + if (!vsock->event_run) + goto out; + do { struct virtio_vsock_event *event; unsigned int len; @@ -485,7 +509,7 @@ static void virtio_transport_event_work(struct work_struct *work) } while (!virtqueue_enable_cb(vq)); virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]); - +out: mutex_unlock(&vsock->event_lock); } @@ -621,12 +645,18 @@ static int virtio_vsock_probe(struct virtio_device *vdev) 
INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work); INIT_WORK(&vsock->loopback_work, virtio_transport_loopback_work); + mutex_lock(&vsock->tx_lock); + vsock->tx_run = true; + mutex_unlock(&vsock->tx_lock); + mutex_lock(&vsock->rx_lock); virtio_vsock_rx_fill(vsock); + vsock->rx_run = true; mutex_unlock(&vsock->rx_lock); mutex_lock(&vsock->event_lock); virtio_vsock_event_fill(vsock); + vsock->event_run = true; mutex_unlock(&vsock->event_lock); vdev->priv = vsock; @@ -661,6 +691,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev) /* Reset all connected sockets when the device disappear */ vsock_for_each_connected_socket(virtio_vsock_reset_sock); + /* Stop all work handlers to make sure no one is accessing the device, + * so we can safely call vdev->config->reset(). + */ + mutex_lock(&vsock->rx_lock); + vsock->rx_run = false; + mutex_unlock(&vsock->rx_lock); + + mutex_lock(&vsock->tx_lock); + vsock->tx_run = false; + mutex_unlock(&vsock->tx_lock); + + mutex_lock(&vsock->event_lock); + vsock->event_run = false; + mutex_unlock(&vsock->event_lock); + + /* Flush all device writes and interrupts, device will not use any + * more buffers. + */ vdev->config->reset(vdev); mutex_lock(&vsock->rx_lock); @@ -691,6 +739,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev) } spin_unlock_bh(&vsock->loopback_list_lock); + /* Delete virtqueues and flush outstanding callbacks if any */ vdev->config->del_vqs(vdev); mutex_unlock(&the_virtio_vsock_mutex); -- GitLab From 7c20b974aac73b192fcb4aa8a3e3f901d1a9e591 Mon Sep 17 00:00:00 2001 From: Stefano Garzarella Date: Thu, 14 Nov 2019 10:57:40 +0100 Subject: [PATCH 1138/1304] vsock/virtio: add transport parameter to the virtio_transport_reset_no_sock() [ Upstream commit 4c7246dc45e2706770d5233f7ce1597a07e069ba ] We are going to add 'struct vsock_sock *' parameter to virtio_transport_get_ops(). 
In some cases, like in the virtio_transport_reset_no_sock(), we don't have any socket assigned to the packet received, so we can't use the virtio_transport_get_ops(). In order to allow virtio_transport_reset_no_sock() to use the '.send_pkt' callback from the 'vhost_transport' or 'virtio_transport', we add the 'struct virtio_transport *' to it and to its caller: virtio_transport_recv_pkt(). We moved the 'vhost_transport' and 'virtio_transport' definition, to pass their address to the virtio_transport_recv_pkt(). Reviewed-by: Stefan Hajnoczi Signed-off-by: Stefano Garzarella Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/vhost/vsock.c | 94 +++++++------- include/linux/virtio_vsock.h | 3 +- net/vmw_vsock/virtio_transport.c | 160 ++++++++++++------------ net/vmw_vsock/virtio_transport_common.c | 12 +- 4 files changed, 135 insertions(+), 134 deletions(-) diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 7891bd40ebd8..6ee320259e4f 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -383,6 +383,52 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock) return val < vq->num; } +static struct virtio_transport vhost_transport = { + .transport = { + .get_local_cid = vhost_transport_get_local_cid, + + .init = virtio_transport_do_socket_init, + .destruct = virtio_transport_destruct, + .release = virtio_transport_release, + .connect = virtio_transport_connect, + .shutdown = virtio_transport_shutdown, + .cancel_pkt = vhost_transport_cancel_pkt, + + .dgram_enqueue = virtio_transport_dgram_enqueue, + .dgram_dequeue = virtio_transport_dgram_dequeue, + .dgram_bind = virtio_transport_dgram_bind, + .dgram_allow = virtio_transport_dgram_allow, + + .stream_enqueue = virtio_transport_stream_enqueue, + .stream_dequeue = virtio_transport_stream_dequeue, + .stream_has_data = virtio_transport_stream_has_data, + .stream_has_space = virtio_transport_stream_has_space, + .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, + 
.stream_is_active = virtio_transport_stream_is_active, + .stream_allow = virtio_transport_stream_allow, + + .notify_poll_in = virtio_transport_notify_poll_in, + .notify_poll_out = virtio_transport_notify_poll_out, + .notify_recv_init = virtio_transport_notify_recv_init, + .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, + .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, + .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, + .notify_send_init = virtio_transport_notify_send_init, + .notify_send_pre_block = virtio_transport_notify_send_pre_block, + .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, + .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue, + + .set_buffer_size = virtio_transport_set_buffer_size, + .set_min_buffer_size = virtio_transport_set_min_buffer_size, + .set_max_buffer_size = virtio_transport_set_max_buffer_size, + .get_buffer_size = virtio_transport_get_buffer_size, + .get_min_buffer_size = virtio_transport_get_min_buffer_size, + .get_max_buffer_size = virtio_transport_get_max_buffer_size, + }, + + .send_pkt = vhost_transport_send_pkt, +}; + static void vhost_vsock_handle_tx_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, @@ -439,7 +485,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work) if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid && le64_to_cpu(pkt->hdr.dst_cid) == vhost_transport_get_local_cid()) - virtio_transport_recv_pkt(pkt); + virtio_transport_recv_pkt(&vhost_transport, pkt); else virtio_transport_free_pkt(pkt); @@ -792,52 +838,6 @@ static struct miscdevice vhost_vsock_misc = { .fops = &vhost_vsock_fops, }; -static struct virtio_transport vhost_transport = { - .transport = { - .get_local_cid = vhost_transport_get_local_cid, - - .init = virtio_transport_do_socket_init, - .destruct = virtio_transport_destruct, - .release = virtio_transport_release, - .connect = 
virtio_transport_connect, - .shutdown = virtio_transport_shutdown, - .cancel_pkt = vhost_transport_cancel_pkt, - - .dgram_enqueue = virtio_transport_dgram_enqueue, - .dgram_dequeue = virtio_transport_dgram_dequeue, - .dgram_bind = virtio_transport_dgram_bind, - .dgram_allow = virtio_transport_dgram_allow, - - .stream_enqueue = virtio_transport_stream_enqueue, - .stream_dequeue = virtio_transport_stream_dequeue, - .stream_has_data = virtio_transport_stream_has_data, - .stream_has_space = virtio_transport_stream_has_space, - .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, - .stream_is_active = virtio_transport_stream_is_active, - .stream_allow = virtio_transport_stream_allow, - - .notify_poll_in = virtio_transport_notify_poll_in, - .notify_poll_out = virtio_transport_notify_poll_out, - .notify_recv_init = virtio_transport_notify_recv_init, - .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, - .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, - .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, - .notify_send_init = virtio_transport_notify_send_init, - .notify_send_pre_block = virtio_transport_notify_send_pre_block, - .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, - .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue, - - .set_buffer_size = virtio_transport_set_buffer_size, - .set_min_buffer_size = virtio_transport_set_min_buffer_size, - .set_max_buffer_size = virtio_transport_set_max_buffer_size, - .get_buffer_size = virtio_transport_get_buffer_size, - .get_min_buffer_size = virtio_transport_get_min_buffer_size, - .get_max_buffer_size = virtio_transport_get_max_buffer_size, - }, - - .send_pkt = vhost_transport_send_pkt, -}; - static int __init vhost_vsock_init(void) { int ret; diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index e223e2632edd..8b8d13f01cae 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -149,7 
+149,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk, void virtio_transport_destruct(struct vsock_sock *vsk); -void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt); +void virtio_transport_recv_pkt(struct virtio_transport *t, + struct virtio_vsock_pkt *pkt); void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt); void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt); u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted); diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 4bc217ef5694..cc70d651d13e 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -87,33 +87,6 @@ static u32 virtio_transport_get_local_cid(void) return ret; } -static void virtio_transport_loopback_work(struct work_struct *work) -{ - struct virtio_vsock *vsock = - container_of(work, struct virtio_vsock, loopback_work); - LIST_HEAD(pkts); - - spin_lock_bh(&vsock->loopback_list_lock); - list_splice_init(&vsock->loopback_list, &pkts); - spin_unlock_bh(&vsock->loopback_list_lock); - - mutex_lock(&vsock->rx_lock); - - if (!vsock->rx_run) - goto out; - - while (!list_empty(&pkts)) { - struct virtio_vsock_pkt *pkt; - - pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list); - list_del_init(&pkt->list); - - virtio_transport_recv_pkt(pkt); - } -out: - mutex_unlock(&vsock->rx_lock); -} - static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock, struct virtio_vsock_pkt *pkt) { @@ -370,59 +343,6 @@ static bool virtio_transport_more_replies(struct virtio_vsock *vsock) return val < virtqueue_get_vring_size(vq); } -static void virtio_transport_rx_work(struct work_struct *work) -{ - struct virtio_vsock *vsock = - container_of(work, struct virtio_vsock, rx_work); - struct virtqueue *vq; - - vq = vsock->vqs[VSOCK_VQ_RX]; - - mutex_lock(&vsock->rx_lock); - - if (!vsock->rx_run) - goto out; - - do { - virtqueue_disable_cb(vq); - for (;;) { - 
struct virtio_vsock_pkt *pkt; - unsigned int len; - - if (!virtio_transport_more_replies(vsock)) { - /* Stop rx until the device processes already - * pending replies. Leave rx virtqueue - * callbacks disabled. - */ - goto out; - } - - pkt = virtqueue_get_buf(vq, &len); - if (!pkt) { - break; - } - - vsock->rx_buf_nr--; - - /* Drop short/long packets */ - if (unlikely(len < sizeof(pkt->hdr) || - len > sizeof(pkt->hdr) + pkt->len)) { - virtio_transport_free_pkt(pkt); - continue; - } - - pkt->len = len - sizeof(pkt->hdr); - virtio_transport_deliver_tap_pkt(pkt); - virtio_transport_recv_pkt(pkt); - } - } while (!virtqueue_enable_cb(vq)); - -out: - if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) - virtio_vsock_rx_fill(vsock); - mutex_unlock(&vsock->rx_lock); -} - /* event_lock must be held */ static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock, struct virtio_vsock_event *event) @@ -586,6 +506,86 @@ static struct virtio_transport virtio_transport = { .send_pkt = virtio_transport_send_pkt, }; +static void virtio_transport_loopback_work(struct work_struct *work) +{ + struct virtio_vsock *vsock = + container_of(work, struct virtio_vsock, loopback_work); + LIST_HEAD(pkts); + + spin_lock_bh(&vsock->loopback_list_lock); + list_splice_init(&vsock->loopback_list, &pkts); + spin_unlock_bh(&vsock->loopback_list_lock); + + mutex_lock(&vsock->rx_lock); + + if (!vsock->rx_run) + goto out; + + while (!list_empty(&pkts)) { + struct virtio_vsock_pkt *pkt; + + pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list); + list_del_init(&pkt->list); + + virtio_transport_recv_pkt(&virtio_transport, pkt); + } +out: + mutex_unlock(&vsock->rx_lock); +} + +static void virtio_transport_rx_work(struct work_struct *work) +{ + struct virtio_vsock *vsock = + container_of(work, struct virtio_vsock, rx_work); + struct virtqueue *vq; + + vq = vsock->vqs[VSOCK_VQ_RX]; + + mutex_lock(&vsock->rx_lock); + + if (!vsock->rx_run) + goto out; + + do { + virtqueue_disable_cb(vq); + for (;;) { 
+ struct virtio_vsock_pkt *pkt; + unsigned int len; + + if (!virtio_transport_more_replies(vsock)) { + /* Stop rx until the device processes already + * pending replies. Leave rx virtqueue + * callbacks disabled. + */ + goto out; + } + + pkt = virtqueue_get_buf(vq, &len); + if (!pkt) { + break; + } + + vsock->rx_buf_nr--; + + /* Drop short/long packets */ + if (unlikely(len < sizeof(pkt->hdr) || + len > sizeof(pkt->hdr) + pkt->len)) { + virtio_transport_free_pkt(pkt); + continue; + } + + pkt->len = len - sizeof(pkt->hdr); + virtio_transport_deliver_tap_pkt(pkt); + virtio_transport_recv_pkt(&virtio_transport, pkt); + } + } while (!virtqueue_enable_cb(vq)); + +out: + if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) + virtio_vsock_rx_fill(vsock); + mutex_unlock(&vsock->rx_lock); +} + static int virtio_vsock_probe(struct virtio_device *vdev) { vq_callback_t *callbacks[] = { diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 52242a148c70..fae2bded5d51 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -669,9 +669,9 @@ static int virtio_transport_reset(struct vsock_sock *vsk, /* Normally packets are associated with a socket. There may be no socket if an * attempt was made to connect to a socket that does not exist. 
*/ -static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) +static int virtio_transport_reset_no_sock(const struct virtio_transport *t, + struct virtio_vsock_pkt *pkt) { - const struct virtio_transport *t; struct virtio_vsock_pkt *reply; struct virtio_vsock_pkt_info info = { .op = VIRTIO_VSOCK_OP_RST, @@ -691,7 +691,6 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) if (!reply) return -ENOMEM; - t = virtio_transport_get_ops(); if (!t) { virtio_transport_free_pkt(reply); return -ENOTCONN; @@ -993,7 +992,8 @@ static bool virtio_transport_space_update(struct sock *sk, /* We are under the virtio-vsock's vsock->rx_lock or vhost-vsock's vq->mutex * lock. */ -void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) +void virtio_transport_recv_pkt(struct virtio_transport *t, + struct virtio_vsock_pkt *pkt) { struct sockaddr_vm src, dst; struct vsock_sock *vsk; @@ -1015,7 +1015,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) le32_to_cpu(pkt->hdr.fwd_cnt)); if (le16_to_cpu(pkt->hdr.type) != VIRTIO_VSOCK_TYPE_STREAM) { - (void)virtio_transport_reset_no_sock(pkt); + (void)virtio_transport_reset_no_sock(t, pkt); goto free_pkt; } @@ -1026,7 +1026,7 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt) if (!sk) { sk = vsock_find_bound_socket(&dst); if (!sk) { - (void)virtio_transport_reset_no_sock(pkt); + (void)virtio_transport_reset_no_sock(t, pkt); goto free_pkt; } } -- GitLab From 7f2acd64ac15271dc47fe42b6685fc64085b4d56 Mon Sep 17 00:00:00 2001 From: Sebastien Boeuf Date: Fri, 14 Feb 2020 12:48:01 +0100 Subject: [PATCH 1139/1304] net: virtio_vsock: Enhance connection semantics [ Upstream commit df12eb6d6cd920ab2f0e0a43cd6e1c23a05cea91 ] Whenever the vsock backend on the host sends a packet through the RX queue, it expects an answer on the TX queue. 
Unfortunately, there is one case where the host side will hang waiting for the answer and might effectively never recover if no timeout mechanism was implemented. This issue happens when the guest side starts binding to the socket, which insert a new bound socket into the list of already bound sockets. At this time, we expect the guest to also start listening, which will trigger the sk_state to move from TCP_CLOSE to TCP_LISTEN. The problem occurs if the host side queued a RX packet and triggered an interrupt right between the end of the binding process and the beginning of the listening process. In this specific case, the function processing the packet virtio_transport_recv_pkt() will find a bound socket, which means it will hit the switch statement checking for the sk_state, but the state won't be changed into TCP_LISTEN yet, which leads the code to pick the default statement. This default statement will only free the buffer, while it should also respond to the host side, by sending a packet on its TX queue. In order to simply fix this unfortunate chain of events, it is important that in case the default statement is entered, and because at this stage we know the host side is waiting for an answer, we must send back a packet containing the operation VIRTIO_VSOCK_OP_RST. One could say that a proper timeout mechanism on the host side will be enough to avoid the backend to hang. But the point of this patch is to ensure the normal use case will be provided with proper responsiveness when it comes to establishing the connection. Signed-off-by: Sebastien Boeuf Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- net/vmw_vsock/virtio_transport_common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index fae2bded5d51..5f8a72d34d31 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -1060,6 +1060,7 @@ void virtio_transport_recv_pkt(struct virtio_transport *t, virtio_transport_free_pkt(pkt); break; default: + (void)virtio_transport_reset_no_sock(t, pkt); virtio_transport_free_pkt(pkt); break; } -- GitLab From eab97fc2aa4a6e6966d07b596a4bb6b5c68886b3 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Mon, 28 Sep 2020 16:21:17 -0700 Subject: [PATCH 1140/1304] Input: i8042 - add nopnp quirk for Acer Aspire 5 A515 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5fc27b098dafb8e30794a9db0705074c7d766179 upstream. Touchpad on this laptop is not detected properly during boot, as PNP enumerates (wrongly) AUX port as disabled on this machine. Fix that by adding this board (with admittedly quite funny DMI identifiers) to nopnp quirk list. 
Reported-by: Andrés Barrantes Silman Signed-off-by: Jiri Kosina Link: https://lore.kernel.org/r/nycvar.YFH.7.76.2009252337340.3336@cbobk.fhfr.pm Cc: stable@vger.kernel.org Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/serio/i8042-x86ia64io.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 7c05e09abacf..51bd2ebaa342 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -725,6 +725,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), }, }, + { + /* Acer Aspire 5 A515 */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), + DMI_MATCH(DMI_BOARD_VENDOR, "PK"), + }, + }, { } }; -- GitLab From 2fd5a462eb7b39694ae013450dc47d84cdf7204a Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 29 Sep 2020 12:40:31 -0400 Subject: [PATCH 1141/1304] ftrace: Move RCU is watching check after recursion check commit b40341fad6cc2daa195f8090fd3348f18fff640a upstream. The first thing that the ftrace function callback helper functions should do is to check for recursion. Peter Zijlstra found that when "rcu_is_watching()" had its notrace removed, it caused perf function tracing to crash. This is because the call of rcu_is_watching() is tested before function recursion is checked and and if it is traced, it will cause an infinite recursion loop. rcu_is_watching() should still stay notrace, but to prevent this should never had crashed in the first place. The recursion prevention must be the first thing done in callback functions. 
Link: https://lore.kernel.org/r/20200929112541.GM2628@hirez.programming.kicks-ass.net Cc: stable@vger.kernel.org Cc: Paul McKenney Fixes: c68c0fa293417 ("ftrace: Have ftrace_ops_get_func() handle RCU and PER_CPU flags too") Acked-by: Peter Zijlstra (Intel) Reported-by: Peter Zijlstra (Intel) Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/ftrace.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 70f7743c1672..992d48774c9e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -6370,16 +6370,14 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, { int bit; - if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) - return; - bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); if (bit < 0) return; preempt_disable_notrace(); - op->func(ip, parent_ip, op, regs); + if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) + op->func(ip, parent_ip, op, regs); preempt_enable_notrace(); trace_clear_recursion(bit); -- GitLab From 80f0f9b5803e1394c5bbcb7259359b3ab9d6b335 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Mon, 28 Sep 2020 11:10:37 +0200 Subject: [PATCH 1142/1304] drm/amdgpu: restore proper ref count in amdgpu_display_crtc_set_config commit a39d0d7bdf8c21ac7645c02e9676b5cb2b804c31 upstream. A recent attempt to fix a ref count leak in amdgpu_display_crtc_set_config() turned out to be doing too much and "fixed" an intended decrease as if it were a leak. Undo that part to restore the proper balance. This is the very nature of this function to increase or decrease the power reference count depending on the situation. Consequences of this bug is that the power reference would eventually get down to 0 while the display was still in use, resulting in that display switching off unexpectedly. 
Signed-off-by: Jean Delvare Fixes: e008fa6fb415 ("drm/amdgpu: fix ref count leak in amdgpu_display_crtc_set_config") Cc: stable@vger.kernel.org Cc: Navid Emamdoost Cc: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 049a1961c3fa..5f85c9586cba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -290,7 +290,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, take the current one */ if (active && !adev->have_disp_power_ref) { adev->have_disp_power_ref = true; - goto out; + return ret; } /* if we have no active crtcs, then drop the power ref we got before */ -- GitLab From 342ee084a1bbf62ace68f03a59742da38e2b009f Mon Sep 17 00:00:00 2001 From: Xie He Date: Wed, 2 Sep 2020 17:06:58 -0700 Subject: [PATCH 1143/1304] drivers/net/wan/hdlc_fr: Add needed_headroom for PVC devices [ Upstream commit 44a049c42681de71c783d75cd6e56b4e339488b0 ] PVC devices are virtual devices in this driver stacked on top of the actual HDLC device. They are the devices normal users would use. PVC devices have two types: normal PVC devices and Ethernet-emulating PVC devices. When transmitting data with PVC devices, the ndo_start_xmit function will prepend a header of 4 or 10 bytes. Currently this driver requests this headroom to be reserved for normal PVC devices by setting their hard_header_len to 10. However, this does not work when these devices are used with AF_PACKET/RAW sockets. Also, this driver does not request this headroom for Ethernet-emulating PVC devices (but deals with this problem by reallocating the skb when needed, which is not optimal). This patch replaces hard_header_len with needed_headroom, and set needed_headroom for Ethernet-emulating PVC devices, too. 
This makes the driver to request headroom for all PVC devices in all cases. Cc: Krzysztof Halasa Cc: Martin Schiller Signed-off-by: Xie He Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/wan/hdlc_fr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 038236a9c60e..67f89917277c 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1044,7 +1044,7 @@ static void pvc_setup(struct net_device *dev) { dev->type = ARPHRD_DLCI; dev->flags = IFF_POINTOPOINT; - dev->hard_header_len = 10; + dev->hard_header_len = 0; dev->addr_len = 2; netif_keep_dst(dev); } @@ -1096,6 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) dev->mtu = HDLC_MAX_MTU; dev->min_mtu = 68; dev->max_mtu = HDLC_MAX_MTU; + dev->needed_headroom = 10; dev->priv_flags |= IFF_NO_QUEUE; dev->ml_priv = pvc; -- GitLab From 87a4f2848cb4bc1d5810c399dc274d3894bfddcf Mon Sep 17 00:00:00 2001 From: Martin Cerveny Date: Sun, 6 Sep 2020 18:21:40 +0200 Subject: [PATCH 1144/1304] drm/sun4i: mixer: Extend regmap max_register [ Upstream commit 74ea06164cda81dc80e97790164ca533fd7e3087 ] Better guess. Secondary CSC registers are from 0xF0000. 
Signed-off-by: Martin Cerveny Reviewed-by: Jernej Skrabec Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20200906162140.5584-3-m.cerveny@computer.org Signed-off-by: Sasha Levin --- drivers/gpu/drm/sun4i/sun8i_mixer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index 71a798e5d559..649b57e5e4b7 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -364,7 +364,7 @@ static struct regmap_config sun8i_mixer_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = 0xbfffc, /* guessed */ + .max_register = 0xffffc, /* guessed */ }; static int sun8i_mixer_of_get_id(struct device_node *node) -- GitLab From 2f9f01cfc32b4f39891a88dd653c89cf7d7220f4 Mon Sep 17 00:00:00 2001 From: Lucy Yan Date: Thu, 10 Sep 2020 12:05:09 -0700 Subject: [PATCH 1145/1304] net: dec: de2104x: Increase receive ring size for Tulip [ Upstream commit ee460417d254d941dfea5fb7cff841f589643992 ] Increase Rx ring size to address issue where hardware is reaching the receive work limit. Before: [ 102.223342] de2104x 0000:17:00.0 eth0: rx work limit reached [ 102.245695] de2104x 0000:17:00.0 eth0: rx work limit reached [ 102.251387] de2104x 0000:17:00.0 eth0: rx work limit reached [ 102.267444] de2104x 0000:17:00.0 eth0: rx work limit reached Signed-off-by: Lucy Yan Reviewed-by: Moritz Fischer Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/dec/tulip/de2104x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 13430f75496c..b312cd9bce16 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -91,7 +91,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi #define DSL CONFIG_DE2104X_DSL #endif -#define DE_RX_RING_SIZE 64 +#define DE_RX_RING_SIZE 128 #define DE_TX_RING_SIZE 64 #define DE_RING_BYTES \ ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \ -- GitLab From 22edeb67dfa89f1e1394dfbd8702b8dc3ae55062 Mon Sep 17 00:00:00 2001 From: Olympia Giannou Date: Fri, 11 Sep 2020 14:17:24 +0000 Subject: [PATCH 1146/1304] rndis_host: increase sleep time in the query-response loop [ Upstream commit 4202c9fdf03d79dedaa94b2c4cf574f25793d669 ] Some WinCE devices face connectivity issues via the NDIS interface. They fail to register, resulting in -110 timeout errors and failures during the probe procedure. In this kind of WinCE devices, the Windows-side ndis driver needs quite more time to be loaded and configured, so that the linux rndis host queries to them fail to be responded correctly on time. More specifically, when INIT is called on the WinCE side - no other requests can be served by the Client and this results in a failed QUERY afterwards. The increase of the waiting time on the side of the linux rndis host in the command-response loop leaves the INIT process to complete and respond to a QUERY, which comes afterwards. The WinCE devices with this special "feature" in their ndis driver are satisfied by this fix. Signed-off-by: Olympia Giannou Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/usb/rndis_host.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index b807c91abe1d..a22ae3137a3f 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -213,7 +213,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) dev_dbg(&info->control->dev, "rndis response error, code %d\n", retval); } - msleep(20); + msleep(40); } dev_dbg(&info->control->dev, "rndis response timeout\n"); return -ETIMEDOUT; -- GitLab From c2df194a0d50bc1370c6761f5b80d3a32f42bcd4 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Tue, 15 Sep 2020 20:53:25 -0700 Subject: [PATCH 1147/1304] nvme-core: get/put ctrl and transport module in nvme_dev_open/release() [ Upstream commit 52a3974feb1a3eec25d8836d37a508b67b0a9cd0 ] Get and put the reference to the ctrl in the nvme_dev_open() and nvme_dev_release() before and after module get/put for ctrl in char device file operations. 
Introduce char_dev relase function, get/put the controller and module which allows us to fix the potential Oops which can be easily reproduced with a passthru ctrl (although the problem also exists with pure user access): Entering kdb (current=0xffff8887f8290000, pid 3128) on processor 30 Oops: (null) due to oops @ 0xffffffffa01019ad CPU: 30 PID: 3128 Comm: bash Tainted: G W OE 5.8.0-rc4nvme-5.9+ #35 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.4 RIP: 0010:nvme_free_ctrl+0x234/0x285 [nvme_core] Code: 57 10 a0 e8 73 bf 02 e1 ba 3d 11 00 00 48 c7 c6 98 33 10 a0 48 c7 c7 1d 57 10 a0 e8 5b bf 02 e1 8 RSP: 0018:ffffc90001d63de0 EFLAGS: 00010246 RAX: ffffffffa05c0440 RBX: ffff8888119e45a0 RCX: 0000000000000000 RDX: 0000000000000000 RSI: ffff8888177e9550 RDI: ffff8888119e43b0 RBP: ffff8887d4768000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: ffffc90001d63c90 R12: ffff8888119e43b0 R13: ffff8888119e5108 R14: dead000000000100 R15: ffff8888119e5108 FS: 00007f1ef27b0740(0000) GS:ffff888817600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffffffa05c0470 CR3: 00000007f6bee000 CR4: 00000000003406e0 Call Trace: device_release+0x27/0x80 kobject_put+0x98/0x170 nvmet_passthru_ctrl_disable+0x4a/0x70 [nvmet] nvmet_passthru_enable_store+0x4c/0x90 [nvmet] configfs_write_file+0xe6/0x150 vfs_write+0xba/0x1e0 ksys_write+0x5f/0xe0 do_syscall_64+0x52/0xb0 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7f1ef1eb2840 Code: Bad RIP value. 
RSP: 002b:00007fffdbff0eb8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 00007f1ef1eb2840 RDX: 0000000000000002 RSI: 00007f1ef27d2000 RDI: 0000000000000001 RBP: 00007f1ef27d2000 R08: 000000000000000a R09: 00007f1ef27b0740 R10: 0000000000000001 R11: 0000000000000246 R12: 00007f1ef2186400 R13: 0000000000000002 R14: 0000000000000001 R15: 0000000000000000 With this patch fix we take the module ref count in nvme_dev_open() and release that ref count in newly introduced nvme_dev_release(). Signed-off-by: Chaitanya Kulkarni Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- drivers/nvme/host/core.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 33dad9774da0..9ea3d8e61100 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2605,10 +2605,24 @@ static int nvme_dev_open(struct inode *inode, struct file *file) return -EWOULDBLOCK; } + nvme_get_ctrl(ctrl); + if (!try_module_get(ctrl->ops->module)) + return -EINVAL; + file->private_data = ctrl; return 0; } +static int nvme_dev_release(struct inode *inode, struct file *file) +{ + struct nvme_ctrl *ctrl = + container_of(inode->i_cdev, struct nvme_ctrl, cdev); + + module_put(ctrl->ops->module); + nvme_put_ctrl(ctrl); + return 0; +} + static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) { struct nvme_ns *ns; @@ -2669,6 +2683,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, static const struct file_operations nvme_dev_fops = { .owner = THIS_MODULE, .open = nvme_dev_open, + .release = nvme_dev_release, .unlocked_ioctl = nvme_dev_ioctl, .compat_ioctl = nvme_dev_ioctl, }; -- GitLab From 596d087ef1628d84ddc56797f7ec33daac97bf11 Mon Sep 17 00:00:00 2001 From: Xie He Date: Wed, 16 Sep 2020 09:49:18 -0700 Subject: [PATCH 1148/1304] drivers/net/wan/lapbether: Make skb->protocol consistent with the header [ Upstream commit 
83f9a9c8c1edc222846dc1bde6e3479703e8e5a3 ] This driver is a virtual driver stacked on top of Ethernet interfaces. When this driver transmits data on the Ethernet device, the skb->protocol setting is inconsistent with the Ethernet header prepended to the skb. This causes a user listening on the Ethernet interface with an AF_PACKET socket, to see different sll_protocol values for incoming and outgoing frames, because incoming frames would have this value set by parsing the Ethernet header. This patch changes the skb->protocol value for outgoing Ethernet frames, making it consistent with the Ethernet header prepended. This makes a user listening on the Ethernet device with an AF_PACKET socket, to see the same sll_protocol value for incoming and outgoing frames. Cc: Martin Schiller Signed-off-by: Xie He Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/wan/lapbether.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 15177a54b17d..e5fc1b95cea6 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -201,8 +201,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) struct net_device *dev; int size = skb->len; - skb->protocol = htons(ETH_P_X25); - ptr = skb_push(skb, 2); *ptr++ = size % 256; @@ -213,6 +211,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) skb->dev = dev = lapbeth->ethdev; + skb->protocol = htons(ETH_P_DEC); + skb_reset_network_header(skb); dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); -- GitLab From 7fcf25b4f8af806064c7b9c037bff8067ba90701 Mon Sep 17 00:00:00 2001 From: Xie He Date: Wed, 16 Sep 2020 14:25:07 -0700 Subject: [PATCH 1149/1304] drivers/net/wan/hdlc: Set skb->protocol before transmitting [ Upstream commit 9fb030a70431a2a2a1b292dbf0b2f399cc072c16 ] This patch sets skb->protocol before transmitting frames on the HDLC device, so that a user 
listening on the HDLC device with an AF_PACKET socket will see outgoing frames' sll_protocol field correctly set and consistent with that of incoming frames. 1. Control frames in hdlc_cisco and hdlc_ppp When these drivers send control frames, skb->protocol is not set. This value should be set to htons(ETH_P_HDLC), because when receiving control frames, their skb->protocol is set to htons(ETH_P_HDLC). When receiving, hdlc_type_trans in hdlc.h is called, which then calls cisco_type_trans or ppp_type_trans. The skb->protocol of control frames is set to htons(ETH_P_HDLC) so that the control frames can be received by hdlc_rcv in hdlc.c, which calls cisco_rx or ppp_rx to process the control frames. 2. hdlc_fr When this driver sends control frames, skb->protocol is set to internal values used in this driver. When this driver sends data frames (from upper stacked PVC devices), skb->protocol is the same as that of the user data packet being sent on the upper PVC device (for normal PVC devices), or is htons(ETH_P_802_3) (for Ethernet-emulating PVC devices). However, skb->protocol for both control frames and data frames should be set to htons(ETH_P_HDLC), because when receiving, all frames received on the HDLC device will have their skb->protocol set to htons(ETH_P_HDLC). When receiving, hdlc_type_trans in hdlc.h is called, and because this driver doesn't provide a type_trans function in struct hdlc_proto, all frames will have their skb->protocol set to htons(ETH_P_HDLC). The frames are then received by hdlc_rcv in hdlc.c, which calls fr_rx to process the frames (control frames are consumed and data frames are re-received on upper PVC devices). Cc: Krzysztof Halasa Signed-off-by: Xie He Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/wan/hdlc_cisco.c | 1 + drivers/net/wan/hdlc_fr.c | 3 +++ drivers/net/wan/hdlc_ppp.c | 1 + 3 files changed, 5 insertions(+) diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index c169a26e5359..2c6e3fa6947a 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -121,6 +121,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, skb_put(skb, sizeof(struct cisco_packet)); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); dev_queue_xmit(skb); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 67f89917277c..03b5f5cce6f4 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -436,6 +436,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) if (pvc->state.fecn) /* TX Congestion counter */ dev->stats.tx_compressed++; skb->dev = pvc->frad; + skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); dev_queue_xmit(skb); return NETDEV_TX_OK; } @@ -558,6 +560,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) skb_put(skb, i); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); dev_queue_xmit(skb); diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 85844f26547d..20d9b6585fba 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -254,6 +254,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code, skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); skb_queue_tail(&tx_queue, skb); } -- GitLab From 487882d928cadeea0edd08fa1e659a2e6edf53e8 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 17 Sep 2020 14:50:31 +0200 Subject: [PATCH 1150/1304] mac80211: do not allow bigger VHT MPDUs than the hardware supports [ Upstream commit 
3bd5c7a28a7c3aba07a2d300d43f8e988809e147 ] Limit maximum VHT MPDU size by local capability. Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20200917125031.45009-1-nbd@nbd.name Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin --- net/mac80211/vht.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 259325cbcc31..4d154efb80c8 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -170,10 +170,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, /* take some capabilities as-is */ cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); vht_cap->cap = cap_info; - vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | - IEEE80211_VHT_CAP_RXLDPC | + vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_VHT_TXOP_PS | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | @@ -182,6 +179,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; + vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK, + own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK); + /* and some based on our own capabilities */ switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: -- GitLab From 8f72de67c77398d2e3b50e09e82a6c5131841462 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Fri, 4 Sep 2020 12:28:12 +1200 Subject: [PATCH 1151/1304] spi: fsl-espi: Only process interrupts for expected events [ Upstream commit b867eef4cf548cd9541225aadcdcee644669b9e1 ] The SPIE register contains counts for the TX FIFO so any time the irq handler was invoked we would attempt to process the RX/TX fifos. Use the SPIM value to mask the events so that we only process interrupts that were expected. 
This was a latent issue exposed by commit 3282a3da25bd ("powerpc/64: Implement soft interrupt replay in C"). Signed-off-by: Chris Packham Link: https://lore.kernel.org/r/20200904002812.7300-1-chris.packham@alliedtelesis.co.nz Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- drivers/spi/spi-fsl-espi.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index 1e8ff6256079..b8dd75b8518b 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -559,13 +559,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events) static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) { struct fsl_espi *espi = context_data; - u32 events; + u32 events, mask; spin_lock(&espi->lock); /* Get interrupt events(tx/rx) */ events = fsl_espi_read_reg(espi, ESPI_SPIE); - if (!events) { + mask = fsl_espi_read_reg(espi, ESPI_SPIM); + if (!(events & mask)) { spin_unlock(&espi->lock); return IRQ_NONE; } -- GitLab From 4ffc945b2ab8c52bb147b966636b87ba8884045f Mon Sep 17 00:00:00 2001 From: James Smart Date: Thu, 17 Sep 2020 13:33:22 -0700 Subject: [PATCH 1152/1304] nvme-fc: fail new connections to a deleted host or remote port [ Upstream commit 9e0e8dac985d4bd07d9e62922b9d189d3ca2fccf ] The lldd may have made calls to delete a remote port or local port and the delete is in progress when the cli then attempts to create a new controller. Currently, this proceeds without error although it can't be very successful. Fix this by validating that both the host port and remote port are present when a new controller is to be created. 
Signed-off-by: James Smart Reviewed-by: Himanshu Madhani Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- drivers/nvme/host/fc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 73db32f97abf..ed88d5021772 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3294,12 +3294,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) spin_lock_irqsave(&nvme_fc_lock, flags); list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { if (lport->localport.node_name != laddr.nn || - lport->localport.port_name != laddr.pn) + lport->localport.port_name != laddr.pn || + lport->localport.port_state != FC_OBJSTATE_ONLINE) continue; list_for_each_entry(rport, &lport->endp_list, endp_list) { if (rport->remoteport.node_name != raddr.nn || - rport->remoteport.port_name != raddr.pn) + rport->remoteport.port_name != raddr.pn || + rport->remoteport.port_state != FC_OBJSTATE_ONLINE) continue; /* if fail to get reference fall through. Will error */ -- GitLab From 3851aa13f46b44849dbfd234edebea1a645121f0 Mon Sep 17 00:00:00 2001 From: Taiping Lai Date: Mon, 31 Aug 2020 17:09:47 +0800 Subject: [PATCH 1153/1304] gpio: sprd: Clear interrupt when setting the type as edge [ Upstream commit 5fcface659aab7eac4bd65dd116d98b8f7bb88d5 ] The raw interrupt status of GPIO maybe set before the interrupt is enabled, which would trigger the interrupt event once enabled it from user side. This is the case for edge interrupts only. Adding a clear operation when setting interrupt type can avoid that. There're a few considerations for the solution: 1) This issue is for edge interrupt only; The interrupts requested by users are IRQ_TYPE_LEVEL_HIGH as default, so clearing interrupt when request is useless. 2) The interrupt type can be set to edge when request and following up with clearing it though, but the problem is still there once users set the interrupt type to level trigger. 
3) We can add a clear operation after each time of setting interrupt enable bit, but it is redundant for level trigger interrupt. Therefore, the solution in this patch seems the best for now. Fixes: 9a3821c2bb47 ("gpio: Add GPIO driver for Spreadtrum SC9860 platform") Signed-off-by: Taiping Lai Signed-off-by: Chunyan Zhang Reviewed-by: Baolin Wang Signed-off-by: Bartosz Golaszewski Signed-off-by: Sasha Levin --- drivers/gpio/gpio-sprd.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c index 55072d2b367f..4d53347adcaf 100644 --- a/drivers/gpio/gpio-sprd.c +++ b/drivers/gpio/gpio-sprd.c @@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data, sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_LEVEL_HIGH: -- GitLab From 837af2c131c030322881f69e1098885a0aea2422 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Tue, 8 Sep 2020 09:17:10 +1200 Subject: [PATCH 1154/1304] pinctrl: mvebu: Fix i2c sda definition for 98DX3236 [ Upstream commit 63c3212e7a37d68c89a13bdaebce869f4e064e67 ] Per the datasheet the i2c functions use MPP_Sel=0x1. They are documented as using MPP_Sel=0x4 as well but mixing 0x1 and 0x4 is clearly wrong. 
On the board tested 0x4 resulted in a non-functioning i2c bus so stick with 0x1 which works. Fixes: d7ae8f8dee7f ("pinctrl: mvebu: pinctrl driver for 98DX3236 SoC") Signed-off-by: Chris Packham Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20200907211712.9697-2-chris.packham@alliedtelesis.co.nz Signed-off-by: Linus Walleij Signed-off-by: Sasha Levin --- drivers/pinctrl/mvebu/pinctrl-armada-xp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c index 43231fd065a1..1a9450ef932b 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c @@ -418,7 +418,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = { MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)), MPP_MODE(15, MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS), - MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)), + MPP_VAR_FUNCTION(0x1, "i2c0", "sda", V_98DX3236_PLUS)), MPP_MODE(16, MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS), MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)), -- GitLab From 345c6f260c89e417de6e7d81f3366bd5079f48a3 Mon Sep 17 00:00:00 2001 From: Jeffrey Mitchell Date: Tue, 15 Sep 2020 16:42:52 -0500 Subject: [PATCH 1155/1304] nfs: Fix security label length not being reset [ Upstream commit d33030e2ee3508d65db5644551435310df86010e ] nfs_readdir_page_filler() iterates over entries in a directory, reusing the same security label buffer, but does not reset the buffer's length. This causes decode_attr_security_label() to return -ERANGE if an entry's security label is longer than the previous one's. This error, in nfs4_decode_dirent(), only gets passed up as -EAGAIN, which causes another failed attempt to copy into the buffer. The second error is ignored and the remaining entries do not show up in ls, specifically the getdents64() syscall. 
Reproduce by creating multiple files in NFS and giving one of the later files a longer security label. ls will not see that file nor any that are added afterwards, though they will exist on the backend. In nfs_readdir_page_filler(), reset security label buffer length before every reuse Signed-off-by: Jeffrey Mitchell Fixes: b4487b935452 ("nfs: Fix getxattr kernel panic and memory overflow") Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin --- fs/nfs/dir.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 4ae726e70d87..733fd9e4f0a1 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -553,6 +553,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); do { + if (entry->label) + entry->label->len = NFS4_MAXLABELLEN; + status = xdr_decode(desc, entry, &stream); if (status != 0) { if (status == -EAGAIN) -- GitLab From a84da5ea38334ff79c13259b7aa5cf50ed52aa67 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Tue, 22 Sep 2020 14:40:46 +0200 Subject: [PATCH 1156/1304] clk: samsung: exynos4: mark 'chipid' clock as CLK_IGNORE_UNUSED [ Upstream commit f3bb0f796f5ffe32f0fbdce5b1b12eb85511158f ] The ChipID IO region has it's own clock, which is being disabled while scanning for unused clocks. It turned out that some CPU hotplug, CPU idle or even SOC firmware code depends on the reads from that area. Fix the mysterious hang caused by entering deep CPU idle state by ignoring the 'chipid' clock during unused clocks scan, as there are no direct clients for it which will keep it enabled. 
Fixes: e062b571777f ("clk: exynos4: register clocks using common clock framework") Signed-off-by: Marek Szyprowski Link: https://lore.kernel.org/r/20200922124046.10496-1-m.szyprowski@samsung.com Reviewed-by: Krzysztof Kozlowski Acked-by: Sylwester Nawrocki Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- drivers/clk/samsung/clk-exynos4.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 442309b56920..8086756e7f07 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -1072,7 +1072,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = { GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0), GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), - GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), + GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, @@ -1113,7 +1113,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = { 0), GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0), GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), - GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), + GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, CLK_IGNORE_UNUSED, 0), GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, -- GitLab From 543db1d99b0993d891acd91f667202fa17140e51 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Fri, 18 Sep 2020 09:13:35 +0800 Subject: [PATCH 1157/1304] iommu/exynos: add missing put_device() call in 
exynos_iommu_of_xlate() [ Upstream commit 1a26044954a6d1f4d375d5e62392446af663be7a ] if of_find_device_by_node() succeeds, exynos_iommu_of_xlate() doesn't have a corresponding put_device(). Thus add put_device() to fix the exception handling for this function implementation. Fixes: aa759fd376fb ("iommu/exynos: Add callback for initializing devices from device tree") Signed-off-by: Yu Kuai Acked-by: Marek Szyprowski Link: https://lore.kernel.org/r/20200918011335.909141-1-yukuai3@huawei.com Signed-off-by: Joerg Roedel Signed-off-by: Sasha Levin --- drivers/iommu/exynos-iommu.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 1bd0cd7168df..4bf6049dd2c7 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1302,13 +1302,17 @@ static int exynos_iommu_of_xlate(struct device *dev, return -ENODEV; data = platform_get_drvdata(sysmmu); - if (!data) + if (!data) { + put_device(&sysmmu->dev); return -ENODEV; + } if (!owner) { owner = kzalloc(sizeof(*owner), GFP_KERNEL); - if (!owner) + if (!owner) { + put_device(&sysmmu->dev); return -ENOMEM; + } INIT_LIST_HEAD(&owner->controllers); mutex_init(&owner->rpm_lock); -- GitLab From 69e0a9eb6c49ccbf1d565e8a4d188132af3df70e Mon Sep 17 00:00:00 2001 From: Nicolas VINCENT Date: Wed, 23 Sep 2020 16:08:40 +0200 Subject: [PATCH 1158/1304] i2c: cpm: Fix i2c_ram structure [ Upstream commit a2bd970aa62f2f7f80fd0d212b1d4ccea5df4aed ] the i2c_ram structure is missing the sdmatmp field mentioned in datasheet for MPC8272 at paragraph 36.5. With this field missing, the hardware would write past the allocated memory done through cpm_muram_alloc for the i2c_ram structure and land in memory allocated for the buffers descriptors corrupting the cbd_bufaddr field. Since this field is only set during setup(), the first i2c transaction would work and the following would send data read from an arbitrary memory location. 
Fixes: 61045dbe9d8d ("i2c: Add support for I2C bus on Freescale CPM1/CPM2 controllers") Signed-off-by: Nicolas VINCENT Acked-by: Jochen Friedrich Acked-by: Christophe Leroy Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-cpm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 8a8ca945561b..7eba874a981d 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -74,6 +74,9 @@ struct i2c_ram { char res1[4]; /* Reserved */ ushort rpbase; /* Relocation pointer */ char res2[2]; /* Reserved */ + /* The following elements are only for CPM2 */ + char res3[4]; /* Reserved */ + uint sdmatmp; /* Internal */ }; #define I2COM_START 0x80 -- GitLab From 78ba2e803f40d55e4147f12bf9b29ac1f933992f Mon Sep 17 00:00:00 2001 From: Vincent Huang Date: Mon, 28 Sep 2020 16:19:05 -0700 Subject: [PATCH 1159/1304] Input: trackpoint - enable Synaptics trackpoints [ Upstream commit 996d585b079ad494a30cac10e08585bcd5345125 ] Add Synaptics IDs in trackpoint_start_protocol() to mark them as valid. 
Signed-off-by: Vincent Huang Fixes: 6c77545af100 ("Input: trackpoint - add new trackpoint variant IDs") Reviewed-by: Harry Cutts Tested-by: Harry Cutts Link: https://lore.kernel.org/r/20200924053013.1056953-1-vincent.huang@tw.synaptics.com Signed-off-by: Dmitry Torokhov Signed-off-by: Sasha Levin --- drivers/input/mouse/trackpoint.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 31c16b68aa31..e46865785409 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -285,6 +285,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, case TP_VARIANT_ALPS: case TP_VARIANT_ELAN: case TP_VARIANT_NXP: + case TP_VARIANT_JYT_SYNAPTICS: + case TP_VARIANT_SYNAPTICS: if (variant_id) *variant_id = param[0]; if (firmware_id) -- GitLab From a4ebc2d6aa3ac2aa92cac8f6f53662df2c4904c9 Mon Sep 17 00:00:00 2001 From: Thibaut Sautereau Date: Fri, 2 Oct 2020 17:16:11 +0200 Subject: [PATCH 1160/1304] random32: Restore __latent_entropy attribute on net_rand_state [ Upstream commit 09a6b0bc3be793ca8cba580b7992d73e9f68f15d ] Commit f227e3ec3b5c ("random32: update the net random state on interrupt and activity") broke compilation and was temporarily fixed by Linus in 83bdc7275e62 ("random32: remove net_rand_state from the latent entropy gcc plugin") by entirely moving net_rand_state out of the things handled by the latent_entropy GCC plugin. From what I understand when reading the plugin code, using the __latent_entropy attribute on a declaration was the wrong part and simply keeping the __latent_entropy attribute on the variable definition was the correct fix. 
Fixes: 83bdc7275e62 ("random32: remove net_rand_state from the latent entropy gcc plugin") Acked-by: Willy Tarreau Cc: Emese Revfy Signed-off-by: Thibaut Sautereau Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- lib/random32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/random32.c b/lib/random32.c index 036de0c93e22..b6f3325e38e4 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void) } #endif -DEFINE_PER_CPU(struct rnd_state, net_rand_state); +DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; /** * prandom_u32_state - seeded pseudo-random number generator. -- GitLab From 25eaea1b33f2569f69a82dfddb3fb05384143bd0 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Fri, 25 Sep 2020 21:19:28 -0700 Subject: [PATCH 1161/1304] mm: replace memmap_context by meminit_context commit c1d0da83358a2316d9be7f229f26126dbaa07468 upstream. Patch series "mm: fix memory to node bad links in sysfs", v3. 
Sometimes, firmware may expose interleaved memory layout like this: Early memory node ranges node 1: [mem 0x0000000000000000-0x000000011fffffff] node 2: [mem 0x0000000120000000-0x000000014fffffff] node 1: [mem 0x0000000150000000-0x00000001ffffffff] node 0: [mem 0x0000000200000000-0x000000048fffffff] node 2: [mem 0x0000000490000000-0x00000007ffffffff] In that case, we can see memory blocks assigned to multiple nodes in sysfs: $ ls -l /sys/devices/system/memory/memory21 total 0 lrwxrwxrwx 1 root root 0 Aug 24 05:27 node1 -> ../../node/node1 lrwxrwxrwx 1 root root 0 Aug 24 05:27 node2 -> ../../node/node2 -rw-r--r-- 1 root root 65536 Aug 24 05:27 online -r--r--r-- 1 root root 65536 Aug 24 05:27 phys_device -r--r--r-- 1 root root 65536 Aug 24 05:27 phys_index drwxr-xr-x 2 root root 0 Aug 24 05:27 power -r--r--r-- 1 root root 65536 Aug 24 05:27 removable -rw-r--r-- 1 root root 65536 Aug 24 05:27 state lrwxrwxrwx 1 root root 0 Aug 24 05:25 subsystem -> ../../../../bus/memory -rw-r--r-- 1 root root 65536 Aug 24 05:25 uevent -r--r--r-- 1 root root 65536 Aug 24 05:27 valid_zones The same applies in the node's directory with a memory21 link in both the node1 and node2's directory. This is wrong but doesn't prevent the system to run. However when later, one of these memory blocks is hot-unplugged and then hot-plugged, the system is detecting an inconsistency in the sysfs layout and a BUG_ON() is raised: kernel BUG at /Users/laurent/src/linux-ppc/mm/memory_hotplug.c:1084! 
LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries Modules linked in: rpadlpar_io rpaphp pseries_rng rng_core vmx_crypto gf128mul binfmt_misc ip_tables x_tables xfs libcrc32c crc32c_vpmsum autofs4 CPU: 8 PID: 10256 Comm: drmgr Not tainted 5.9.0-rc1+ #25 Call Trace: add_memory_resource+0x23c/0x340 (unreliable) __add_memory+0x5c/0xf0 dlpar_add_lmb+0x1b4/0x500 dlpar_memory+0x1f8/0xb80 handle_dlpar_errorlog+0xc0/0x190 dlpar_store+0x198/0x4a0 kobj_attr_store+0x30/0x50 sysfs_kf_write+0x64/0x90 kernfs_fop_write+0x1b0/0x290 vfs_write+0xe8/0x290 ksys_write+0xdc/0x130 system_call_exception+0x160/0x270 system_call_common+0xf0/0x27c This has been seen on PowerPC LPAR. The root cause of this issue is that when node's memory is registered, the range used can overlap another node's range, thus the memory block is registered to multiple nodes in sysfs. There are two issues here: (a) The sysfs memory and node's layouts are broken due to these multiple links (b) The link errors in link_mem_sections() should not lead to a system panic. To address (a) register_mem_sect_under_node should not rely on the system state to detect whether the link operation is triggered by a hot plug operation or not. This is addressed by the patches 1 and 2 of this series. Issue (b) will be addressed separately. This patch (of 2): The memmap_context enum is used to detect whether a memory operation is due to a hot-add operation or happening at boot time. Make it general to the hotplug operation and rename it as meminit_context. There is no functional change introduced by this patch Suggested-by: David Hildenbrand Signed-off-by: Laurent Dufour Signed-off-by: Andrew Morton Reviewed-by: David Hildenbrand Reviewed-by: Oscar Salvador Acked-by: Michal Hocko Cc: Greg Kroah-Hartman Cc: "Rafael J . 
Wysocki" Cc: Nathan Lynch Cc: Scott Cheloha Cc: Tony Luck Cc: Fenghua Yu Cc: Link: https://lkml.kernel.org/r/20200915094143.79181-1-ldufour@linux.ibm.com Link: https://lkml.kernel.org/r/20200915132624.9723-1-ldufour@linux.ibm.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/ia64/mm/init.c | 6 +++--- include/linux/mm.h | 2 +- include/linux/mmzone.h | 11 ++++++++--- mm/memory_hotplug.c | 2 +- mm/page_alloc.c | 11 ++++++----- 5 files changed, 19 insertions(+), 13 deletions(-) diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 79e5cc70f1fd..561e2573bd34 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -499,7 +499,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg) if (map_start < map_end) memmap_init_zone((unsigned long)(map_end - map_start), args->nid, args->zone, page_to_pfn(map_start), - MEMMAP_EARLY, NULL); + MEMINIT_EARLY, NULL); return 0; } @@ -508,8 +508,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone, unsigned long start_pfn) { if (!vmem_map) { - memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, - NULL); + memmap_init_zone(size, nid, zone, start_pfn, + MEMINIT_EARLY, NULL); } else { struct page *start; struct memmap_init_callback_data args; diff --git a/include/linux/mm.h b/include/linux/mm.h index 05bc5f25ab85..83828c118b6b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2179,7 +2179,7 @@ static inline void zero_resv_unavail(void) {} extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, - enum memmap_context, struct vmem_altmap *); + enum meminit_context, struct vmem_altmap *); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index fdd93a39f1fa..fa02014eba8e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -759,10 +759,15 @@ bool 
zone_watermark_ok(struct zone *z, unsigned int order, unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx); -enum memmap_context { - MEMMAP_EARLY, - MEMMAP_HOTPLUG, +/* + * Memory initialization context, use to differentiate memory added by + * the platform statically or via memory hotplug interface. + */ +enum meminit_context { + MEMINIT_EARLY, + MEMINIT_HOTPLUG, }; + extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, unsigned long size); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index aae7ff485671..c839c4ad4871 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -733,7 +733,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, * are reserved so nobody should be touching them so we should be safe */ memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, - MEMMAP_HOTPLUG, altmap); + MEMINIT_HOTPLUG, altmap); set_zone_contiguous(zone); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5717ee66c8b3..545800433dfb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5480,7 +5480,7 @@ void __ref build_all_zonelists(pg_data_t *pgdat) * done. Non-atomic initialization, single-pass. */ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, - unsigned long start_pfn, enum memmap_context context, + unsigned long start_pfn, enum meminit_context context, struct vmem_altmap *altmap) { unsigned long end_pfn = start_pfn + size; @@ -5507,7 +5507,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, * There can be holes in boot-time mem_map[]s handed to this * function. They do not exist on hotplugged memory. 
*/ - if (context != MEMMAP_EARLY) + if (context != MEMINIT_EARLY) goto not_early; if (!early_pfn_valid(pfn)) @@ -5542,7 +5542,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, not_early: page = pfn_to_page(pfn); __init_single_page(page, pfn, zone, nid); - if (context == MEMMAP_HOTPLUG) + if (context == MEMINIT_HOTPLUG) SetPageReserved(page); /* @@ -5557,7 +5557,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, * check here not to call set_pageblock_migratetype() against * pfn out of zone. * - * Please note that MEMMAP_HOTPLUG path doesn't clear memmap + * Please note that MEMINIT_HOTPLUG path doesn't clear memmap * because this is done early in sparse_add_one_section */ if (!(pfn & (pageblock_nr_pages - 1))) { @@ -5578,7 +5578,8 @@ static void __meminit zone_init_free_lists(struct zone *zone) #ifndef __HAVE_ARCH_MEMMAP_INIT #define memmap_init(size, nid, zone, start_pfn) \ - memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL) + memmap_init_zone((size), (nid), (zone), (start_pfn), \ + MEMINIT_EARLY, NULL) #endif static int zone_batchsize(struct zone *zone) -- GitLab From b6f69f72c15d7f973f5709c5351f378f235b3654 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Fri, 25 Sep 2020 21:19:31 -0700 Subject: [PATCH 1162/1304] mm: don't rely on system state to detect hot-plug operations commit f85086f95fa36194eb0db5cd5c12e56801b98523 upstream. In register_mem_sect_under_node() the system_state's value is checked to detect whether the call is made during boot time or during an hot-plug operation. Unfortunately, that check against SYSTEM_BOOTING is wrong because regular memory is registered at SYSTEM_SCHEDULING state. In addition, memory hot-plug operation can be triggered at this system state by the ACPI [1]. So checking against the system state is not enough. 
The consequence is that on system with interleaved node's ranges like this: Early memory node ranges node 1: [mem 0x0000000000000000-0x000000011fffffff] node 2: [mem 0x0000000120000000-0x000000014fffffff] node 1: [mem 0x0000000150000000-0x00000001ffffffff] node 0: [mem 0x0000000200000000-0x000000048fffffff] node 2: [mem 0x0000000490000000-0x00000007ffffffff] This can be seen on PowerPC LPAR after multiple memory hot-plug and hot-unplug operations are done. At the next reboot the node's memory ranges can be interleaved and since the call to link_mem_sections() is made in topology_init() while the system is in the SYSTEM_SCHEDULING state, the node's id is not checked, and the sections registered to multiple nodes: $ ls -l /sys/devices/system/memory/memory21/node* total 0 lrwxrwxrwx 1 root root 0 Aug 24 05:27 node1 -> ../../node/node1 lrwxrwxrwx 1 root root 0 Aug 24 05:27 node2 -> ../../node/node2 In that case, the system is able to boot but if later one of these memory blocks is hot-unplugged and then hot-plugged, the sysfs inconsistency is detected and this is triggering a BUG_ON(): kernel BUG at /Users/laurent/src/linux-ppc/mm/memory_hotplug.c:1084! Oops: Exception in kernel mode, sig: 5 [#1] LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries Modules linked in: rpadlpar_io rpaphp pseries_rng rng_core vmx_crypto gf128mul binfmt_misc ip_tables x_tables xfs libcrc32c crc32c_vpmsum autofs4 CPU: 8 PID: 10256 Comm: drmgr Not tainted 5.9.0-rc1+ #25 Call Trace: add_memory_resource+0x23c/0x340 (unreliable) __add_memory+0x5c/0xf0 dlpar_add_lmb+0x1b4/0x500 dlpar_memory+0x1f8/0xb80 handle_dlpar_errorlog+0xc0/0x190 dlpar_store+0x198/0x4a0 kobj_attr_store+0x30/0x50 sysfs_kf_write+0x64/0x90 kernfs_fop_write+0x1b0/0x290 vfs_write+0xe8/0x290 ksys_write+0xdc/0x130 system_call_exception+0x160/0x270 system_call_common+0xf0/0x27c This patch addresses the root cause by not relying on the system_state value to detect whether the call is due to a hot-plug operation.
An extra parameter is added to link_mem_sections() detailing whether the operation is due to a hot-plug operation. [1] According to Oscar Salvador, using this qemu command line, ACPI memory hotplug operations are raised at SYSTEM_SCHEDULING state: $QEMU -enable-kvm -machine pc -smp 4,sockets=4,cores=1,threads=1 -cpu host -monitor pty \ -m size=$MEM,slots=255,maxmem=4294967296k \ -numa node,nodeid=0,cpus=0-3,mem=512 -numa node,nodeid=1,mem=512 \ -object memory-backend-ram,id=memdimm0,size=134217728 -device pc-dimm,node=0,memdev=memdimm0,id=dimm0,slot=0 \ -object memory-backend-ram,id=memdimm1,size=134217728 -device pc-dimm,node=0,memdev=memdimm1,id=dimm1,slot=1 \ -object memory-backend-ram,id=memdimm2,size=134217728 -device pc-dimm,node=0,memdev=memdimm2,id=dimm2,slot=2 \ -object memory-backend-ram,id=memdimm3,size=134217728 -device pc-dimm,node=0,memdev=memdimm3,id=dimm3,slot=3 \ -object memory-backend-ram,id=memdimm4,size=134217728 -device pc-dimm,node=1,memdev=memdimm4,id=dimm4,slot=4 \ -object memory-backend-ram,id=memdimm5,size=134217728 -device pc-dimm,node=1,memdev=memdimm5,id=dimm5,slot=5 \ -object memory-backend-ram,id=memdimm6,size=134217728 -device pc-dimm,node=1,memdev=memdimm6,id=dimm6,slot=6 \ Fixes: 4fbce633910e ("mm/memory_hotplug.c: make register_mem_sect_under_node() a callback of walk_memory_range()") Signed-off-by: Laurent Dufour Signed-off-by: Andrew Morton Reviewed-by: David Hildenbrand Reviewed-by: Oscar Salvador Acked-by: Michal Hocko Cc: Greg Kroah-Hartman Cc: "Rafael J. 
Wysocki" Cc: Fenghua Yu Cc: Nathan Lynch Cc: Scott Cheloha Cc: Tony Luck Cc: Link: https://lkml.kernel.org/r/20200915094143.79181-3-ldufour@linux.ibm.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/base/node.c | 84 ++++++++++++++++++++++++++++---------------- include/linux/node.h | 11 +++--- mm/memory_hotplug.c | 3 +- 3 files changed, 63 insertions(+), 35 deletions(-) diff --git a/drivers/base/node.c b/drivers/base/node.c index f3565c2dbc52..503e2f90e58e 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -403,10 +403,32 @@ static int __ref get_nid_for_pfn(unsigned long pfn) return pfn_to_nid(pfn); } +static int do_register_memory_block_under_node(int nid, + struct memory_block *mem_blk) +{ + int ret; + + /* + * If this memory block spans multiple nodes, we only indicate + * the last processed node. + */ + mem_blk->nid = nid; + + ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, + &mem_blk->dev.kobj, + kobject_name(&mem_blk->dev.kobj)); + if (ret) + return ret; + + return sysfs_create_link_nowarn(&mem_blk->dev.kobj, + &node_devices[nid]->dev.kobj, + kobject_name(&node_devices[nid]->dev.kobj)); +} + /* register memory section under specified node if it spans that node */ -int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg) +int register_mem_block_under_node_early(struct memory_block *mem_blk, void *arg) { - int ret, nid = *(int *)arg; + int nid = *(int *)arg; unsigned long pfn, sect_start_pfn, sect_end_pfn; sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); @@ -426,38 +448,33 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, void *arg) } /* - * We need to check if page belongs to nid only for the boot - * case, during hotplug we know that all pages in the memory - * block belong to the same node. 
- */ - if (system_state == SYSTEM_BOOTING) { - page_nid = get_nid_for_pfn(pfn); - if (page_nid < 0) - continue; - if (page_nid != nid) - continue; - } - - /* - * If this memory block spans multiple nodes, we only indicate - * the last processed node. + * We need to check if page belongs to nid only at the boot + * case because node's ranges can be interleaved. */ - mem_blk->nid = nid; - - ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, - &mem_blk->dev.kobj, - kobject_name(&mem_blk->dev.kobj)); - if (ret) - return ret; + page_nid = get_nid_for_pfn(pfn); + if (page_nid < 0) + continue; + if (page_nid != nid) + continue; - return sysfs_create_link_nowarn(&mem_blk->dev.kobj, - &node_devices[nid]->dev.kobj, - kobject_name(&node_devices[nid]->dev.kobj)); + return do_register_memory_block_under_node(nid, mem_blk); } /* mem section does not span the specified node */ return 0; } +/* + * During hotplug we know that all pages in the memory block belong to the same + * node. + */ +static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk, + void *arg) +{ + int nid = *(int *)arg; + + return do_register_memory_block_under_node(nid, mem_blk); +} + /* * Unregister a memory block device under the node it spans. Memory blocks * with multiple nodes cannot be offlined and therefore also never be removed. 
@@ -473,10 +490,17 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk) kobject_name(&node_devices[mem_blk->nid]->dev.kobj)); } -int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn) +int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, + enum meminit_context context) { - return walk_memory_range(start_pfn, end_pfn, (void *)&nid, - register_mem_sect_under_node); + walk_memory_blocks_func_t func; + + if (context == MEMINIT_HOTPLUG) + func = register_mem_block_under_node_hotplug; + else + func = register_mem_block_under_node_early; + + return walk_memory_range(start_pfn, end_pfn, (void *)&nid, func); } #ifdef CONFIG_HUGETLBFS diff --git a/include/linux/node.h b/include/linux/node.h index 708939bae9aa..a79ec4492650 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -32,11 +32,13 @@ extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) -extern int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn); +int link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context); #else static inline int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn) + unsigned long end_pfn, + enum meminit_context context) { return 0; } @@ -61,7 +63,8 @@ static inline int register_one_node(int nid) if (error) return error; /* link memory sections under this node */ - error = link_mem_sections(nid, start_pfn, end_pfn); + error = link_mem_sections(nid, start_pfn, end_pfn, + MEMINIT_EARLY); } return error; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index c839c4ad4871..e60e28131f67 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1102,7 +1102,8 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online) } /* link memory sections under this node.*/ - ret = 
link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1)); + ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1), + MEMINIT_HOTPLUG); BUG_ON(ret); /* create new memmap entry */ -- GitLab From 1c3886dc302329f199cc04f8a56ba44d17a0df16 Mon Sep 17 00:00:00 2001 From: Or Cohen Date: Thu, 3 Sep 2020 21:05:28 -0700 Subject: [PATCH 1163/1304] net/packet: fix overflow in tpacket_rcv commit acf69c946233259ab4d64f8869d4037a198c7f06 upstream. Using tp_reserve to calculate netoff can overflow as tp_reserve is unsigned int and netoff is unsigned short. This may lead to macoff receiving a smaller value than sizeof(struct virtio_net_hdr), and if po->has_vnet_hdr is set, an out-of-bounds write will occur when calling virtio_net_hdr_from_skb. The bug is fixed by converting netoff to unsigned int and checking if it exceeds USHRT_MAX. This addresses CVE-2020-14386 Fixes: 8913336a7e8d ("packet: add PACKET_RESERVE sockopt") Signed-off-by: Or Cohen Signed-off-by: Eric Dumazet Signed-off-by: Linus Torvalds [ snu: backported to pre-5.3, changed tp_drops counting/locking ] Signed-off-by: Stefan Nuernberger CC: David Woodhouse CC: Amit Shah CC: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- net/packet/af_packet.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index b3caf1eac6af..16b745d254fe 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2162,7 +2162,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, int skb_len = skb->len; unsigned int snaplen, res; unsigned long status = TP_STATUS_USER; - unsigned short macoff, netoff, hdrlen; + unsigned short macoff, hdrlen; + unsigned int netoff; struct sk_buff *copy_skb = NULL; struct timespec ts; __u32 ts_status; @@ -2225,6 +2226,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, } macoff = netoff - maclen; } + if (netoff > USHRT_MAX) { + 
spin_lock(&sk->sk_receive_queue.lock); + po->stats.stats1.tp_drops++; + spin_unlock(&sk->sk_receive_queue.lock); + goto drop_n_restore; + } if (po->tp_version <= TPACKET_V2) { if (macoff + snaplen > po->rx_ring.frame_size) { if (po->copy_thresh && -- GitLab From 3e3bbc4d23eeb90bf282e98c7dfeca7702df3169 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 9 Sep 2020 22:25:06 -0400 Subject: [PATCH 1164/1304] epoll: do not insert into poll queues until all sanity checks are done commit f8d4f44df056c5b504b0d49683fb7279218fd207 upstream. Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/eventpoll.c | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 61a52bb26d12..ed6c06dbb536 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1450,6 +1450,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, RCU_INIT_POINTER(epi->ws, NULL); } + /* Add the current item to the list of active epoll hook for this file */ + spin_lock(&tfile->f_lock); + list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); + spin_unlock(&tfile->f_lock); + + /* + * Add the current item to the RB tree. All RB tree operations are + * protected by "mtx", and ep_insert() is called with "mtx" held. + */ + ep_rbtree_insert(ep, epi); + + /* now check if we've created too many backpaths */ + error = -EINVAL; + if (full_check && reverse_path_check()) + goto error_remove_epi; + /* Initialize the poll table using the queue callback */ epq.epi = epi; init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); @@ -1472,22 +1488,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, if (epi->nwait < 0) goto error_unregister; - /* Add the current item to the list of active epoll hook for this file */ - spin_lock(&tfile->f_lock); - list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); - spin_unlock(&tfile->f_lock); - - /* - * Add the current item to the RB tree. 
All RB tree operations are - * protected by "mtx", and ep_insert() is called with "mtx" held. - */ - ep_rbtree_insert(ep, epi); - - /* now check if we've created too many backpaths */ - error = -EINVAL; - if (full_check && reverse_path_check()) - goto error_remove_epi; - /* We have to drop the new item inside our item list to keep track of it */ spin_lock_irq(&ep->wq.lock); @@ -1516,6 +1516,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, return 0; +error_unregister: + ep_unregister_pollwait(ep, epi); error_remove_epi: spin_lock(&tfile->f_lock); list_del_rcu(&epi->fllink); @@ -1523,9 +1525,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, rb_erase_cached(&epi->rbn, &ep->rbr); -error_unregister: - ep_unregister_pollwait(ep, epi); - /* * We need to do this because an event could have been arrived on some * allocated wait queue. Note that we don't care about the ep->ovflist -- GitLab From ff329915a5b1f6778344a6fc7b060c991376b095 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 10 Sep 2020 08:30:05 -0400 Subject: [PATCH 1165/1304] epoll: replace ->visited/visited_list with generation count commit 18306c404abe18a0972587a6266830583c60c928 upstream. removes the need to clear it, along with the races. 
Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/eventpoll.c | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index ed6c06dbb536..9ae301bc016c 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -222,8 +222,7 @@ struct eventpoll { struct file *file; /* used to optimize loop detection check */ - int visited; - struct list_head visited_list_link; + u64 gen; #ifdef CONFIG_NET_RX_BUSY_POLL /* used to track busy poll napi_id */ @@ -273,6 +272,8 @@ static long max_user_watches __read_mostly; */ static DEFINE_MUTEX(epmutex); +static u64 loop_check_gen = 0; + /* Used to check for epoll file descriptor inclusion loops */ static struct nested_calls poll_loop_ncalls; @@ -282,9 +283,6 @@ static struct kmem_cache *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ static struct kmem_cache *pwq_cache __read_mostly; -/* Visited nodes during ep_loop_check(), so we can unset them when we finish */ -static LIST_HEAD(visited_list); - /* * List of files with newly added links, where we may need to limit the number * of emanating paths. Protected by the epmutex. 
@@ -1867,13 +1865,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) struct epitem *epi; mutex_lock_nested(&ep->mtx, call_nests + 1); - ep->visited = 1; - list_add(&ep->visited_list_link, &visited_list); + ep->gen = loop_check_gen; for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { epi = rb_entry(rbp, struct epitem, rbn); if (unlikely(is_file_epoll(epi->ffd.file))) { ep_tovisit = epi->ffd.file->private_data; - if (ep_tovisit->visited) + if (ep_tovisit->gen == loop_check_gen) continue; error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, ep_loop_check_proc, epi->ffd.file, @@ -1914,18 +1911,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) */ static int ep_loop_check(struct eventpoll *ep, struct file *file) { - int ret; - struct eventpoll *ep_cur, *ep_next; - - ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, ep_loop_check_proc, file, ep, current); - /* clear visited list */ - list_for_each_entry_safe(ep_cur, ep_next, &visited_list, - visited_list_link) { - ep_cur->visited = 0; - list_del(&ep_cur->visited_list_link); - } - return ret; } static void clear_tfile_check_list(void) @@ -2147,6 +2134,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, error_tgt_fput: if (full_check) { clear_tfile_check_list(); + loop_check_gen++; mutex_unlock(&epmutex); } -- GitLab From 90ef231ba534d43033884b8560df26e608ca0a21 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 10 Sep 2020 08:33:27 -0400 Subject: [PATCH 1166/1304] epoll: EPOLL_CTL_ADD: close the race in decision to take fast path commit fe0a916c1eae8e17e86c3753d13919177d63ed7e upstream. Checking for the lack of epitems refering to the epoll we want to insert into is not enough; we might have an insertion of that epoll into another one that has already collected the set of files to recheck for excessive reverse paths, but hasn't gotten to creating/inserting the epitem for it. 
However, any such insertion in progress can be detected - it will update the generation count in our epoll when it's done looking through it for files to check. That gets done under ->mtx of our epoll and that allows us to detect that safely. We are *not* holding epmutex here, so the generation count is not stable. However, since both the update of ep->gen by loop check and (later) insertion into ->f_ep_link are done with ep->mtx held, we are fine - the sequence is grab epmutex bump loop_check_gen ... grab tep->mtx // 1 tep->gen = loop_check_gen ... drop tep->mtx // 2 ... grab tep->mtx // 3 ... insert into ->f_ep_link ... drop tep->mtx // 4 bump loop_check_gen drop epmutex and if the fastpath check in another thread happens for that eventpoll, it can come * before (1) - in that case fastpath is just fine * after (4) - we'll see non-empty ->f_ep_link, slow path taken * between (2) and (3) - loop_check_gen is stable, with ->mtx providing barriers and we end up taking slow path. Note that ->f_ep_link emptiness check is slightly racy - we are protected against insertions into that list, but removals can happen right under us. Not a problem - in the worst case we'll end up taking a slow path for no good reason. Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/eventpoll.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 9ae301bc016c..c4c833bad4b7 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -2074,6 +2074,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_lock_nested(&ep->mtx, 0); if (op == EPOLL_CTL_ADD) { if (!list_empty(&f.file->f_ep_links) || + ep->gen == loop_check_gen || is_file_epoll(tf.file)) { full_check = 1; mutex_unlock(&ep->mtx); -- GitLab From ced8ce5d2157142c469eccc5eef5ea8ad579fa5e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 24 Sep 2020 19:41:58 -0400 Subject: [PATCH 1167/1304] ep_create_wakeup_source(): dentry name can change under you... 
commit 3701cb59d892b88d569427586f01491552f377b1 upstream. or get freed, for that matter, if it's a long (separately stored) name. Signed-off-by: Al Viro Signed-off-by: Greg Kroah-Hartman --- fs/eventpoll.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index c4c833bad4b7..a4a32b79e832 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1376,7 +1376,7 @@ static int reverse_path_check(void) static int ep_create_wakeup_source(struct epitem *epi) { - const char *name; + struct name_snapshot n; struct wakeup_source *ws; if (!epi->ep->ws) { @@ -1385,8 +1385,9 @@ static int ep_create_wakeup_source(struct epitem *epi) return -ENOMEM; } - name = epi->ffd.file->f_path.dentry->d_name.name; - ws = wakeup_source_register(name); + take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry); + ws = wakeup_source_register(n.name); + release_dentry_name_snapshot(&n); if (!ws) return -ENOMEM; -- GitLab From 289fe546ea16c2dcb57c5198c5a7b7387604530e Mon Sep 17 00:00:00 2001 From: Will McVicker Date: Mon, 24 Aug 2020 19:38:32 +0000 Subject: [PATCH 1168/1304] netfilter: ctnetlink: add a range check for l3/l4 protonum commit 1cc5ef91d2ff94d2bf2de3b3585423e8a1051cb6 upstream. The indexes to the nf_nat_l[34]protos arrays come from userspace. So check the tuple's family, e.g. l3num, when creating the conntrack in order to prevent an OOB memory access during setup. Here is an example kernel panic on 4.14.180 when userspace passes in an index greater than NFPROTO_NUMPROTO. Internal error: Oops - BUG: 0 [#1] PREEMPT SMP Modules linked in:... Process poc (pid: 5614, stack limit = 0x00000000a3933121) CPU: 4 PID: 5614 Comm: poc Tainted: G S W O 4.14.180-g051355490483 Hardware name: Qualcomm Technologies, Inc. SM8150 V2 PM8150 Google Inc. MSM task: 000000002a3dfffe task.stack: 00000000a3933121 pc : __cfi_check_fail+0x1c/0x24 lr : __cfi_check_fail+0x1c/0x24 ... 
Call trace: __cfi_check_fail+0x1c/0x24 name_to_dev_t+0x0/0x468 nfnetlink_parse_nat_setup+0x234/0x258 ctnetlink_parse_nat_setup+0x4c/0x228 ctnetlink_new_conntrack+0x590/0xc40 nfnetlink_rcv_msg+0x31c/0x4d4 netlink_rcv_skb+0x100/0x184 nfnetlink_rcv+0xf4/0x180 netlink_unicast+0x360/0x770 netlink_sendmsg+0x5a0/0x6a4 ___sys_sendmsg+0x314/0x46c SyS_sendmsg+0xb4/0x108 el0_svc_naked+0x34/0x38 This crash is not happening since 5.4+, however, ctnetlink still allows for creating entries with unsupported layer 3 protocol number. Fixes: c1d10adb4a521 ("[NETFILTER]: Add ctnetlink port for nf_conntrack") Signed-off-by: Will McVicker [pablo@netfilter.org: rebased original patch on top of nf.git] Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nf_conntrack_netlink.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 31fa94064a62..0b89609a6e9d 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1129,6 +1129,8 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[], if (!tb[CTA_TUPLE_IP]) return -EINVAL; + if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6) + return -EOPNOTSUPP; tuple->src.l3num = l3num; err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple); -- GitLab From a1b977b49b66c75e6c51a515f6700371ae720217 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 7 Oct 2020 08:00:09 +0200 Subject: [PATCH 1169/1304] Linux 4.19.150 Tested-by: Jon Hunter Tested-by: Shuah Khan Tested-by: Linux Kernel Functional Testing Tested-by: Guenter Roeck Link: https://lore.kernel.org/r/20201005142108.650363140@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3ff5cf33ef55..65485185bec2 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 149 
+SUBLEVEL = 150 EXTRAVERSION = NAME = "People's Front" -- GitLab From 6b3e43947043ea06259bbd7ed5569bdb6d22fd51 Mon Sep 17 00:00:00 2001 From: Shadab Naseem Date: Wed, 7 Oct 2020 16:11:46 +0530 Subject: [PATCH 1170/1304] soc: qcom: Add LLCC driver for Orchid Add QCOM LLCC cache controller driver for Orchid SoC. Change-Id: I0f8b735653f3c4c20701f8ccbd6881dcce9edb50 Signed-off-by: Shadab Naseem --- drivers/soc/qcom/Makefile | 2 +- drivers/soc/qcom/llcc-orchid.c | 88 ++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 1 deletion(-) create mode 100644 drivers/soc/qcom/llcc-orchid.c diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 62a34a5e20b8..f033e5ba73eb 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -24,7 +24,7 @@ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o obj-$(CONFIG_QCOM_IPCC) += qcom_ipcc.o obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o obj-$(CONFIG_QCOM_KONA_LLCC) += llcc-kona.o -obj-$(CONFIG_QCOM_LITO_LLCC) += llcc-lito.o +obj-$(CONFIG_QCOM_LITO_LLCC) += llcc-lito.o llcc-orchid.o obj-$(CONFIG_QCOM_LAGOON_LLCC) += llcc-lagoon.o obj-$(CONFIG_QCOM_LLCC_PERFMON) += llcc_perfmon.o obj-$(CONFIG_QCOM_APR) += apr.o diff --git a/drivers/soc/qcom/llcc-orchid.c b/drivers/soc/qcom/llcc-orchid.c new file mode 100644 index 000000000000..8b800a89674b --- /dev/null +++ b/drivers/soc/qcom/llcc-orchid.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + */ + +#include +#include +#include +#include +#include + +/* + * SCT entry consists of the following parameters + * uid: Unique id for the client's use case + * slice_id: llcc slice id for each client + * max_cap: The maximum capacity of the cache slice provided in KB + * priority: Priority of the client used to select victim line for replacement + * fixed_size: Determines if the slice has a fixed capacity + * bonus_ways: Bonus ways to be used by any slice, bonus way is used only if + * it's not a reserved way. + * res_ways: Reserved ways for the cache slice, the reserved ways cannot be used + * by any other client than the one it's assigned to. + * cache_mode: Each slice operates as a cache, this controls the mode of the + * slice normal or TCM + * probe_target_ways: Determines what ways to probe for access hit. When + * configured to 1 only bonus and reserved ways are probed. + * when configured to 0 all ways in llcc are probed. + * dis_cap_alloc: Disable capacity based allocation for a client + * write_scid_en: Bit enables write cache support for a given scid. + * retain_on_pc: If this bit is set and client has maintained active vote + * then the ways assigned to this client are not flushed on power + * collapse.
+ * activate_on_init: Activate the slice immidiately after the SCT is programmed + */ +#define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, wse, rp, a) \ + { \ + .usecase_id = uid, \ + .slice_id = sid, \ + .max_cap = mc, \ + .priority = p, \ + .fixed_size = fs, \ + .bonus_ways = bway, \ + .res_ways = rway, \ + .cache_mode = cmod, \ + .probe_target_ways = ptw, \ + .dis_cap_alloc = dca, \ + .write_scid_en = wse, \ + .retain_on_pc = rp, \ + .activate_on_init = a, \ + } + +static struct llcc_slice_config orchid_data[] = { + SCT_ENTRY(LLCC_CPUSS, 1, 1536, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1), + SCT_ENTRY(LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0), + SCT_ENTRY(LLCC_MDM, 8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0), + SCT_ENTRY(LLCC_GPUHTW, 11, 256, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0), + SCT_ENTRY(LLCC_GPU, 12, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0), + SCT_ENTRY(LLCC_DISP, 16, 1536, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0), + SCT_ENTRY(LLCC_MDMPNG, 21, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0), + SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0), + SCT_ENTRY(LLCC_NPU, 23, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0), + SCT_ENTRY(LLCC_MODEMVPE, 29, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0), + SCT_ENTRY(LLCC_WRTCH, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0, 1), +}; + +static int orchid_qcom_llcc_probe(struct platform_device *pdev) +{ + return qcom_llcc_probe(pdev, orchid_data, + ARRAY_SIZE(orchid_data)); +} + +static const struct of_device_id orchid_qcom_llcc_of_match[] = { + { .compatible = "qcom,llcc-v2", }, + { }, +}; + +static struct platform_driver orchid_qcom_llcc_driver = { + .driver = { + .name = "orchid-llcc", + .of_match_table = orchid_qcom_llcc_of_match, + }, + .probe = orchid_qcom_llcc_probe, +}; +module_platform_driver(orchid_qcom_llcc_driver); + +MODULE_DESCRIPTION("QCOM orchid LLCC driver"); +MODULE_LICENSE("GPL v2"); -- GitLab From 482d6eea6bd391e57e38c3bd074db294f0336818 Mon Sep 17 00:00:00 2001 From: Neeraj Upadhyay 
Date: Mon, 12 Oct 2020 09:45:39 +0530 Subject: [PATCH 1171/1304] soc: qcom: Fix memcpy operations in ramdump_read Currently, for device memory addresses, which are not aligned on 8 byte boundary, ramdump_read() copies the bytes till the start of next aligned address. However, this calculation does not take into account the case, where copy_size is less than the number of bytes till next alignment. This causes memcpy to copy device memory to region, which is outside of the allocated buffer. Fix this by copying only the copy_size buffer for this case. Change-Id: I918ab5886d664f89c0eebd99972f5b9960e1ae00 Signed-off-by: Neeraj Upadhyay --- drivers/soc/qcom/ramdump.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c index 1555c4f44475..5de35201c7db 100644 --- a/drivers/soc/qcom/ramdump.c +++ b/drivers/soc/qcom/ramdump.c @@ -241,6 +241,8 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count, if ((unsigned long)device_mem & 0x7) { bytes_before = 8 - ((unsigned long)device_mem & 0x7); + bytes_before = min_t(unsigned long, (unsigned long)copy_size, + bytes_before); memcpy_fromio(alignbuf, device_mem, bytes_before); device_mem += bytes_before; alignbuf += bytes_before; -- GitLab From 95082849697a6eba4b1ad2a77d63f9f0cd9fdb9e Mon Sep 17 00:00:00 2001 From: Rohith Kollalsi Date: Wed, 7 Oct 2020 19:37:27 +0530 Subject: [PATCH 1172/1304] usb: dwc3: Stop active transfer on control endpoints Currently glue driver tries to issue an end transfer for control endpoints in case of usb disconnect or composition switch through dwc3_ep0_end_control_data only if ep number is 1. Just before issuing end transfer it checks the direction and based on that it either issues end transfer for ep0 out or ep0 in through dwc3_ep0_end_control_data.
If remove requests will be called for ep0 out endpoint then it doesn't issue an end transfer as resource index will be zero for ep0out endpoint and end transfer will also not be issued through dwc3_ep0_end_control_data as ep number is 0, but glue driver unmaps and gives back all the requests. This sometimes may lead to smmu fault as the requests were unmapped before issuing an end transfer. Fix this by issuing an end transfer from remove requests for control endpoints if ep number is 0. In this scenario it can be ensured that we issue end transfer for control endpoints before unmapping and giving back the requests. Change-Id: I6022c0b4e7cd62bfb7087313e39ff0ea3c9cc253 Signed-off-by: Rohith Kollalsi --- drivers/usb/dwc3/gadget.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index f820795599b2..d95ef08eebf3 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -812,7 +812,7 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep) dbg_log_string("START for %s(%d)", dep->name, dep->number); dwc3_stop_active_transfer(dwc, dep->number, true); - if (dep->number == 1 && dwc->ep0state != EP0_SETUP_PHASE) { + if (dep->number == 0 && dwc->ep0state != EP0_SETUP_PHASE) { unsigned int dir; dbg_log_string("CTRLPEND(%d)", dwc->ep0state); -- GitLab From bca62a0ae56581b0919182af412036ae11c2f431 Mon Sep 17 00:00:00 2001 From: Abhijeet Dharmapurikar Date: Fri, 12 Jul 2019 15:17:48 -0700 Subject: [PATCH 1173/1304] sched/tune: Fix improper accounting of tasks cgroup_migrate_execute() calls can_attach() and css_set_move_task() separately without holding rq->lock. The schedtune implementation breaks here, since can_attach() accounts for the task move way before the group move is committed. If the task sleeps right after can_attach(), the sleep is accounted towards the previous group. This ends up in disparity of counts between group. 
Consider this race: TaskA is moved from root_grp to topapp_grp, root_grp's tasks = 1 and topapp tasks =0 right before the move and TaskB is moving it. On cpu X TaskA runs * cgroup_migrate_execute() schedtune_can_attach() root_grp.tasks--; topapp_grp.tasks++; (root_grp.tasks = 0 and topapp_grp.tasks = 1) *right at this moment context is switched and TaskA runs. *TaskA sleeps dequeue_task() schedtune_dequeue_task() schedtune_task_update root_grp.tasks--; //TaskA has not really "switched" group, so it decrements from the root_grp, however can_attach() has accounted the task move and this leaves us with root_grp.tasks = 0 (it is -ve value protected) topapp.grp.tasks = 1 Now even if cpuX is idle (TaskA is long gone sleeping), its topapp_grp.tasks continues to stay +ve and it is subject to topapp's boost unnecessarily. An easy way to fix this is to move the group change accounting in attach() callback which gets called _after_ css_set_move_task(). Also maintain the task's current idx in struct task_struct as it moves between groups. The task's enqueue/dequeue is accounted towards the cached idx value. In an event when the task dequeues just before group changes, it gets subtracted from the old group, which is correct because the task would have bumped up the old group's count. If the task changes group while its running, the attach() callback has to decrement from the old group and increment from the new group so that the next dequeue will subtract from the new group. IOW the attach() callback has to account only for running task but has to update the cached index for both running and sleeping task. The current uses task->on_rq != 0 check to determine whether a task is queued on the runqueue or not. This is an incorrect check. Because task->on_rq is set to TASK_ON_RQ_MIGRATING (value = 2) during migration. Fix this by using task_on_rq_queued() to check if a task is queued or not. 
Change-Id: If412da5a239c18d9122cfad2be59b355c14c068f Signed-off-by: Abhijeet Dharmapurikar Co-developed-by: Pavankumar Kondeti Signed-off-by: Pavankumar Kondeti --- include/linux/sched.h | 3 + init/init_task.c | 3 + kernel/sched/tune.c | 162 +++++++++++++++++++----------------------- 3 files changed, 79 insertions(+), 89 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 45359686e27b..e66bc29128e5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -885,6 +885,9 @@ struct task_struct { #ifdef CONFIG_CGROUP_SCHED struct task_group *sched_task_group; +#endif +#ifdef CONFIG_SCHED_TUNE + int stune_idx; #endif struct sched_dl_entity dl; diff --git a/init/init_task.c b/init/init_task.c index 53e6e27ea8b5..3f6ec9b2bf0f 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -93,6 +93,9 @@ struct task_struct init_task #endif #ifdef CONFIG_CGROUP_SCHED .sched_task_group = &root_task_group, +#endif +#ifdef CONFIG_SCHED_TUNE + .stune_idx = 0, #endif .ptraced = LIST_HEAD_INIT(init_task.ptraced), .ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry), diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c index 93e3acf0e0aa..754e1b228d13 100644 --- a/kernel/sched/tune.c +++ b/kernel/sched/tune.c @@ -420,7 +420,6 @@ void schedtune_enqueue_task(struct task_struct *p, int cpu) { struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu); unsigned long irq_flags; - struct schedtune *st; int idx; if (unlikely(!schedtune_initialized)) @@ -432,90 +431,16 @@ void schedtune_enqueue_task(struct task_struct *p, int cpu) * do_exit()::cgroup_exit() and task migration. 
*/ raw_spin_lock_irqsave(&bg->lock, irq_flags); - rcu_read_lock(); - st = task_schedtune(p); - idx = st->idx; + idx = p->stune_idx; schedtune_tasks_update(p, cpu, idx, ENQUEUE_TASK); - rcu_read_unlock(); raw_spin_unlock_irqrestore(&bg->lock, irq_flags); } int schedtune_can_attach(struct cgroup_taskset *tset) { - struct task_struct *task; - struct cgroup_subsys_state *css; - struct boost_groups *bg; - struct rq_flags rq_flags; - unsigned int cpu; - struct rq *rq; - int src_bg; /* Source boost group index */ - int dst_bg; /* Destination boost group index */ - int tasks; - u64 now; - - if (unlikely(!schedtune_initialized)) - return 0; - - - cgroup_taskset_for_each(task, css, tset) { - - /* - * Lock the CPU's RQ the task is enqueued to avoid race - * conditions with migration code while the task is being - * accounted - */ - rq = task_rq_lock(task, &rq_flags); - - if (!task->on_rq) { - task_rq_unlock(rq, task, &rq_flags); - continue; - } - - /* - * Boost group accouting is protected by a per-cpu lock and requires - * interrupt to be disabled to avoid race conditions on... - */ - cpu = cpu_of(rq); - bg = &per_cpu(cpu_boost_groups, cpu); - raw_spin_lock(&bg->lock); - - dst_bg = css_st(css)->idx; - src_bg = task_schedtune(task)->idx; - - /* - * Current task is not changing boostgroup, which can - * happen when the new hierarchy is in use. - */ - if (unlikely(dst_bg == src_bg)) { - raw_spin_unlock(&bg->lock); - task_rq_unlock(rq, task, &rq_flags); - continue; - } - - /* - * This is the case of a RUNNABLE task which is switching its - * current boost group. 
- */ - - /* Move task from src to dst boost group */ - tasks = bg->group[src_bg].tasks - 1; - bg->group[src_bg].tasks = max(0, tasks); - bg->group[dst_bg].tasks += 1; - - /* Update boost hold start for this group */ - now = sched_clock_cpu(cpu); - bg->group[dst_bg].ts = now; - - /* Force boost group re-evaluation at next boost check */ - bg->boost_ts = now - SCHEDTUNE_BOOST_HOLD_NS; - - raw_spin_unlock(&bg->lock); - task_rq_unlock(rq, task, &rq_flags); - } - return 0; } @@ -580,7 +505,6 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu) { struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu); unsigned long irq_flags; - struct schedtune *st; int idx; if (unlikely(!schedtune_initialized)) @@ -591,14 +515,11 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu) * interrupt to be disabled to avoid race conditions on... */ raw_spin_lock_irqsave(&bg->lock, irq_flags); - rcu_read_lock(); - st = task_schedtune(p); - idx = st->idx; + idx = p->stune_idx; schedtune_tasks_update(p, cpu, idx, DEQUEUE_TASK); - rcu_read_unlock(); raw_spin_unlock_irqrestore(&bg->lock, irq_flags); } @@ -677,11 +598,19 @@ boost_read(struct cgroup_subsys_state *css, struct cftype *cft) return st->boost; } -#ifdef CONFIG_SCHED_WALT static void schedtune_attach(struct cgroup_taskset *tset) { struct task_struct *task; struct cgroup_subsys_state *css; + struct boost_groups *bg; + struct rq_flags rq_flags; + unsigned int cpu; + struct rq *rq; + int src_idx; /* Source boost group index */ + int dst_idx; /* Destination boost group index */ + int tasks; + u64 now; +#ifdef CONFIG_SCHED_WALT struct schedtune *st; bool colocate; @@ -692,13 +621,68 @@ static void schedtune_attach(struct cgroup_taskset *tset) cgroup_taskset_for_each(task, css, tset) sync_cgroup_colocation(task, colocate); -} -#else -static void schedtune_attach(struct cgroup_taskset *tset) -{ -} #endif + cgroup_taskset_for_each(task, css, tset) { + /* + * Lock the CPU's RQ the task is enqueued to avoid race + * conditions 
with migration code while the task is being + * accounted + */ + rq = task_rq_lock(task, &rq_flags); + + /* + * Boost group accouting is protected by a per-cpu lock and + * requires interrupt to be disabled to avoid race conditions + * on... + */ + cpu = cpu_of(rq); + bg = &per_cpu(cpu_boost_groups, cpu); + raw_spin_lock(&bg->lock); + + dst_idx = task_schedtune(task)->idx; + src_idx = task->stune_idx; + + /* + * Current task is not changing boostgroup, which can + * happen when the new hierarchy is in use. + */ + if (unlikely(dst_idx == src_idx)) { + raw_spin_unlock(&bg->lock); + task_rq_unlock(rq, task, &rq_flags); + continue; + } + + task->stune_idx = dst_idx; + + if (!task_on_rq_queued(task)) { + raw_spin_unlock(&bg->lock); + task_rq_unlock(rq, task, &rq_flags); + continue; + } + + /* + * This is the case of a RUNNABLE task which is switching its + * current boost group. + */ + + /* Move task from src to dst boost group */ + tasks = bg->group[src_idx].tasks - 1; + bg->group[src_idx].tasks = max(0, tasks); + bg->group[dst_idx].tasks += 1; + + /* Update boost hold start for this group */ + now = sched_clock_cpu(cpu); + bg->group[dst_idx].ts = now; + + /* Force boost group re-evaluation at next boost check */ + bg->boost_ts = now - SCHEDTUNE_BOOST_HOLD_NS; + + raw_spin_unlock(&bg->lock); + task_rq_unlock(rq, task, &rq_flags); + } +} + static int boost_write(struct cgroup_subsys_state *css, struct cftype *cft, s64 boost) @@ -831,8 +815,8 @@ struct cgroup_subsys schedtune_cgrp_subsys = { .css_alloc = schedtune_css_alloc, .css_free = schedtune_css_free, .attach = schedtune_attach, - .can_attach = schedtune_can_attach, - .cancel_attach = schedtune_cancel_attach, + .can_attach = schedtune_can_attach, + .cancel_attach = schedtune_cancel_attach, .legacy_cftypes = files, .early_init = 1, }; -- GitLab From 7b9eaa7241ea2cfa580b854d461be72107a4b35c Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Thu, 24 Sep 2020 09:40:53 -0400 Subject: [PATCH 1174/1304] fbdev, newport_con: 
Move FONT_EXTRA_WORDS macros into linux/font.h commit bb0890b4cd7f8203e3aa99c6d0f062d6acdaad27 upstream. drivers/video/console/newport_con.c is borrowing FONT_EXTRA_WORDS macros from drivers/video/fbdev/core/fbcon.h. To keep things simple, move all definitions into . Since newport_con now uses four extra words, initialize the fourth word in newport_set_font() properly. Cc: stable@vger.kernel.org Signed-off-by: Peilin Ye Reviewed-by: Greg Kroah-Hartman Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/7fb8bc9b0abc676ada6b7ac0e0bd443499357267.1600953813.git.yepeilin.cs@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/video/console/newport_con.c | 7 +------ drivers/video/fbdev/core/fbcon.h | 7 ------- drivers/video/fbdev/core/fbcon_rotate.c | 1 + drivers/video/fbdev/core/tileblit.c | 1 + include/linux/font.h | 8 ++++++++ 5 files changed, 11 insertions(+), 13 deletions(-) diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c index cc2fb5043184..02b24ae8b9cb 100644 --- a/drivers/video/console/newport_con.c +++ b/drivers/video/console/newport_con.c @@ -35,12 +35,6 @@ #define FONT_DATA ((unsigned char *)font_vga_8x16.data) -/* borrowed from fbcon.c */ -#define REFCOUNT(fd) (((int *)(fd))[-1]) -#define FNTSIZE(fd) (((int *)(fd))[-2]) -#define FNTCHARCNT(fd) (((int *)(fd))[-3]) -#define FONT_EXTRA_WORDS 3 - static unsigned char *font_data[MAX_NR_CONSOLES]; static struct newport_regs *npregs; @@ -522,6 +516,7 @@ static int newport_set_font(int unit, struct console_font *op) FNTSIZE(new_data) = size; FNTCHARCNT(new_data) = op->charcount; REFCOUNT(new_data) = 0; /* usage counter */ + FNTSUM(new_data) = 0; p = new_data; for (i = 0; i < op->charcount; i++) { diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index aeea63abbe98..c023009f2978 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -152,13 +152,6 @@ static inline int attr_col_ec(int 
shift, struct vc_data *vc, #define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) #define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) -/* Font */ -#define REFCOUNT(fd) (((int *)(fd))[-1]) -#define FNTSIZE(fd) (((int *)(fd))[-2]) -#define FNTCHARCNT(fd) (((int *)(fd))[-3]) -#define FNTSUM(fd) (((int *)(fd))[-4]) -#define FONT_EXTRA_WORDS 4 - /* * Scroll Method */ diff --git a/drivers/video/fbdev/core/fbcon_rotate.c b/drivers/video/fbdev/core/fbcon_rotate.c index c0d445294aa7..ac72d4f85f7d 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.c +++ b/drivers/video/fbdev/core/fbcon_rotate.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "fbcon.h" #include "fbcon_rotate.h" diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index eb664dbf96f6..adff8d6ffe6f 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "fbcon.h" diff --git a/include/linux/font.h b/include/linux/font.h index d6821769dd1e..0a3639a00b3a 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -57,4 +57,12 @@ extern const struct font_desc *get_default_font(int xres, int yres, /* Max. length for the name of a predefined font */ #define MAX_FONT_NAME 32 +/* Extra word getters */ +#define REFCOUNT(fd) (((int *)(fd))[-1]) +#define FNTSIZE(fd) (((int *)(fd))[-2]) +#define FNTCHARCNT(fd) (((int *)(fd))[-3]) +#define FNTSUM(fd) (((int *)(fd))[-4]) + +#define FONT_EXTRA_WORDS 4 + #endif /* _VIDEO_FONT_H */ -- GitLab From 2162bcbc74817f6378a5593d527087c4b4593e16 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Thu, 24 Sep 2020 09:42:22 -0400 Subject: [PATCH 1175/1304] Fonts: Support FONT_EXTRA_WORDS macros for built-in fonts commit 6735b4632def0640dbdf4eb9f99816aca18c4f16 upstream. 
syzbot has reported an issue in the framebuffer layer, where a malicious user may overflow our built-in font data buffers. In order to perform a reliable range check, subsystems need to know `FONTDATAMAX` for each built-in font. Unfortunately, our font descriptor, `struct console_font` does not contain `FONTDATAMAX`, and is part of the UAPI, making it infeasible to modify it. For user-provided fonts, the framebuffer layer resolves this issue by reserving four extra words at the beginning of data buffers. Later, whenever a function needs to access them, it simply uses the following macros: Recently we have gathered all the above macros to . Let us do the same thing for built-in fonts, prepend four extra words (including `FONTDATAMAX`) to their data buffers, so that subsystems can use these macros for all fonts, no matter built-in or user-provided. This patch depends on patch "fbdev, newport_con: Move FONT_EXTRA_WORDS macros into linux/font.h". Cc: stable@vger.kernel.org Link: https://syzkaller.appspot.com/bug?id=08b8be45afea11888776f897895aef9ad1c3ecfd Signed-off-by: Peilin Ye Reviewed-by: Greg Kroah-Hartman Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/ef18af00c35fb3cc826048a5f70924ed6ddce95b.1600953813.git.yepeilin.cs@gmail.com Signed-off-by: Greg Kroah-Hartman --- include/linux/font.h | 5 +++++ lib/fonts/font_10x18.c | 9 ++++----- lib/fonts/font_6x10.c | 9 +++++---- lib/fonts/font_6x11.c | 9 ++++----- lib/fonts/font_7x14.c | 9 ++++----- lib/fonts/font_8x16.c | 9 ++++----- lib/fonts/font_8x8.c | 9 ++++----- lib/fonts/font_acorn_8x8.c | 9 ++++++--- lib/fonts/font_mini_4x6.c | 8 ++++---- lib/fonts/font_pearl_8x8.c | 9 ++++----- lib/fonts/font_sun12x22.c | 9 ++++----- lib/fonts/font_sun8x16.c | 7 ++++--- 12 files changed, 52 insertions(+), 49 deletions(-) diff --git a/include/linux/font.h b/include/linux/font.h index 0a3639a00b3a..f85e70bd4793 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -65,4 +65,9 @@ extern 
const struct font_desc *get_default_font(int xres, int yres, #define FONT_EXTRA_WORDS 4 +struct font_data { + unsigned int extra[FONT_EXTRA_WORDS]; + const unsigned char data[]; +} __packed; + #endif /* _VIDEO_FONT_H */ diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c index 532f0ff89a96..0e2deac97da0 100644 --- a/lib/fonts/font_10x18.c +++ b/lib/fonts/font_10x18.c @@ -8,8 +8,8 @@ #define FONTDATAMAX 9216 -static const unsigned char fontdata_10x18[FONTDATAMAX] = { - +static struct font_data fontdata_10x18 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ @@ -5129,8 +5129,7 @@ static const unsigned char fontdata_10x18[FONTDATAMAX] = { 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ 0x00, 0x00, /* 0000000000 */ - -}; +} }; const struct font_desc font_10x18 = { @@ -5138,7 +5137,7 @@ const struct font_desc font_10x18 = { .name = "10x18", .width = 10, .height = 18, - .data = fontdata_10x18, + .data = fontdata_10x18.data, #ifdef __sparc__ .pref = 5, #else diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c index 09b2cc03435b..87da8acd07db 100644 --- a/lib/fonts/font_6x10.c +++ b/lib/fonts/font_6x10.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 #include -static const unsigned char fontdata_6x10[] = { +#define FONTDATAMAX 2560 +static struct font_data fontdata_6x10 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -3074,14 +3076,13 @@ static const unsigned char fontdata_6x10[] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_6x10 = { .idx = FONT6x10_IDX, .name = "6x10", .width = 6, .height = 10, - .data = fontdata_6x10, + .data = fontdata_6x10.data, .pref = 0, }; diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c index d7136c33f1f0..5e975dfa10a5 100644 --- a/lib/fonts/font_6x11.c +++ b/lib/fonts/font_6x11.c @@ -9,8 +9,8 @@ #define FONTDATAMAX (11*256) -static const 
unsigned char fontdata_6x11[FONTDATAMAX] = { - +static struct font_data fontdata_6x11 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -3338,8 +3338,7 @@ static const unsigned char fontdata_6x11[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_vga_6x11 = { @@ -3347,7 +3346,7 @@ const struct font_desc font_vga_6x11 = { .name = "ProFont6x11", .width = 6, .height = 11, - .data = fontdata_6x11, + .data = fontdata_6x11.data, /* Try avoiding this font if possible unless on MAC */ .pref = -2000, }; diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c index 89752d0b23e8..86d298f38505 100644 --- a/lib/fonts/font_7x14.c +++ b/lib/fonts/font_7x14.c @@ -8,8 +8,8 @@ #define FONTDATAMAX 3584 -static const unsigned char fontdata_7x14[FONTDATAMAX] = { - +static struct font_data fontdata_7x14 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ @@ -4105,8 +4105,7 @@ static const unsigned char fontdata_7x14[FONTDATAMAX] = { 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ - -}; +} }; const struct font_desc font_7x14 = { @@ -4114,6 +4113,6 @@ const struct font_desc font_7x14 = { .name = "7x14", .width = 7, .height = 14, - .data = fontdata_7x14, + .data = fontdata_7x14.data, .pref = 0, }; diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c index b7ab1f5fbdb8..37cedd36ca5e 100644 --- a/lib/fonts/font_8x16.c +++ b/lib/fonts/font_8x16.c @@ -10,8 +10,8 @@ #define FONTDATAMAX 4096 -static const unsigned char fontdata_8x16[FONTDATAMAX] = { - +static struct font_data fontdata_8x16 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -4619,8 +4619,7 @@ static const unsigned char fontdata_8x16[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_vga_8x16 = { @@ -4628,7 +4627,7 @@ const struct font_desc 
font_vga_8x16 = { .name = "VGA8x16", .width = 8, .height = 16, - .data = fontdata_8x16, + .data = fontdata_8x16.data, .pref = 0, }; EXPORT_SYMBOL(font_vga_8x16); diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c index 2328ebc8bab5..8ab695538395 100644 --- a/lib/fonts/font_8x8.c +++ b/lib/fonts/font_8x8.c @@ -9,8 +9,8 @@ #define FONTDATAMAX 2048 -static const unsigned char fontdata_8x8[FONTDATAMAX] = { - +static struct font_data fontdata_8x8 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -2570,8 +2570,7 @@ static const unsigned char fontdata_8x8[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_vga_8x8 = { @@ -2579,6 +2578,6 @@ const struct font_desc font_vga_8x8 = { .name = "VGA8x8", .width = 8, .height = 8, - .data = fontdata_8x8, + .data = fontdata_8x8.data, .pref = 0, }; diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c index 0ff0e85d4481..069b3e80c434 100644 --- a/lib/fonts/font_acorn_8x8.c +++ b/lib/fonts/font_acorn_8x8.c @@ -3,7 +3,10 @@ #include -static const unsigned char acorndata_8x8[] = { +#define FONTDATAMAX 2048 + +static struct font_data acorndata_8x8 = { +{ 0, 0, FONTDATAMAX, 0 }, { /* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */ /* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */ /* 02 */ 0x7e, 0xff, 0xbd, 0xff, 0xc3, 0xe7, 0xff, 0x7e, /* ^B */ @@ -260,14 +263,14 @@ static const unsigned char acorndata_8x8[] = { /* FD */ 0x38, 0x04, 0x18, 0x20, 0x3c, 0x00, 0x00, 0x00, /* FE */ 0x00, 0x00, 0x3c, 0x3c, 0x3c, 0x3c, 0x00, 0x00, /* FF */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 -}; +} }; const struct font_desc font_acorn_8x8 = { .idx = ACORN8x8_IDX, .name = "Acorn8x8", .width = 8, .height = 8, - .data = acorndata_8x8, + .data = acorndata_8x8.data, #ifdef CONFIG_ARCH_ACORN .pref = 20, #else diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c index 
838caa1cfef7..1449876c6a27 100644 --- a/lib/fonts/font_mini_4x6.c +++ b/lib/fonts/font_mini_4x6.c @@ -43,8 +43,8 @@ __END__; #define FONTDATAMAX 1536 -static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { - +static struct font_data fontdata_mini_4x6 = { + { 0, 0, FONTDATAMAX, 0 }, { /*{*/ /* Char 0: ' ' */ 0xee, /*= [*** ] */ @@ -2145,14 +2145,14 @@ static const unsigned char fontdata_mini_4x6[FONTDATAMAX] = { 0xee, /*= [*** ] */ 0x00, /*= [ ] */ /*}*/ -}; +} }; const struct font_desc font_mini_4x6 = { .idx = MINI4x6_IDX, .name = "MINI4x6", .width = 4, .height = 6, - .data = fontdata_mini_4x6, + .data = fontdata_mini_4x6.data, .pref = 3, }; diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c index b15d3c342c5b..32d65551e7ed 100644 --- a/lib/fonts/font_pearl_8x8.c +++ b/lib/fonts/font_pearl_8x8.c @@ -14,8 +14,8 @@ #define FONTDATAMAX 2048 -static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = { - +static struct font_data fontdata_pearl8x8 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ @@ -2575,14 +2575,13 @@ static const unsigned char fontdata_pearl8x8[FONTDATAMAX] = { 0x00, /* 00000000 */ 0x00, /* 00000000 */ 0x00, /* 00000000 */ - -}; +} }; const struct font_desc font_pearl_8x8 = { .idx = PEARL8x8_IDX, .name = "PEARL8x8", .width = 8, .height = 8, - .data = fontdata_pearl8x8, + .data = fontdata_pearl8x8.data, .pref = 2, }; diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c index 955d6eee3959..641a6b4dca42 100644 --- a/lib/fonts/font_sun12x22.c +++ b/lib/fonts/font_sun12x22.c @@ -3,8 +3,8 @@ #define FONTDATAMAX 11264 -static const unsigned char fontdata_sun12x22[FONTDATAMAX] = { - +static struct font_data fontdata_sun12x22 = { + { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 000000000000 */ 0x00, 0x00, /* 000000000000 */ @@ -6148,8 +6148,7 @@ static const unsigned char fontdata_sun12x22[FONTDATAMAX] = { 0x00, 0x00, /* 000000000000 */ 0x00, 0x00, /* 
000000000000 */ 0x00, 0x00, /* 000000000000 */ - -}; +} }; const struct font_desc font_sun_12x22 = { @@ -6157,7 +6156,7 @@ const struct font_desc font_sun_12x22 = { .name = "SUN12x22", .width = 12, .height = 22, - .data = fontdata_sun12x22, + .data = fontdata_sun12x22.data, #ifdef __sparc__ .pref = 5, #else diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c index 03d71e53954a..193fe6d988e0 100644 --- a/lib/fonts/font_sun8x16.c +++ b/lib/fonts/font_sun8x16.c @@ -3,7 +3,8 @@ #define FONTDATAMAX 4096 -static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { +static struct font_data fontdata_sun8x16 = { +{ 0, 0, FONTDATAMAX, 0 }, { /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0xff,0xdb,0xff,0xff,0xc3,0xe7,0xff,0xff,0x7e,0x00,0x00,0x00,0x00, @@ -260,14 +261,14 @@ static const unsigned char fontdata_sun8x16[FONTDATAMAX] = { /* */ 0x00,0x70,0xd8,0x30,0x60,0xc8,0xf8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x7c,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -}; +} }; const struct font_desc font_sun_8x16 = { .idx = SUN8x16_IDX, .name = "SUN8x16", .width = 8, .height = 16, - .data = fontdata_sun8x16, + .data = fontdata_sun8x16.data, #ifdef __sparc__ .pref = 10, #else -- GitLab From 43198a5b1c42e3d8aadc6524a73bb3aa3666cd43 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Thu, 24 Sep 2020 09:43:48 -0400 Subject: [PATCH 1176/1304] fbcon: Fix global-out-of-bounds read in fbcon_get_font() commit 5af08640795b2b9a940c9266c0260455377ae262 upstream. fbcon_get_font() is reading out-of-bounds. A malicious user may resize `vc->vc_font.height` to a large value, causing fbcon_get_font() to read out of `fontdata`. fbcon_get_font() handles both built-in and user-provided fonts. 
Fortunately, recently we have added FONT_EXTRA_WORDS support for built-in fonts, so fix it by adding range checks using FNTSIZE(). This patch depends on patch "fbdev, newport_con: Move FONT_EXTRA_WORDS macros into linux/font.h", and patch "Fonts: Support FONT_EXTRA_WORDS macros for built-in fonts". Cc: stable@vger.kernel.org Reported-and-tested-by: syzbot+29d4ed7f3bdedf2aa2fd@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?id=08b8be45afea11888776f897895aef9ad1c3ecfd Signed-off-by: Peilin Ye Reviewed-by: Greg Kroah-Hartman Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/b34544687a1a09d6de630659eb7a773f4953238b.1600953813.git.yepeilin.cs@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/core/fbcon.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 0bf5ea518558..5742a0dc774e 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -2270,6 +2270,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) if (font->width <= 8) { j = vc->vc_font.height; + if (font->charcount * j > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 32 - j); @@ -2278,6 +2281,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) } } else if (font->width <= 16) { j = vc->vc_font.height * 2; + if (font->charcount * j > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 64 - j); @@ -2285,6 +2291,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) fontdata += j; } } else if (font->width <= 24) { + if (font->charcount * (vc->vc_font.height * sizeof(u32)) > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { for (j = 0; j < vc->vc_font.height; j++) { *data++ = 
fontdata[0]; @@ -2297,6 +2306,9 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font) } } else { j = vc->vc_font.height * 4; + if (font->charcount * j > FNTSIZE(fontdata)) + return -EINVAL; + for (i = 0; i < font->charcount; i++) { memcpy(data, fontdata, j); memset(data + j, 0, 128 - j); -- GitLab From be92b3b5e5aae0a55699d539783ea33f34a240ff Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 22 Sep 2020 09:29:31 +0200 Subject: [PATCH 1177/1304] Revert "ravb: Fixed to be able to unload modules" commit 77972b55fb9d35d4a6b0abca99abffaa4ec6a85b upstream. This reverts commit 1838d6c62f57836639bd3d83e7855e0ee4f6defc. This commit moved the ravb_mdio_init() call (and thus the of_mdiobus_register() call) from the ravb_probe() to the ravb_open() call. This causes a regression during system resume (s2idle/s2ram), as new PHY devices cannot be bound while suspended. During boot, the Micrel PHY is detected like this: Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Micrel KSZ9031 Gigabit PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=228) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off During system suspend, (A) defer_all_probes is set to true, and (B) usermodehelper_disabled is set to UMH_DISABLED, to avoid drivers being probed while suspended. A. 
If CONFIG_MODULES=n, phy_device_register() calling device_add() merely adds the device, but does not probe it yet, as really_probe() returns early due to defer_all_probes being set: dpm_resume+0x128/0x4f8 device_resume+0xcc/0x1b0 dpm_run_callback+0x74/0x340 ravb_resume+0x190/0x1b8 ravb_open+0x84/0x770 of_mdiobus_register+0x1e0/0x468 of_mdiobus_register_phy+0x1b8/0x250 of_mdiobus_phy_device_register+0x178/0x1e8 phy_device_register+0x114/0x1b8 device_add+0x3d4/0x798 bus_probe_device+0x98/0xa0 device_initial_probe+0x10/0x18 __device_attach+0xe4/0x140 bus_for_each_drv+0x64/0xc8 __device_attach_driver+0xb8/0xe0 driver_probe_device.part.11+0xc4/0xd8 really_probe+0x32c/0x3b8 Later, phy_attach_direct() notices no PHY driver has been bound, and falls back to the Generic PHY, leading to degraded operation: Generic PHY e6800000.ethernet-ffffffff:00: attached PHY driver [Generic PHY] (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=POLL) ravb e6800000.ethernet eth0: Link is Up - 1Gbps/Full - flow control off B. If CONFIG_MODULES=y, request_module() returns early with -EBUSY due to UMH_DISABLED, and MDIO initialization fails completely: mdio_bus e6800000.ethernet-ffffffff:00: error -16 loading PHY driver module for ID 0x00221622 ravb e6800000.ethernet eth0: failed to initialize MDIO PM: dpm_run_callback(): ravb_resume+0x0/0x1b8 returns -16 PM: Device e6800000.ethernet failed to resume: error -16 Ignoring -EBUSY in phy_request_driver_module(), like was done for -ENOENT in commit 21e194425abd65b5 ("net: phy: fix issue with loading PHY driver w/o initramfs"), would makes it fall back to the Generic PHY, like in the CONFIG_MODULES=n case. Signed-off-by: Geert Uytterhoeven Cc: stable@vger.kernel.org Reviewed-by: Sergei Shtylyov Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/renesas/ravb_main.c | 110 +++++++++++------------ 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index b5066cf86c85..569e698b5c80 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1337,51 +1337,6 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, return error; } -/* MDIO bus init function */ -static int ravb_mdio_init(struct ravb_private *priv) -{ - struct platform_device *pdev = priv->pdev; - struct device *dev = &pdev->dev; - int error; - - /* Bitbang init */ - priv->mdiobb.ops = &bb_ops; - - /* MII controller setting */ - priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); - if (!priv->mii_bus) - return -ENOMEM; - - /* Hook up MII support for ethtool */ - priv->mii_bus->name = "ravb_mii"; - priv->mii_bus->parent = dev; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - /* Register MDIO bus */ - error = of_mdiobus_register(priv->mii_bus, dev->of_node); - if (error) - goto out_free_bus; - - return 0; - -out_free_bus: - free_mdio_bitbang(priv->mii_bus); - return error; -} - -/* MDIO bus release function */ -static int ravb_mdio_release(struct ravb_private *priv) -{ - /* Unregister mdio bus */ - mdiobus_unregister(priv->mii_bus); - - /* Free bitbang info */ - free_mdio_bitbang(priv->mii_bus); - - return 0; -} - /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { @@ -1390,13 +1345,6 @@ static int ravb_open(struct net_device *ndev) struct device *dev = &pdev->dev; int error; - /* MDIO bus init */ - error = ravb_mdio_init(priv); - if (error) { - netdev_err(ndev, "failed to initialize MDIO\n"); - return error; - } - napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); @@ -1474,7 +1422,6 @@ static int ravb_open(struct net_device 
*ndev) out_napi_off: napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); - ravb_mdio_release(priv); return error; } @@ -1774,8 +1721,6 @@ static int ravb_close(struct net_device *ndev) ravb_ring_free(ndev, RAVB_BE); ravb_ring_free(ndev, RAVB_NC); - ravb_mdio_release(priv); - return 0; } @@ -1922,6 +1867,51 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_set_features = ravb_set_features, }; +/* MDIO bus init function */ +static int ravb_mdio_init(struct ravb_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int error; + + /* Bitbang init */ + priv->mdiobb.ops = &bb_ops; + + /* MII controller setting */ + priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); + if (!priv->mii_bus) + return -ENOMEM; + + /* Hook up MII support for ethtool */ + priv->mii_bus->name = "ravb_mii"; + priv->mii_bus->parent = dev; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register MDIO bus */ + error = of_mdiobus_register(priv->mii_bus, dev->of_node); + if (error) + goto out_free_bus; + + return 0; + +out_free_bus: + free_mdio_bitbang(priv->mii_bus); + return error; +} + +/* MDIO bus release function */ +static int ravb_mdio_release(struct ravb_private *priv) +{ + /* Unregister mdio bus */ + mdiobus_unregister(priv->mii_bus); + + /* Free bitbang info */ + free_mdio_bitbang(priv->mii_bus); + + return 0; +} + static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, @@ -2148,6 +2138,13 @@ static int ravb_probe(struct platform_device *pdev) eth_hw_addr_random(ndev); } + /* MDIO bus init */ + error = ravb_mdio_init(priv); + if (error) { + dev_err(&pdev->dev, "failed to initialize MDIO\n"); + goto out_dma_free; + } + netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 
64); @@ -2169,6 +2166,8 @@ static int ravb_probe(struct platform_device *pdev) out_napi_del: netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); +out_dma_free: dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); @@ -2200,6 +2199,7 @@ static int ravb_remove(struct platform_device *pdev) unregister_netdev(ndev); netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); + ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); free_netdev(ndev); platform_set_drvdata(pdev, NULL); -- GitLab From c37528577ade675fd4d597b714395e74798797c2 Mon Sep 17 00:00:00 2001 From: Anant Thazhemadam Date: Wed, 7 Oct 2020 09:24:01 +0530 Subject: [PATCH 1178/1304] net: wireless: nl80211: fix out-of-bounds access in nl80211_del_key() commit 3dc289f8f139997f4e9d3cfccf8738f20d23e47b upstream. In nl80211_parse_key(), key.idx is first initialized as -1. If this value of key.idx remains unmodified and gets returned, and nl80211_key_allowed() also returns 0, then rdev_del_key() gets called with key.idx = -1. This causes an out-of-bounds array access. Handle this issue by checking if the value of key.idx after nl80211_parse_key() is called and return -EINVAL if key.idx < 0. 
Cc: stable@vger.kernel.org Reported-by: syzbot+b1bb342d1d097516cbda@syzkaller.appspotmail.com Tested-by: syzbot+b1bb342d1d097516cbda@syzkaller.appspotmail.com Signed-off-by: Anant Thazhemadam Link: https://lore.kernel.org/r/20201007035401.9522-1-anant.thazhemadam@gmail.com Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/wireless/nl80211.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 996b68b48a87..4e4179209982 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3621,6 +3621,9 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info) if (err) return err; + if (key.idx < 0) + return -EINVAL; + if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); -- GitLab From 92f3373c99093e258cebe4cff4330662d19a3f4c Mon Sep 17 00:00:00 2001 From: Karol Herbst Date: Wed, 7 Oct 2020 00:05:28 +0200 Subject: [PATCH 1179/1304] drm/nouveau/mem: guard against NULL pointer access in mem_del commit d10285a25e29f13353bbf7760be8980048c1ef2f upstream. 
other drivers seem to do something similar Signed-off-by: Karol Herbst Cc: dri-devel Cc: Dave Airlie Cc: stable@vger.kernel.org Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20201006220528.13925-2-kherbst@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_mem.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index c002f8968507..9682f30ab6f6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -176,6 +176,8 @@ void nouveau_mem_del(struct ttm_mem_reg *reg) { struct nouveau_mem *mem = nouveau_mem(reg); + if (!mem) + return; nouveau_mem_fini(mem); kfree(reg->mm_node); reg->mm_node = NULL; -- GitLab From 33acb78c859f1a0bd3c6b67801fada16f99614f6 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 5 Oct 2020 10:56:22 -0700 Subject: [PATCH 1180/1304] usermodehelper: reset umask to default before executing user process commit 4013c1496c49615d90d36b9d513eee8e369778e9 upstream. Kernel threads intentionally do CLONE_FS in order to follow any changes that 'init' does to set up the root directory (or cwd). It is admittedly a bit odd, but it avoids the situation where 'init' does some extensive setup to initialize the system environment, and then we execute a usermode helper program, and it uses the original FS setup from boot time that may be very limited and incomplete. [ Both Al Viro and Eric Biederman point out that 'pivot_root()' will follow the root regardless, since it fixes up other users of root (see chroot_fs_refs() for details), but overmounting root and doing a chroot() would not. ] However, Vegard Nossum noticed that the CLONE_FS not only means that we follow the root and current working directories, it also means we share umask with whatever init changed it to. That wasn't intentional. 
Just reset umask to the original default (0022) before actually starting the usermode helper program. Reported-by: Vegard Nossum Cc: Al Viro Acked-by: Eric W. Biederman Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/umh.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/kernel/umh.c b/kernel/umh.c index 52a9084f8541..16653319c8ce 100644 --- a/kernel/umh.c +++ b/kernel/umh.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -72,6 +73,14 @@ static int call_usermodehelper_exec_async(void *data) flush_signal_handlers(current, 1); spin_unlock_irq(&current->sighand->siglock); + /* + * Initial kernel threads share their FS with init, in order to + * get the init root directory. But we've now created a new + * thread that is going to execve a user process and has its own + * 'struct fs_struct'. Reset umask to the default. + */ + current->fs->umask = 0022; + /* * Our parent (unbound workqueue) runs with elevated scheduling * priority. Avoid propagating that into the userspace child. -- GitLab From c42dd41efbc6f5560a8c99f7647f04bf3a711674 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 12 Sep 2020 11:35:32 +0200 Subject: [PATCH 1181/1304] platform/x86: intel-vbtn: Fix SW_TABLET_MODE always reporting 1 on the HP Pavilion 11 x360 commit d823346876a970522ff9e4d2b323c9b734dcc4de upstream. Commit cfae58ed681c ("platform/x86: intel-vbtn: Only blacklist SW_TABLET_MODE on the 9 / "Laptop" chasis-type") restored SW_TABLET_MODE reporting on the HP stream x360 11 series on which it was previously broken by commit de9647efeaa9 ("platform/x86: intel-vbtn: Only activate tablet mode switch on 2-in-1's"). It turns out that enabling SW_TABLET_MODE reporting on devices with a chassis-type of 10 ("Notebook") causes SW_TABLET_MODE to always report 1 at boot on the HP Pavilion 11 x360, which causes libinput to disable the kbd and touchpad. 
The HP Pavilion 11 x360's ACPI VGBS method sets bit 4 instead of bit 6 when NOT in tablet mode at boot. Inspecting all the DSDTs in my DSDT collection shows only one other model, the Medion E1239T ever setting bit 4 and it always sets this together with bit 6. So lets treat bit 4 as a second bit which when set indicates the device not being in tablet-mode, as we already do for bit 6. While at it also prefix all VGBS constant defines with "VGBS_". Fixes: cfae58ed681c ("platform/x86: intel-vbtn: Only blacklist SW_TABLET_MODE on the 9 / "Laptop" chasis-type") Signed-off-by: Hans de Goede Acked-by: Mark Gross Signed-off-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/intel-vbtn.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index c7c8b432c163..d8a28ef11dfc 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -15,9 +15,13 @@ #include #include +/* Returned when NOT in tablet mode on some HP Stream x360 11 models */ +#define VGBS_TABLET_MODE_FLAG_ALT 0x10 /* When NOT in tablet mode, VGBS returns with the flag 0x40 */ -#define TABLET_MODE_FLAG 0x40 -#define DOCK_MODE_FLAG 0x80 +#define VGBS_TABLET_MODE_FLAG 0x40 +#define VGBS_DOCK_MODE_FLAG 0x80 + +#define VGBS_TABLET_MODE_FLAGS (VGBS_TABLET_MODE_FLAG | VGBS_TABLET_MODE_FLAG_ALT) MODULE_LICENSE("GPL"); MODULE_AUTHOR("AceLan Kao"); @@ -148,9 +152,9 @@ static void detect_tablet_mode(struct platform_device *device) if (ACPI_FAILURE(status)) return; - m = !(vgbs & TABLET_MODE_FLAG); + m = !(vgbs & VGBS_TABLET_MODE_FLAGS); input_report_switch(priv->input_dev, SW_TABLET_MODE, m); - m = (vgbs & DOCK_MODE_FLAG) ? 1 : 0; + m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 
1 : 0; input_report_switch(priv->input_dev, SW_DOCK, m); } -- GitLab From d32b4239790f49011775f950c61162b18b2ca335 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sun, 13 Sep 2020 12:02:03 -0700 Subject: [PATCH 1182/1304] platform/x86: thinkpad_acpi: initialize tp_nvram_state variable commit 5f38b06db8af3ed6c2fc1b427504ca56fae2eacc upstream. clang static analysis flags this representative problem thinkpad_acpi.c:2523:7: warning: Branch condition evaluates to a garbage value if (!oldn->mute || ^~~~~~~~~~~ In hotkey_kthread() mute is conditionally set by hotkey_read_nvram() but unconditionally checked by hotkey_compare_and_issue_event(). So the tp_nvram_state variable s[2] needs to be initialized. Fixes: 01e88f25985d ("ACPI: thinkpad-acpi: add CMOS NVRAM polling for hot keys (v9)") Signed-off-by: Tom Rix Reviewed-by: Hans de Goede Acked-by: mark gross Signed-off-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/thinkpad_acpi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 8f85bb4fe784..820c8f3bf87d 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -2597,7 +2597,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn, */ static int hotkey_kthread(void *data) { - struct tp_nvram_state s[2]; + struct tp_nvram_state s[2] = { 0 }; u32 poll_mask, event_mask; unsigned int si, so; unsigned long t; -- GitLab From dfedfbe0feb7c01943882a601d43a3d2e8d2a16a Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 30 Sep 2020 15:19:05 +0200 Subject: [PATCH 1183/1304] platform/x86: intel-vbtn: Switch to an allow-list for SW_TABLET_MODE reporting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 8169bd3e6e193497cab781acddcff8fde5d0c416 upstream. 
2 recent commits: cfae58ed681c ("platform/x86: intel-vbtn: Only blacklist SW_TABLET_MODE on the 9 / "Laptop" chasis-type") 1fac39fd0316 ("platform/x86: intel-vbtn: Also handle tablet-mode switch on "Detachable" and "Portable" chassis-types") Enabled reporting of SW_TABLET_MODE on more devices since the vbtn ACPI interface is used by the firmware on some of those devices to report this. Testing has shown that unconditionally enabling SW_TABLET_MODE reporting on all devices with a chassis type of 8 ("Portable") or 10 ("Notebook") which support the VGBS method is a very bad idea. Many of these devices are normal laptops (non 2-in-1) models with a VGBS which always returns 0, which we translate to SW_TABLET_MODE=1. This in turn causes userspace (libinput) to suppress events from the builtin keyboard and touchpad, making the laptop essentially unusable. Since the problem of wrongly reporting SW_TABLET_MODE=1 in combination with libinput, leads to a non-usable system. Where as OTOH many people will not even notice when SW_TABLET_MODE is not being reported, this commit changes intel_vbtn_has_switches() to use a DMI based allow-list. The new DMI based allow-list matches on the 31 ("Convertible") and 32 ("Detachable") chassis-types, as these clearly are 2-in-1s and so far if they support the intel-vbtn ACPI interface they all have properly working SW_TABLET_MODE reporting. Besides these 2 generic matches, it also contains model specific matches for 2-in-1 models which use a different chassis-type and which are known to have properly working SW_TABLET_MODE reporting. 
This has been tested on the following 2-in-1 devices: Dell Venue 11 Pro 7130 vPro HP Pavilion X2 10-p002nd HP Stream x360 Convertible PC 11 Medion E1239T Fixes: cfae58ed681c ("platform/x86: intel-vbtn: Only blacklist SW_TABLET_MODE on the 9 / "Laptop" chasis-type") BugLink: https://forum.manjaro.org/t/keyboard-and-touchpad-only-work-on-kernel-5-6/22668 BugLink: https://bugzilla.opensuse.org/show_bug.cgi?id=1175599 Cc: Barnabás Pőcze Cc: Takashi Iwai Signed-off-by: Hans de Goede Signed-off-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/intel-vbtn.c | 52 +++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 9 deletions(-) diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index d8a28ef11dfc..1e6b4661c764 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -158,20 +158,54 @@ static void detect_tablet_mode(struct platform_device *device) input_report_switch(priv->input_dev, SW_DOCK, m); } +/* + * There are several laptops (non 2-in-1) models out there which support VGBS, + * but simply always return 0, which we translate to SW_TABLET_MODE=1. This in + * turn causes userspace (libinput) to suppress events from the builtin + * keyboard and touchpad, making the laptop essentially unusable. + * + * Since the problem of wrongly reporting SW_TABLET_MODE=1 in combination + * with libinput, leads to a non-usable system. Where as OTOH many people will + * not even notice when SW_TABLET_MODE is not being reported, a DMI based allow + * list is used here. This list mainly matches on the chassis-type of 2-in-1s. + * + * There are also some 2-in-1s which use the intel-vbtn ACPI interface to report + * SW_TABLET_MODE with a chassis-type of 8 ("Portable") or 10 ("Notebook"), + * these are matched on a per model basis, since many normal laptops with a + * possible broken VGBS ACPI-method also use these chassis-types. 
+ */ +static const struct dmi_system_id dmi_switches_allow_list[] = { + { + .matches = { + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */), + }, + }, + { + .matches = { + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"), + }, + }, + {} /* Array terminator */ +}; + static bool intel_vbtn_has_switches(acpi_handle handle) { - const char *chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); unsigned long long vgbs; acpi_status status; - /* - * Some normal laptops have a VGBS method despite being non-convertible - * and their VGBS method always returns 0, causing detect_tablet_mode() - * to report SW_TABLET_MODE=1 to userspace, which causes issues. - * These laptops have a DMI chassis_type of 9 ("Laptop"), do not report - * switches on any devices with a DMI chassis_type of 9. - */ - if (chassis_type && strcmp(chassis_type, "9") == 0) + if (!dmi_check_system(dmi_switches_allow_list)) return false; status = acpi_evaluate_integer(handle, "VGBS", NULL, &vgbs); -- GitLab From b12eefc33824e4aea6bdfc26271107ab96e546ff Mon Sep 17 00:00:00 2001 From: Aaron Ma Date: Sat, 3 Oct 2020 01:09:16 +0800 Subject: [PATCH 1184/1304] platform/x86: thinkpad_acpi: re-initialize ACPI buffer size when reuse commit 720ef73d1a239e33c3ad8fac356b9b1348e68aaf upstream. Evaluating ACPI _BCL could fail, then ACPI buffer size will be set to 0. When reuse this ACPI buffer, AE_BUFFER_OVERFLOW will be triggered. Re-initialize buffer size will make ACPI evaluate successfully. 
Fixes: 46445b6b896fd ("thinkpad-acpi: fix handle locate for video and query of _BCL") Signed-off-by: Aaron Ma Signed-off-by: Andy Shevchenko Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/thinkpad_acpi.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 820c8f3bf87d..98bd8213b037 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -6879,8 +6879,10 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle) list_for_each_entry(child, &device->children, node) { acpi_status status = acpi_evaluate_object(child->handle, "_BCL", NULL, &buffer); - if (ACPI_FAILURE(status)) + if (ACPI_FAILURE(status)) { + buffer.length = ACPI_ALLOCATE_BUFFER; continue; + } obj = (union acpi_object *)buffer.pointer; if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { -- GitLab From fbe293f9a67b8f34424d4ca0298db88d2845dd79 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 13 Jul 2020 11:12:54 +0900 Subject: [PATCH 1185/1304] driver core: Fix probe_count imbalance in really_probe() commit b292b50b0efcc7095d8bf15505fba6909bb35dce upstream. syzbot is reporting hung task in wait_for_device_probe() [1]. At least, we always need to decrement probe_count if we incremented probe_count in really_probe(). However, since I can't find "Resources present before probing" message in the console log, both "this message simply flowed off" and "syzbot is not hitting this path" will be possible. Therefore, while we are at it, let's also prepare for concurrent wait_for_device_probe() calls by replacing wake_up() with wake_up_all(). 
[1] https://syzkaller.appspot.com/bug?id=25c833f1983c9c1d512f4ff860dd0d7f5a2e2c0f Reported-by: syzbot Fixes: 7c35e699c88bd607 ("driver core: Print device when resources present in really_probe()") Cc: Geert Uytterhoeven Signed-off-by: Tetsuo Handa Cc: stable Link: https://lore.kernel.org/r/20200713021254.3444-1-penguin-kernel@I-love.SAKURA.ne.jp [iwamatsu: Drop patch for deferred_probe_timeout_work_func()] Signed-off-by: Nobuhiro Iwamatsu (CIP) Signed-off-by: Greg Kroah-Hartman --- drivers/base/dd.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 0047bbdd43c0..b3c569412f4e 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -472,7 +472,8 @@ static int really_probe(struct device *dev, struct device_driver *drv) drv->bus->name, __func__, drv->name, dev_name(dev)); if (!list_empty(&dev->devres_head)) { dev_crit(dev, "Resources present before probing\n"); - return -EBUSY; + ret = -EBUSY; + goto done; } re_probe: @@ -579,7 +580,7 @@ static int really_probe(struct device *dev, struct device_driver *drv) ret = 0; done: atomic_dec(&probe_count); - wake_up(&probe_waitqueue); + wake_up_all(&probe_waitqueue); return ret; } -- GitLab From 9d3b4a36126a1a27149adfe9b0e61a781582c0c5 Mon Sep 17 00:00:00 2001 From: Tommi Rantala Date: Thu, 5 Mar 2020 10:37:12 +0200 Subject: [PATCH 1186/1304] perf top: Fix stdio interface input handling with glibc 2.28+ commit 29b4f5f188571c112713c35cc87eefb46efee612 upstream. Since glibc 2.28 when running 'perf top --stdio', input handling no longer works, but hitting any key always just prints the "Mapped keys" help text. To fix it, call clearerr() in the display_thread() loop to clear any EOF sticky errors, as instructed in the glibc NEWS file (https://sourceware.org/git/?p=glibc.git;a=blob;f=NEWS): * All stdio functions now treat end-of-file as a sticky condition. 
If you read from a file until EOF, and then the file is enlarged by another process, you must call clearerr or another function with the same effect (e.g. fseek, rewind) before you can read the additional data. This corrects a longstanding C99 conformance bug. It is most likely to affect programs that use stdio to read interactive input from a terminal. (Bug #1190.) Signed-off-by: Tommi Rantala Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lore.kernel.org/lkml/20200305083714.9381-2-tommi.t.rantala@nokia.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Greg Kroah-Hartman --- tools/perf/builtin-top.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index d0733251a386..9caab84c6294 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -651,7 +651,9 @@ static void *display_thread(void *arg) delay_msecs = top->delay_secs * MSEC_PER_SEC; set_term_quiet_input(&save); /* trash return*/ - getc(stdin); + clearerr(stdin); + if (poll(&stdin_poll, 1, 0) > 0) + getc(stdin); while (!done) { perf_top__print_sym_table(top); -- GitLab From b55f7362bd9d0104211af14c30803ebd1b231f07 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 10 Sep 2020 11:57:08 +0200 Subject: [PATCH 1187/1304] i2c: i801: Exclude device from suspend direct complete optimization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 845b89127bc5458d0152a4d63f165c62a22fcb70 upstream. By default, PCI drivers with runtime PM enabled will skip the calls to suspend and resume on system PM. For this driver, we don't want that, as we need to perform additional steps for system PM to work properly on all systems. So instruct the PM core to not skip these calls. 
Fixes: a9c8088c7988 ("i2c: i801: Don't restore config registers on runtime PM") Reported-by: Volker Rümelin Signed-off-by: Jean Delvare Cc: stable@vger.kernel.org Signed-off-by: Wolfram Sang [iwamatsu: Use DPM_FLAG_NEVER_SKIP instead of DPM_FLAG_NO_DIRECT_COMPLETE] Signed-off-by: Nobuhiro Iwamatsu (CIP) Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-i801.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 58fc17e46694..3ac3b26cc931 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1698,6 +1698,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_drvdata(dev, priv); + dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_set_autosuspend_delay(&dev->dev, 1000); pm_runtime_use_autosuspend(&dev->dev); pm_runtime_put_autosuspend(&dev->dev); -- GitLab From 2485d0f373afe3124b2a4b6383d53afe431ce795 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 19 May 2020 15:00:26 +0200 Subject: [PATCH 1188/1304] mtd: rawnand: sunxi: Fix the probe error path commit 3d84515ffd8fb657e10fa5b1215e9f095fa7efca upstream. nand_release() is supposed be called after MTD device registration. Here, only nand_scan() happened, so use nand_cleanup() instead. 
Fixes: 1fef62c1423b ("mtd: nand: add sunxi NAND flash controller support") Signed-off-by: Miquel Raynal Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-mtd/20200519130035.1883-54-miquel.raynal@bootlin.com Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/raw/sunxi_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 5b8502fd50cb..88075e420f90 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1947,7 +1947,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(dev, "failed to register mtd device: %d\n", ret); - nand_release(nand); + nand_cleanup(nand); return ret; } -- GitLab From 13e7ffa315ad3ab69ccfa19fc9a6c08425728728 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Mon, 29 Jun 2020 11:25:43 -0500 Subject: [PATCH 1189/1304] arm64: dts: stratix10: add status to qspi dts node commit 263a0269a59c0b4145829462a107fe7f7327105f upstream. Add status = "okay" to QSPI node. 
Fixes: 0cb140d07fc75 ("arm64: dts: stratix10: Add QSPI support for Stratix10") Cc: linux-stable # >= v5.6 Signed-off-by: Dinh Nguyen [iwamatsu: Drop arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts] Signed-off-by: Nobuhiro Iwamatsu Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index faa017d4cd56..636bab51de38 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -151,6 +151,7 @@ }; &qspi { + status = "okay"; flash@0 { #address-cells = <1>; #size-cells = <1>; -- GitLab From b6df5afc3d81e34d32f0b092d59b7fe8915d824b Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Tue, 6 Oct 2020 16:36:47 -0700 Subject: [PATCH 1190/1304] nvme-core: put ctrl ref when module ref get fail commit 4bab69093044ca81f394bd0780be1b71c5a4d308 upstream. When try_module_get() fails in the nvme_dev_open() it returns without releasing the ctrl reference which was taken earlier. Put the ctrl reference which is taken before calling the try_module_get() in the error return code path. 
Fixes: 52a3974feb1a "nvme-core: get/put ctrl and transport module in nvme_dev_open/release()" Signed-off-by: Chaitanya Kulkarni Reviewed-by: Logan Gunthorpe Signed-off-by: Christoph Hellwig Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9ea3d8e61100..b633ea40430e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2606,8 +2606,10 @@ static int nvme_dev_open(struct inode *inode, struct file *file) } nvme_get_ctrl(ctrl); - if (!try_module_get(ctrl->ops->module)) + if (!try_module_get(ctrl->ops->module)) { + nvme_put_ctrl(ctrl); return -EINVAL; + } file->private_data = ctrl; return 0; -- GitLab From f74e8d46dd6f4302023b2844e7d20130719e7885 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 7 Oct 2020 01:42:46 -0700 Subject: [PATCH 1191/1304] macsec: avoid use-after-free in macsec_handle_frame() commit c7cc9200e9b4a2ac172e990ef1975cd42975dad6 upstream. De-referencing skb after call to gro_cells_receive() is not allowed. We need to fetch skb->len earlier. 
Fixes: 5491e7c6b1a9 ("macsec: enable GRO and RPS on macsec devices") Signed-off-by: Eric Dumazet Cc: Paolo Abeni Acked-by: Paolo Abeni Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/macsec.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 4ad3b877e5fd..4c5b67a2d63a 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1085,6 +1085,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) struct macsec_rx_sa *rx_sa; struct macsec_rxh_data *rxd; struct macsec_dev *macsec; + unsigned int len; sci_t sci; u32 pn; bool cbit; @@ -1240,9 +1241,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) macsec_rxsc_put(rx_sc); skb_orphan(skb); + len = skb->len; ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) - count_rx(dev, skb->len); + count_rx(dev, len); else macsec->secy.netdev->stats.rx_dropped++; -- GitLab From fbe96d5aab1ef3c992b1dd7a0a4a5aeb21093571 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 9 Oct 2020 20:07:59 -0700 Subject: [PATCH 1192/1304] mm/khugepaged: fix filemap page_to_pgoff(page) != offset commit 033b5d77551167f8c24ca862ce83d3e0745f9245 upstream. There have been elusive reports of filemap_fault() hitting its VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page) on kernels built with CONFIG_READ_ONLY_THP_FOR_FS=y. Suren has hit it on a kernel with CONFIG_READ_ONLY_THP_FOR_FS=y and CONFIG_NUMA is not set: and he has analyzed it down to how khugepaged without NUMA reuses the same huge page after collapse_file() failed (whereas NUMA targets its allocation to the respective node each time). And most of us were usually testing with CONFIG_NUMA=y kernels. 
collapse_file(old start) new_page = khugepaged_alloc_page(hpage) __SetPageLocked(new_page) new_page->index = start // hpage->index=old offset new_page->mapping = mapping xas_store(&xas, new_page) filemap_fault page = find_get_page(mapping, offset) // if offset falls inside hpage then // compound_head(page) == hpage lock_page_maybe_drop_mmap() __lock_page(page) // collapse fails xas_store(&xas, old page) new_page->mapping = NULL unlock_page(new_page) collapse_file(new start) new_page = khugepaged_alloc_page(hpage) __SetPageLocked(new_page) new_page->index = start // hpage->index=new offset new_page->mapping = mapping // mapping becomes valid again // since compound_head(page) == hpage // page_to_pgoff(page) got changed VM_BUG_ON_PAGE(page_to_pgoff(page) != offset) An initial patch replaced __SetPageLocked() by lock_page(), which did fix the race which Suren illustrates above. But testing showed that it's not good enough: if the racing task's __lock_page() gets delayed long after its find_get_page(), then it may follow collapse_file(new start)'s successful final unlock_page(), and crash on the same VM_BUG_ON_PAGE. It could be fixed by relaxing filemap_fault()'s VM_BUG_ON_PAGE to a check and retry (as is done for mapping), with similar relaxations in find_lock_entry() and pagecache_get_page(): but it's not obvious what else might get caught out; and khugepaged non-NUMA appears to be unique in exposing a page to page cache, then revoking, without going through a full cycle of freeing before reuse. Instead, non-NUMA khugepaged_prealloc_page() release the old page if anyone else has a reference to it (1% of cases when I tested). Although never reported on huge tmpfs, I believe its find_lock_entry() has been at similar risk; but huge tmpfs does not rely on khugepaged for its normal working nearly so much as READ_ONLY_THP_FOR_FS does. 
Reported-by: Denis Lisov Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=206569 Link: https://lore.kernel.org/linux-mm/?q=20200219144635.3b7417145de19b65f258c943%40linux-foundation.org Reported-by: Qian Cai Link: https://lore.kernel.org/linux-xfs/?q=20200616013309.GB815%40lca.pw Reported-and-analyzed-by: Suren Baghdasaryan Fixes: 87c460a0bded ("mm/khugepaged: collapse_shmem() without freezing new_page") Signed-off-by: Hugh Dickins Cc: stable@vger.kernel.org # v4.9+ Reviewed-by: Matthew Wilcox (Oracle) Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/khugepaged.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index f37be43f8cae..30553d7df402 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -820,6 +820,18 @@ static struct page *khugepaged_alloc_hugepage(bool *wait) static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) { + /* + * If the hpage allocated earlier was briefly exposed in page cache + * before collapse_file() failed, it is possible that racing lookups + * have not yet completed, and would then be unpleasantly surprised by + * finding the hpage reused for the same mapping at a different offset. + * Just release the previous allocation if there is any danger of that. + */ + if (*hpage && page_count(*hpage) > 1) { + put_page(*hpage); + *hpage = NULL; + } + if (!*hpage) *hpage = khugepaged_alloc_hugepage(wait); -- GitLab From a01cb66b26a39062a3cccc6e77b3b53a737c254e Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Tue, 4 Aug 2020 11:37:29 +0200 Subject: [PATCH 1193/1304] xfrmi: drop ignore_df check before updating pmtu commit 45a36a18d01907710bad5258d81f76c18882ad88 upstream. xfrm interfaces currently test for !skb->ignore_df when deciding whether to update the pmtu on the skb's dst. 
Because of this, no pmtu exception is created when we do something like: ping -s 1438 By dropping this check, the pmtu exception will be created and the next ping attempt will work. Fixes: f203b76d7809 ("xfrm: Add virtual xfrm interfaces") Reported-by: Xiumei Mu Signed-off-by: Sabrina Dubroca Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_interface.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 6f00f88adab9..6cc9f6e2dd2b 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -293,7 +293,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) } mtu = dst_mtu(dst); - if (!skb->ignore_df && skb->len > mtu) { + if (skb->len > mtu) { skb_dst_update_pmtu_no_confirm(skb, mtu); if (skb->protocol == htons(ETH_P_IPV6)) { -- GitLab From eb13209e0cbea7ed59a16da14c6987659219470f Mon Sep 17 00:00:00 2001 From: Vladimir Zapolskiy Date: Sat, 10 Oct 2020 21:25:54 +0300 Subject: [PATCH 1194/1304] cifs: Fix incomplete memory allocation on setxattr path commit 64b7f674c292207624b3d788eda2dde3dc1415df upstream. On setxattr() syscall path due to an apprent typo the size of a dynamically allocated memory chunk for storing struct smb2_file_full_ea_info object is computed incorrectly, to be more precise the first addend is the size of a pointer instead of the wanted object size. Coincidentally it makes no difference on 64-bit platforms, however on 32-bit targets the following memcpy() writes 4 bytes of data outside of the dynamically allocated memory. ============================================================================= BUG kmalloc-16 (Not tainted): Redzone overwritten ----------------------------------------------------------------------------- Disabling lock debugging due to kernel taint INFO: 0x79e69a6f-0x9e5cdecf @offset=368. 
First byte 0x73 instead of 0xcc INFO: Slab 0xd36d2454 objects=85 used=51 fp=0xf7d0fc7a flags=0x35000201 INFO: Object 0x6f171df3 @offset=352 fp=0x00000000 Redzone 5d4ff02d: cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc cc ................ Object 6f171df3: 00 00 00 00 00 05 06 00 73 6e 72 75 62 00 66 69 ........snrub.fi Redzone 79e69a6f: 73 68 32 0a sh2. Padding 56254d82: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ CPU: 0 PID: 8196 Comm: attr Tainted: G B 5.9.0-rc8+ #3 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1 04/01/2014 Call Trace: dump_stack+0x54/0x6e print_trailer+0x12c/0x134 check_bytes_and_report.cold+0x3e/0x69 check_object+0x18c/0x250 free_debug_processing+0xfe/0x230 __slab_free+0x1c0/0x300 kfree+0x1d3/0x220 smb2_set_ea+0x27d/0x540 cifs_xattr_set+0x57f/0x620 __vfs_setxattr+0x4e/0x60 __vfs_setxattr_noperm+0x4e/0x100 __vfs_setxattr_locked+0xae/0xd0 vfs_setxattr+0x4e/0xe0 setxattr+0x12c/0x1a0 path_setxattr+0xa4/0xc0 __ia32_sys_lsetxattr+0x1d/0x20 __do_fast_syscall_32+0x40/0x70 do_fast_syscall_32+0x29/0x60 do_SYSENTER_32+0x15/0x20 entry_SYSENTER_32+0x9f/0xf2 Fixes: 5517554e4313 ("cifs: Add support for writing attributes on SMB2+") Signed-off-by: Vladimir Zapolskiy Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb2ops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 947a40069d24..3d63c76ed098 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -950,7 +950,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, return rc; } - len = sizeof(ea) + ea_name_len + ea_value_len + 1; + len = sizeof(*ea) + ea_name_len + ea_value_len + 1; ea = kzalloc(len, GFP_KERNEL); if (ea == NULL) { SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); -- GitLab From 59b6343f0a01c216fd6824ad3263675e626ed7ae Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Wed, 7 Oct 2020 10:07:49 +0200 Subject: [PATCH 1195/1304] i2c: meson: fix clock setting overwrite commit 
28683e847e2f20eed22cdd24f185d7783db396d3 upstream. When the slave address is written in do_start(), SLAVE_ADDR is written completely. This may overwrite some setting related to the clock rate or signal filtering. Fix this by writing only the bits related to slave address. To avoid causing unexpected changed, explicitly disable filtering or high/low clock mode which may have been left over by the bootloader. Fixes: 30021e3707a7 ("i2c: add support for Amlogic Meson I2C controller") Signed-off-by: Jerome Brunet Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-meson.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 90f5d0407d73..cfd7af3073fe 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include @@ -40,6 +41,12 @@ #define REG_CTRL_CLKDIVEXT_SHIFT 28 #define REG_CTRL_CLKDIVEXT_MASK GENMASK(29, 28) +#define REG_SLV_ADDR GENMASK(7, 0) +#define REG_SLV_SDA_FILTER GENMASK(10, 8) +#define REG_SLV_SCL_FILTER GENMASK(13, 11) +#define REG_SLV_SCL_LOW GENMASK(27, 16) +#define REG_SLV_SCL_LOW_EN BIT(28) + #define I2C_TIMEOUT_MS 500 enum { @@ -149,6 +156,9 @@ static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK, (div >> 10) << REG_CTRL_CLKDIVEXT_SHIFT); + /* Disable HIGH/LOW mode */ + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, 0); + dev_dbg(i2c->dev, "%s: clk %lu, freq %u, div %u\n", __func__, clk_rate, freq, div); } @@ -276,7 +286,10 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg) token = (msg->flags & I2C_M_RD) ? 
TOKEN_SLAVE_ADDR_READ : TOKEN_SLAVE_ADDR_WRITE; - writel(msg->addr << 1, i2c->regs + REG_SLAVE_ADDR); + + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR, + FIELD_PREP(REG_SLV_ADDR, msg->addr << 1)); + meson_i2c_add_token(i2c, TOKEN_START); meson_i2c_add_token(i2c, token); } @@ -435,6 +448,10 @@ static int meson_i2c_probe(struct platform_device *pdev) return ret; } + /* Disable filtering */ + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, + REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0); + meson_i2c_set_clk_div(i2c, timings.bus_freq_hz); return 0; -- GitLab From f1a66d5b7683464bf8f2104f830ebf87da10535e Mon Sep 17 00:00:00 2001 From: Nicolas Belin Date: Wed, 7 Oct 2020 10:07:51 +0200 Subject: [PATCH 1196/1304] i2c: meson: fixup rate calculation with filter delay commit 1334d3b4e49e35d8912a7c37ffca4c5afb9a0516 upstream. Apparently, 15 cycles of the peripheral clock are used by the controller for sampling and filtering. Because this was not known before, the rate calculation is slightly off. Clean up and fix the calculation taking this filtering delay into account. 
Fixes: 30021e3707a7 ("i2c: add support for Amlogic Meson I2C controller") Signed-off-by: Nicolas Belin Signed-off-by: Jerome Brunet Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-meson.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index cfd7af3073fe..f48e637f1a3e 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -36,10 +36,8 @@ #define REG_CTRL_ACK_IGNORE BIT(1) #define REG_CTRL_STATUS BIT(2) #define REG_CTRL_ERROR BIT(3) -#define REG_CTRL_CLKDIV_SHIFT 12 -#define REG_CTRL_CLKDIV_MASK GENMASK(21, 12) -#define REG_CTRL_CLKDIVEXT_SHIFT 28 -#define REG_CTRL_CLKDIVEXT_MASK GENMASK(29, 28) +#define REG_CTRL_CLKDIV GENMASK(21, 12) +#define REG_CTRL_CLKDIVEXT GENMASK(29, 28) #define REG_SLV_ADDR GENMASK(7, 0) #define REG_SLV_SDA_FILTER GENMASK(10, 8) @@ -48,6 +46,7 @@ #define REG_SLV_SCL_LOW_EN BIT(28) #define I2C_TIMEOUT_MS 500 +#define FILTER_DELAY 15 enum { TOKEN_END = 0, @@ -142,19 +141,21 @@ static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) unsigned long clk_rate = clk_get_rate(i2c->clk); unsigned int div; - div = DIV_ROUND_UP(clk_rate, freq * i2c->data->div_factor); + div = DIV_ROUND_UP(clk_rate, freq); + div -= FILTER_DELAY; + div = DIV_ROUND_UP(div, i2c->data->div_factor); /* clock divider has 12 bits */ - if (div >= (1 << 12)) { + if (div > GENMASK(11, 0)) { dev_err(i2c->dev, "requested bus frequency too low\n"); - div = (1 << 12) - 1; + div = GENMASK(11, 0); } - meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK, - (div & GENMASK(9, 0)) << REG_CTRL_CLKDIV_SHIFT); + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV, + FIELD_PREP(REG_CTRL_CLKDIV, div & GENMASK(9, 0))); - meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK, - (div >> 10) << REG_CTRL_CLKDIVEXT_SHIFT); + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT, + 
FIELD_PREP(REG_CTRL_CLKDIVEXT, div >> 10)); /* Disable HIGH/LOW mode */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, 0); -- GitLab From 250a51b772dbb5938f506d3385d8a0909ddf94b7 Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Fri, 9 Oct 2020 00:44:39 +0300 Subject: [PATCH 1197/1304] i2c: owl: Clear NACK and BUS error bits commit f5b3f433641c543ebe5171285a42aa6adcdb2d22 upstream. When the NACK and BUS error bits are set by the hardware, the driver is responsible for clearing them by writing "1" into the corresponding status registers. Hence perform the necessary operations in owl_i2c_interrupt(). Fixes: d211e62af466 ("i2c: Add Actions Semiconductor Owl family S900 I2C driver") Reported-by: Manivannan Sadhasivam Signed-off-by: Cristian Ciocaltea Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-owl.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c index 96b4572e6d9c..cf3fcf35fe3d 100644 --- a/drivers/i2c/busses/i2c-owl.c +++ b/drivers/i2c/busses/i2c-owl.c @@ -179,6 +179,9 @@ static irqreturn_t owl_i2c_interrupt(int irq, void *_dev) fifostat = readl(i2c_dev->base + OWL_I2C_REG_FIFOSTAT); if (fifostat & OWL_I2C_FIFOSTAT_RNB) { i2c_dev->err = -ENXIO; + /* Clear NACK error bit by writing "1" */ + owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_FIFOSTAT, + OWL_I2C_FIFOSTAT_RNB, true); goto stop; } @@ -186,6 +189,9 @@ static irqreturn_t owl_i2c_interrupt(int irq, void *_dev) stat = readl(i2c_dev->base + OWL_I2C_REG_STAT); if (stat & OWL_I2C_STAT_BEB) { i2c_dev->err = -EIO; + /* Clear BUS error bit by writing "1" */ + owl_i2c_update_reg(i2c_dev->base + OWL_I2C_REG_STAT, + OWL_I2C_STAT_BEB, true); goto stop; } -- GitLab From a813aaee68809b5fc3935ec5ccf7cdba75a9c792 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 8 Oct 2020 01:38:31 -0700 Subject: [PATCH 1198/1304] sctp: fix sctp_auth_init_hmacs() error path commit 
d42ee76ecb6c49d499fc5eb32ca34468d95dbc3e upstream. After freeing ep->auth_hmacs we have to clear the pointer or risk use-after-free as reported by syzbot: BUG: KASAN: use-after-free in sctp_auth_destroy_hmacs net/sctp/auth.c:509 [inline] BUG: KASAN: use-after-free in sctp_auth_destroy_hmacs net/sctp/auth.c:501 [inline] BUG: KASAN: use-after-free in sctp_auth_free+0x17e/0x1d0 net/sctp/auth.c:1070 Read of size 8 at addr ffff8880a8ff52c0 by task syz-executor941/6874 CPU: 0 PID: 6874 Comm: syz-executor941 Not tainted 5.9.0-rc8-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x198/0x1fd lib/dump_stack.c:118 print_address_description.constprop.0.cold+0xae/0x497 mm/kasan/report.c:383 __kasan_report mm/kasan/report.c:513 [inline] kasan_report.cold+0x1f/0x37 mm/kasan/report.c:530 sctp_auth_destroy_hmacs net/sctp/auth.c:509 [inline] sctp_auth_destroy_hmacs net/sctp/auth.c:501 [inline] sctp_auth_free+0x17e/0x1d0 net/sctp/auth.c:1070 sctp_endpoint_destroy+0x95/0x240 net/sctp/endpointola.c:203 sctp_endpoint_put net/sctp/endpointola.c:236 [inline] sctp_endpoint_free+0xd6/0x110 net/sctp/endpointola.c:183 sctp_destroy_sock+0x9c/0x3c0 net/sctp/socket.c:4981 sctp_v6_destroy_sock+0x11/0x20 net/sctp/socket.c:9415 sk_common_release+0x64/0x390 net/core/sock.c:3254 sctp_close+0x4ce/0x8b0 net/sctp/socket.c:1533 inet_release+0x12e/0x280 net/ipv4/af_inet.c:431 inet6_release+0x4c/0x70 net/ipv6/af_inet6.c:475 __sock_release+0xcd/0x280 net/socket.c:596 sock_close+0x18/0x20 net/socket.c:1277 __fput+0x285/0x920 fs/file_table.c:281 task_work_run+0xdd/0x190 kernel/task_work.c:141 exit_task_work include/linux/task_work.h:25 [inline] do_exit+0xb7d/0x29f0 kernel/exit.c:806 do_group_exit+0x125/0x310 kernel/exit.c:903 __do_sys_exit_group kernel/exit.c:914 [inline] __se_sys_exit_group kernel/exit.c:912 [inline] __x64_sys_exit_group+0x3a/0x50 kernel/exit.c:912 do_syscall_64+0x2d/0x70 
arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x43f278 Code: Bad RIP value. RSP: 002b:00007fffe0995c38 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7 RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 000000000043f278 RDX: 0000000000000000 RSI: 000000000000003c RDI: 0000000000000000 RBP: 00000000004bf068 R08: 00000000000000e7 R09: ffffffffffffffd0 R10: 0000000020000000 R11: 0000000000000246 R12: 0000000000000001 R13: 00000000006d1180 R14: 0000000000000000 R15: 0000000000000000 Allocated by task 6874: kasan_save_stack+0x1b/0x40 mm/kasan/common.c:48 kasan_set_track mm/kasan/common.c:56 [inline] __kasan_kmalloc.constprop.0+0xbf/0xd0 mm/kasan/common.c:461 kmem_cache_alloc_trace+0x174/0x300 mm/slab.c:3554 kmalloc include/linux/slab.h:554 [inline] kmalloc_array include/linux/slab.h:593 [inline] kcalloc include/linux/slab.h:605 [inline] sctp_auth_init_hmacs+0xdb/0x3b0 net/sctp/auth.c:464 sctp_auth_init+0x8a/0x4a0 net/sctp/auth.c:1049 sctp_setsockopt_auth_supported net/sctp/socket.c:4354 [inline] sctp_setsockopt+0x477e/0x97f0 net/sctp/socket.c:4631 __sys_setsockopt+0x2db/0x610 net/socket.c:2132 __do_sys_setsockopt net/socket.c:2143 [inline] __se_sys_setsockopt net/socket.c:2140 [inline] __x64_sys_setsockopt+0xba/0x150 net/socket.c:2140 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Freed by task 6874: kasan_save_stack+0x1b/0x40 mm/kasan/common.c:48 kasan_set_track+0x1c/0x30 mm/kasan/common.c:56 kasan_set_free_info+0x1b/0x30 mm/kasan/generic.c:355 __kasan_slab_free+0xd8/0x120 mm/kasan/common.c:422 __cache_free mm/slab.c:3422 [inline] kfree+0x10e/0x2b0 mm/slab.c:3760 sctp_auth_destroy_hmacs net/sctp/auth.c:511 [inline] sctp_auth_destroy_hmacs net/sctp/auth.c:501 [inline] sctp_auth_init_hmacs net/sctp/auth.c:496 [inline] sctp_auth_init_hmacs+0x2b7/0x3b0 net/sctp/auth.c:454 sctp_auth_init+0x8a/0x4a0 net/sctp/auth.c:1049 sctp_setsockopt_auth_supported net/sctp/socket.c:4354 [inline] 
sctp_setsockopt+0x477e/0x97f0 net/sctp/socket.c:4631 __sys_setsockopt+0x2db/0x610 net/socket.c:2132 __do_sys_setsockopt net/socket.c:2143 [inline] __se_sys_setsockopt net/socket.c:2140 [inline] __x64_sys_setsockopt+0xba/0x150 net/socket.c:2140 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: 1f485649f529 ("[SCTP]: Implement SCTP-AUTH internals") Signed-off-by: Eric Dumazet Cc: Vlad Yasevich Cc: Neil Horman Cc: Marcelo Ricardo Leitner Acked-by: Marcelo Ricardo Leitner Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/sctp/auth.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 5b537613946f..2bd8c80bd85f 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -515,6 +515,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp) out_err: /* Clean up any successful allocations */ sctp_auth_destroy_hmacs(ep->auth_hmacs); + ep->auth_hmacs = NULL; return -ENOMEM; } -- GitLab From 34c9b9c992b450cbac51fdca2b078d3b6fb5ce82 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 25 Sep 2020 06:38:08 -0700 Subject: [PATCH 1199/1304] team: set dev->needed_headroom in team_setup_by_port() commit 89d01748b2354e210b5d4ea47bc25a42a1b42c82 upstream. Some devices set needed_headroom. If we ignore it, we might end up crashing in various skb_push() for example in ipgre_header() since some layers assume enough headroom has been reserved. Fixes: 1d76efe1577b ("team: add support for non-ethernet devices") Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/team/team.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 53d9562a8818..a4ea56592f6b 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2086,6 +2086,7 @@ static void team_setup_by_port(struct net_device *dev, dev->header_ops = port_dev->header_ops; dev->type = port_dev->type; dev->hard_header_len = port_dev->hard_header_len; + dev->needed_headroom = port_dev->needed_headroom; dev->addr_len = port_dev->addr_len; dev->mtu = port_dev->mtu; memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); -- GitLab From c7c4834d617702fed10a76e13cd7cd55234d00f2 Mon Sep 17 00:00:00 2001 From: Anant Thazhemadam Date: Mon, 5 Oct 2020 02:25:36 +0530 Subject: [PATCH 1200/1304] net: team: fix memory leak in __team_options_register commit 9a9e77495958c7382b2438bc19746dd3aaaabb8e upstream. The variable "i" isn't initialized back correctly after the first loop under the label inst_rollback gets executed. The value of "i" is assigned to be option_count - 1, and the ensuing loop (under alloc_rollback) begins by initializing i--. Thus, the value of i when the loop begins execution will now become i = option_count - 2. Thus, when kfree(dst_opts[i]) is called in the second loop in this order, (i.e., inst_rollback followed by alloc_rollback), dst_optsp[option_count - 2] is the first element freed, and dst_opts[option_count - 1] does not get freed, and thus, a memory leak is caused. This memory leak can be fixed, by assigning i = option_count (instead of option_count - 1). Fixes: 80f7c6683fe0 ("team: add support for per-port options") Reported-by: syzbot+69b804437cfec30deac3@syzkaller.appspotmail.com Tested-by: syzbot+69b804437cfec30deac3@syzkaller.appspotmail.com Signed-off-by: Anant Thazhemadam Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/team/team.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a4ea56592f6b..3eb034a5a659 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -294,7 +294,7 @@ static int __team_options_register(struct team *team, for (i--; i >= 0; i--) __team_option_inst_del_option(team, dst_opts[i]); - i = option_count - 1; + i = option_count; alloc_rollback: for (i--; i >= 0; i--) kfree(dst_opts[i]); -- GitLab From 5ab1e499f2428a4200ea098a7221efe7ef133530 Mon Sep 17 00:00:00 2001 From: Dumitru Ceara Date: Wed, 7 Oct 2020 17:48:03 +0200 Subject: [PATCH 1201/1304] openvswitch: handle DNAT tuple collision commit 8aa7b526dc0b5dbf40c1b834d76a667ad672a410 upstream. With multiple DNAT rules it's possible that after destination translation the resulting tuples collide. For example, two openvswitch flows: nw_dst=10.0.0.10,tp_dst=10, actions=ct(commit,table=2,nat(dst=20.0.0.1:20)) nw_dst=10.0.0.20,tp_dst=10, actions=ct(commit,table=2,nat(dst=20.0.0.1:20)) Assuming two TCP clients initiating the following connections: 10.0.0.10:5000->10.0.0.10:10 10.0.0.10:5000->10.0.0.20:10 Both tuples would translate to 10.0.0.10:5000->20.0.0.1:20 causing nf_conntrack_confirm() to fail because of tuple collision. Netfilter handles this case by allocating a null binding for SNAT at egress by default. Perform the same operation in openvswitch for DNAT if no explicit SNAT is requested by the user and allocate a null binding for SNAT for packets in the "original" direction. 
Reported-at: https://bugzilla.redhat.com/1877128 Suggested-by: Florian Westphal Fixes: 05752523e565 ("openvswitch: Interface with NAT.") Signed-off-by: Dumitru Ceara Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/openvswitch/conntrack.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index f8e073ef1a67..fb13fcfedaf4 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -899,15 +899,19 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, } err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); - if (err == NF_ACCEPT && - ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { - if (maniptype == NF_NAT_MANIP_SRC) - maniptype = NF_NAT_MANIP_DST; - else - maniptype = NF_NAT_MANIP_SRC; - - err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, - maniptype); + if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) { + if (ct->status & IPS_SRC_NAT) { + if (maniptype == NF_NAT_MANIP_SRC) + maniptype = NF_NAT_MANIP_DST; + else + maniptype = NF_NAT_MANIP_SRC; + + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, + maniptype); + } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { + err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL, + NF_NAT_MANIP_SRC); + } } /* Mark NAT done if successful and update the flow key. */ -- GitLab From a1491a6504c38b1e81d7136da44863b214a697c6 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Tue, 15 Sep 2020 17:07:35 -0400 Subject: [PATCH 1202/1304] drm/amdgpu: prevent double kfree ttm->sg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 1d0e16ac1a9e800598dcfa5b6bc53b704a103390 ] Set ttm->sg to NULL after kfree, to avoid memory corruption backtrace: [ 420.932812] kernel BUG at /build/linux-do9eLF/linux-4.15.0/mm/slub.c:295! 
[ 420.934182] invalid opcode: 0000 [#1] SMP NOPTI [ 420.935445] Modules linked in: xt_conntrack ipt_MASQUERADE [ 420.951332] Hardware name: Dell Inc. PowerEdge R7525/0PYVT1, BIOS 1.5.4 07/09/2020 [ 420.952887] RIP: 0010:__slab_free+0x180/0x2d0 [ 420.954419] RSP: 0018:ffffbe426291fa60 EFLAGS: 00010246 [ 420.955963] RAX: ffff9e29263e9c30 RBX: ffff9e29263e9c30 RCX: 000000018100004b [ 420.957512] RDX: ffff9e29263e9c30 RSI: fffff3d33e98fa40 RDI: ffff9e297e407a80 [ 420.959055] RBP: ffffbe426291fb00 R08: 0000000000000001 R09: ffffffffc0d39ade [ 420.960587] R10: ffffbe426291fb20 R11: ffff9e49ffdd4000 R12: ffff9e297e407a80 [ 420.962105] R13: fffff3d33e98fa40 R14: ffff9e29263e9c30 R15: ffff9e2954464fd8 [ 420.963611] FS: 00007fa2ea097780(0000) GS:ffff9e297e840000(0000) knlGS:0000000000000000 [ 420.965144] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 420.966663] CR2: 00007f16bfffefb8 CR3: 0000001ff0c62000 CR4: 0000000000340ee0 [ 420.968193] Call Trace: [ 420.969703] ? __page_cache_release+0x3c/0x220 [ 420.971294] ? amdgpu_ttm_tt_unpopulate+0x5e/0x80 [amdgpu] [ 420.972789] kfree+0x168/0x180 [ 420.974353] ? amdgpu_ttm_tt_set_user_pages+0x64/0xc0 [amdgpu] [ 420.975850] ? 
kfree+0x168/0x180 [ 420.977403] amdgpu_ttm_tt_unpopulate+0x5e/0x80 [amdgpu] [ 420.978888] ttm_tt_unpopulate.part.10+0x53/0x60 [amdttm] [ 420.980357] ttm_tt_destroy.part.11+0x4f/0x60 [amdttm] [ 420.981814] ttm_tt_destroy+0x13/0x20 [amdttm] [ 420.983273] ttm_bo_cleanup_memtype_use+0x36/0x80 [amdttm] [ 420.984725] ttm_bo_release+0x1c9/0x360 [amdttm] [ 420.986167] amdttm_bo_put+0x24/0x30 [amdttm] [ 420.987663] amdgpu_bo_unref+0x1e/0x30 [amdgpu] [ 420.989165] amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu+0x9ca/0xb10 [amdgpu] [ 420.990666] kfd_ioctl_alloc_memory_of_gpu+0xef/0x2c0 [amdgpu] Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fcf421263fd9..abad7460084f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -954,6 +954,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) release_sg: kfree(ttm->sg); + ttm->sg = NULL; return r; } -- GitLab From 50e117921b322323b7272f108d9c080ad883ee0a Mon Sep 17 00:00:00 2001 From: Antony Antony Date: Fri, 4 Sep 2020 08:49:38 +0200 Subject: [PATCH 1203/1304] xfrm: clone XFRMA_SET_MARK in xfrm_do_migrate [ Upstream commit 545e5c571662b1cd79d9588f9d3b6e36985b8007 ] XFRMA_SET_MARK and XFRMA_SET_MARK_MASK was not cloned from the old to the new. 
Migrate these two attributes during XFRMA_MSG_MIGRATE Fixes: 9b42c1f179a6 ("xfrm: Extend the output_mark to support input direction and masking.") Signed-off-by: Antony Antony Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/xfrm/xfrm_state.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 47a8ff972a2b..d76b019673aa 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1410,6 +1410,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, } memcpy(&x->mark, &orig->mark, sizeof(x->mark)); + memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark)); if (xfrm_init_state(x) < 0) goto error; -- GitLab From fb48241729bd18dfa252f0e60ea88f331f10e968 Mon Sep 17 00:00:00 2001 From: Antony Antony Date: Fri, 4 Sep 2020 08:49:55 +0200 Subject: [PATCH 1204/1304] xfrm: clone XFRMA_REPLAY_ESN_VAL in xfrm_do_migrate [ Upstream commit 91a46c6d1b4fcbfa4773df9421b8ad3e58088101 ] XFRMA_REPLAY_ESN_VAL was not cloned completely from the old to the new. 
Migrate this attribute during XFRMA_MSG_MIGRATE v1->v2: - move curleft cloning to a separate patch Fixes: af2f464e326e ("xfrm: Assign esn pointers when cloning a state") Signed-off-by: Antony Antony Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- include/net/xfrm.h | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 3a0b5de742e9..fe8bed557691 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1873,21 +1873,17 @@ static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_es static inline int xfrm_replay_clone(struct xfrm_state *x, struct xfrm_state *orig) { - x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn), + + x->replay_esn = kmemdup(orig->replay_esn, + xfrm_replay_state_esn_len(orig->replay_esn), GFP_KERNEL); if (!x->replay_esn) return -ENOMEM; - - x->replay_esn->bmp_len = orig->replay_esn->bmp_len; - x->replay_esn->replay_window = orig->replay_esn->replay_window; - - x->preplay_esn = kmemdup(x->replay_esn, - xfrm_replay_state_esn_len(x->replay_esn), + x->preplay_esn = kmemdup(orig->preplay_esn, + xfrm_replay_state_esn_len(orig->preplay_esn), GFP_KERNEL); - if (!x->preplay_esn) { - kfree(x->replay_esn); + if (!x->preplay_esn) return -ENOMEM; - } return 0; } -- GitLab From e4e0a05e1086b1a0045f1876786c340c212717c0 Mon Sep 17 00:00:00 2001 From: Antony Antony Date: Fri, 4 Sep 2020 08:50:11 +0200 Subject: [PATCH 1205/1304] xfrm: clone XFRMA_SEC_CTX in xfrm_do_migrate [ Upstream commit 7aa05d304785204703a67a6aa7f1db402889a172 ] XFRMA_SEC_CTX was not cloned from the old to the new. 
Migrate this attribute during XFRMA_MSG_MIGRATE v1->v2: - return -ENOMEM on error v2->v3: - fix return type to int Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)") Signed-off-by: Antony Antony Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/xfrm/xfrm_state.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index d76b019673aa..c2640875ec75 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1341,6 +1341,30 @@ int xfrm_state_add(struct xfrm_state *x) EXPORT_SYMBOL(xfrm_state_add); #ifdef CONFIG_XFRM_MIGRATE +static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security) +{ + struct xfrm_user_sec_ctx *uctx; + int size = sizeof(*uctx) + security->ctx_len; + int err; + + uctx = kmalloc(size, GFP_KERNEL); + if (!uctx) + return -ENOMEM; + + uctx->exttype = XFRMA_SEC_CTX; + uctx->len = size; + uctx->ctx_doi = security->ctx_doi; + uctx->ctx_alg = security->ctx_alg; + uctx->ctx_len = security->ctx_len; + memcpy(uctx + 1, security->ctx_str, security->ctx_len); + err = security_xfrm_state_alloc(x, uctx); + kfree(uctx); + if (err) + return err; + + return 0; +} + static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, struct xfrm_encap_tmpl *encap) { @@ -1397,6 +1421,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, goto error; } + if (orig->security) + if (clone_security(x, orig->security)) + goto error; + if (orig->coaddr) { x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr), GFP_KERNEL); -- GitLab From ce868836d8557b9832b17ad6eeed18a8d7f866f6 Mon Sep 17 00:00:00 2001 From: Antony Antony Date: Fri, 4 Sep 2020 08:50:29 +0200 Subject: [PATCH 1206/1304] xfrm: clone whole liftime_cur structure in xfrm_do_migrate [ Upstream commit 8366685b2883e523f91e9816d7be371eb1144749 ] When we clone state only add_time was cloned. It missed values like bytes, packets. 
Now clone the all members of the structure. v1->v3: - use memcpy to copy the entire structure Fixes: 80c9abaabf42 ("[XFRM]: Extension for dynamic update of endpoint address(es)") Signed-off-by: Antony Antony Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/xfrm/xfrm_state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c2640875ec75..c68eb587c0ef 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1450,7 +1450,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, x->tfcpad = orig->tfcpad; x->replay_maxdiff = orig->replay_maxdiff; x->replay_maxage = orig->replay_maxage; - x->curlft.add_time = orig->curlft.add_time; + memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft)); x->km.state = orig->km.state; x->km.seq = orig->km.seq; x->replay = orig->replay; -- GitLab From e1f07bb5cea26f0bf9dc68a41b5a0bfcf15c2afc Mon Sep 17 00:00:00 2001 From: Voon Weifeng Date: Wed, 23 Sep 2020 16:56:14 +0800 Subject: [PATCH 1207/1304] net: stmmac: removed enabling eee in EEE set callback [ Upstream commit 7241c5a697479c7d0c5a96595822cdab750d41ae ] EEE should be only be enabled during stmmac_mac_link_up() when the link are up and being set up properly. set_eee should only do settings configuration and disabling the eee. Without this fix, turning on EEE using ethtool will return "Operation not supported". This is due to the driver is in a dead loop waiting for eee to be advertised in the for eee to be activated but the driver will only configure the EEE advertisement after the eee is activated. Ethtool should only return "Operation not supported" if there is no EEE capbility in the MAC controller. Fixes: 8a7493e58ad6 ("net: stmmac: Fix a race in EEE enable callback") Signed-off-by: Voon Weifeng Acked-by: Mark Gross Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 4d5fb4b51cc4..5986fe927ad0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -694,23 +694,16 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, struct stmmac_priv *priv = netdev_priv(dev); int ret; - if (!edata->eee_enabled) { + if (!priv->dma_cap.eee) + return -EOPNOTSUPP; + + if (!edata->eee_enabled) stmmac_disable_eee_mode(priv); - } else { - /* We are asking for enabling the EEE but it is safe - * to verify all by invoking the eee_init function. - * In case of failure it will return an error. - */ - edata->eee_enabled = stmmac_eee_init(priv); - if (!edata->eee_enabled) - return -EOPNOTSUPP; - } ret = phy_ethtool_set_eee(dev->phydev, edata); if (ret) return ret; - priv->eee_enabled = edata->eee_enabled; priv->tx_lpi_timer = edata->tx_lpi_timer; return 0; } -- GitLab From deea1e4007ed00de5c2ca8211504fbea1417f61a Mon Sep 17 00:00:00 2001 From: Necip Fazil Yildiran Date: Thu, 17 Sep 2020 19:16:53 +0300 Subject: [PATCH 1208/1304] platform/x86: fix kconfig dependency warning for FUJITSU_LAPTOP [ Upstream commit afdd1ebb72051e8b6b83c4d7dc542a9be0e1352d ] When FUJITSU_LAPTOP is enabled and NEW_LEDS is disabled, it results in the following Kbuild warning: WARNING: unmet direct dependencies detected for LEDS_CLASS Depends on [n]: NEW_LEDS [=n] Selected by [y]: - FUJITSU_LAPTOP [=y] && X86 [=y] && X86_PLATFORM_DEVICES [=y] && ACPI [=y] && INPUT [=y] && BACKLIGHT_CLASS_DEVICE [=y] && (ACPI_VIDEO [=n] || ACPI_VIDEO [=n]=n) The reason is that FUJITSU_LAPTOP selects LEDS_CLASS without depending on or selecting NEW_LEDS while LEDS_CLASS is subordinate to NEW_LEDS. 
Honor the kconfig menu hierarchy to remove kconfig dependency warnings. Reported-by: Hans de Goede Fixes: d89bcc83e709 ("platform/x86: fujitsu-laptop: select LEDS_CLASS") Signed-off-by: Necip Fazil Yildiran Signed-off-by: Andy Shevchenko Signed-off-by: Sasha Levin --- drivers/platform/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 1e2524de6a63..a13bb4ddd0cf 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -235,6 +235,7 @@ config FUJITSU_LAPTOP depends on BACKLIGHT_CLASS_DEVICE depends on ACPI_VIDEO || ACPI_VIDEO = n select INPUT_SPARSEKMAP + select NEW_LEDS select LEDS_CLASS ---help--- This is a driver for laptops built by Fujitsu: -- GitLab From 66a675d390060faaabaf58eefd7ebbb9b51f723b Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 25 Sep 2020 14:42:56 +1000 Subject: [PATCH 1209/1304] xfrm: Use correct address family in xfrm_state_find [ Upstream commit e94ee171349db84c7cfdc5fefbebe414054d0924 ] The struct flowi must never be interpreted by itself as its size depends on the address family. Therefore it must always be grouped with its original family value. In this particular instance, the original family value is lost in the function xfrm_state_find. Therefore we get a bogus read when it's coupled with the wrong family which would occur with inter- family xfrm states. This patch fixes it by keeping the original family value. Note that the same bug could potentially occur in LSM through the xfrm_state_pol_flow_match hook. I checked the current code there and it seems to be safe for now as only secid is used which is part of struct flowi_common. But that API should be changed so that so that we don't get new bugs in the future. We could do that by replacing fl with just secid or adding a family field. 
Reported-by: syzbot+577fbac3145a6eb2e7a5@syzkaller.appspotmail.com Fixes: 48b8d78315bf ("[XFRM]: State selection update to use inner...") Signed-off-by: Herbert Xu Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/xfrm/xfrm_state.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c68eb587c0ef..a649d7c2f48c 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -923,7 +923,8 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, */ if (x->km.state == XFRM_STATE_VALID) { if ((x->sel.family && - !xfrm_selector_match(&x->sel, fl, x->sel.family)) || + (x->sel.family != family || + !xfrm_selector_match(&x->sel, fl, family))) || !security_xfrm_state_pol_flow_match(x, pol, fl)) return; @@ -936,7 +937,9 @@ static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, *acq_in_progress = 1; } else if (x->km.state == XFRM_STATE_ERROR || x->km.state == XFRM_STATE_EXPIRED) { - if (xfrm_selector_match(&x->sel, fl, x->sel.family) && + if ((!x->sel.family || + (x->sel.family == family && + xfrm_selector_match(&x->sel, fl, family))) && security_xfrm_state_pol_flow_match(x, pol, fl)) *error = -ESRCH; } @@ -976,7 +979,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) - xfrm_state_look_at(pol, x, fl, encap_family, + xfrm_state_look_at(pol, x, fl, family, &best, &acquire_in_progress, &error); } if (best || acquire_in_progress) @@ -993,7 +996,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, tmpl->mode == x->props.mode && tmpl->id.proto == x->id.proto && (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) - xfrm_state_look_at(pol, x, fl, encap_family, + xfrm_state_look_at(pol, x, fl, family, &best, &acquire_in_progress, &error); } -- GitLab From 
3f396a6be34ab71d1d6bd935ad1b787800781849 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 25 Sep 2020 06:38:07 -0700 Subject: [PATCH 1210/1304] bonding: set dev->needed_headroom in bond_setup_by_slave() [ Upstream commit f32f19339596b214c208c0dba716f4b6cc4f6958 ] syzbot managed to crash a host by creating a bond with a GRE device. For non Ethernet device, bonding calls bond_setup_by_slave() instead of ether_setup(), and unfortunately dev->needed_headroom was not copied from the new added member. [ 171.243095] skbuff: skb_under_panic: text:ffffffffa184b9ea len:116 put:20 head:ffff883f84012dc0 data:ffff883f84012dbc tail:0x70 end:0xd00 dev:bond0 [ 171.243111] ------------[ cut here ]------------ [ 171.243112] kernel BUG at net/core/skbuff.c:112! [ 171.243117] invalid opcode: 0000 [#1] SMP KASAN PTI [ 171.243469] gsmi: Log Shutdown Reason 0x03 [ 171.243505] Call Trace: [ 171.243506] [ 171.243512] [] skb_push+0x49/0x50 [ 171.243516] [] ipgre_header+0x2a/0xf0 [ 171.243520] [] neigh_connected_output+0xb7/0x100 [ 171.243524] [] ip6_finish_output2+0x383/0x490 [ 171.243528] [] __ip6_finish_output+0xa2/0x110 [ 171.243531] [] ip6_finish_output+0x2c/0xa0 [ 171.243534] [] ip6_output+0x69/0x110 [ 171.243537] [] ? ip6_output+0x110/0x110 [ 171.243541] [] mld_sendpack+0x1b2/0x2d0 [ 171.243544] [] ? mld_send_report+0xf0/0xf0 [ 171.243548] [] mld_ifc_timer_expire+0x2d7/0x3b0 [ 171.243551] [] ? mld_gq_timer_expire+0x50/0x50 [ 171.243556] [] call_timer_fn+0x30/0x130 [ 171.243559] [] expire_timers+0x4c/0x110 [ 171.243563] [] __run_timers+0x213/0x260 [ 171.243566] [] ? ktime_get+0x3d/0xa0 [ 171.243570] [] ? clockevents_program_event+0x7e/0xe0 [ 171.243574] [] ? 
sched_clock_cpu+0x15/0x190 [ 171.243577] [] run_timer_softirq+0x1d/0x40 [ 171.243581] [] __do_softirq+0x152/0x2f0 [ 171.243585] [] irq_exit+0x9f/0xb0 [ 171.243588] [] smp_apic_timer_interrupt+0xfd/0x1a0 [ 171.243591] [] apic_timer_interrupt+0x86/0x90 Fixes: f5184d267c1a ("net: Allow netdevices to specify needed head/tailroom") Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/bonding/bond_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d32e32e79174..a59333b87eaf 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1123,6 +1123,7 @@ static void bond_setup_by_slave(struct net_device *bond_dev, bond_dev->type = slave_dev->type; bond_dev->hard_header_len = slave_dev->hard_header_len; + bond_dev->needed_headroom = slave_dev->needed_headroom; bond_dev->addr_len = slave_dev->addr_len; memcpy(bond_dev->broadcast, slave_dev->broadcast, -- GitLab From 58e1d8506309b1030c17613fbdc12af39dc022ec Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 26 Sep 2020 21:33:43 -0700 Subject: [PATCH 1211/1304] mdio: fix mdio-thunder.c dependency & build error [ Upstream commit 7dbbcf496f2a4b6d82cfc7810a0746e160b79762 ] Fix build error by selecting MDIO_DEVRES for MDIO_THUNDER. Fixes this build error: ld: drivers/net/phy/mdio-thunder.o: in function `thunder_mdiobus_pci_probe': drivers/net/phy/mdio-thunder.c:78: undefined reference to `devm_mdiobus_alloc_size' Fixes: 379d7ac7ca31 ("phy: mdio-thunder: Add driver for Cavium Thunder SoC MDIO buses.") Reported-by: kernel test robot Signed-off-by: Randy Dunlap Cc: Bartosz Golaszewski Cc: Andrew Lunn Cc: Heiner Kallweit Cc: netdev@vger.kernel.org Cc: David Daney Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/phy/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 1f5fd24cd749..2386871e1294 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -154,6 +154,7 @@ config MDIO_THUNDER depends on 64BIT depends on PCI select MDIO_CAVIUM + select MDIO_DEVRES help This driver supports the MDIO interfaces found on Cavium ThunderX SoCs when the MDIO bus device appears as a PCI -- GitLab From a0dab3f9a984e5d6ef20f542739e0fa6fe968e2e Mon Sep 17 00:00:00 2001 From: Wilken Gottwalt Date: Mon, 28 Sep 2020 11:01:04 +0200 Subject: [PATCH 1212/1304] net: usb: ax88179_178a: fix missing stop entry in driver_info [ Upstream commit 9666ea66a74adfe295cb3a8760c76e1ef70f9caf ] Adds the missing .stop entry in the Belkin driver_info structure. Fixes: e20bd60bf62a ("net: usb: asix88179_178a: Add support for the Belkin B2B128") Signed-off-by: Wilken Gottwalt Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/usb/ax88179_178a.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 8455f72007b9..a9d0df435e26 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1735,6 +1735,7 @@ static const struct driver_info belkin_info = { .status = ax88179_status, .link_reset = ax88179_link_reset, .reset = ax88179_reset, + .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, -- GitLab From 7a592c6788f3dd9bf231808e8daadefed07d012d Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Sun, 13 Sep 2020 17:57:23 +0300 Subject: [PATCH 1213/1304] net/mlx5e: Fix VLAN cleanup flow [ Upstream commit 8c7353b6f716436ad0bfda2b5c5524ab2dde5894 ] Prior to this patch unloading an interface in promiscuous mode with RX VLAN filtering feature turned off - resulted in a warning. 
This is due to a wrong condition in the VLAN rules cleanup flow, which left the any-vid rules in the VLAN steering table. These rules prevented destroying the flow group and the flow table. The any-vid rules are removed in 2 flows, but none of them remove it in case both promiscuous is set and VLAN filtering is off. Fix the issue by changing the condition of the VLAN table cleanup flow to clean also in case of promiscuous mode. mlx5_core 0000:00:08.0: mlx5_destroy_flow_group:2123:(pid 28729): Flow group 20 wasn't destroyed, refcount > 1 mlx5_core 0000:00:08.0: mlx5_destroy_flow_group:2123:(pid 28729): Flow group 19 wasn't destroyed, refcount > 1 mlx5_core 0000:00:08.0: mlx5_destroy_flow_table:2112:(pid 28729): Flow table 262149 wasn't destroyed, refcount > 1 ... ... ------------[ cut here ]------------ FW pages counter is 11560 after reclaiming all pages WARNING: CPU: 1 PID: 28729 at drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c:660 mlx5_reclaim_startup_pages+0x178/0x230 [mlx5_core] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014 Call Trace: mlx5_function_teardown+0x2f/0x90 [mlx5_core] mlx5_unload_one+0x71/0x110 [mlx5_core] remove_one+0x44/0x80 [mlx5_core] pci_device_remove+0x3e/0xc0 device_release_driver_internal+0xfb/0x1c0 device_release_driver+0x12/0x20 pci_stop_bus_device+0x68/0x90 pci_stop_and_remove_bus_device+0x12/0x20 hv_eject_device_work+0x6f/0x170 [pci_hyperv] ? __schedule+0x349/0x790 process_one_work+0x206/0x400 worker_thread+0x34/0x3f0 ? process_one_work+0x400/0x400 kthread+0x126/0x140 ? 
kthread_park+0x90/0x90 ret_from_fork+0x22/0x30 ---[ end trace 6283bde8d26170dc ]--- Fixes: 9df30601c843 ("net/mlx5e: Restore vlan filter after seamless reset") Signed-off-by: Aya Levin Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 76cc10e44080..b8c3ceaed585 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -415,8 +415,12 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if (priv->fs.vlan.cvlan_filter_disabled && - !(priv->netdev->flags & IFF_PROMISC)) + WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state))); + + /* must be called after DESTROY bit is set and + * set_rx_mode is called and flushed + */ + if (priv->fs.vlan.cvlan_filter_disabled) mlx5e_del_any_vid_rules(priv); } -- GitLab From f9c195bf08c0a215cf2bd89255f754c79e923055 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Sun, 13 Sep 2020 18:05:40 +0300 Subject: [PATCH 1214/1304] net/mlx5e: Fix VLAN create flow [ Upstream commit d4a16052bccdd695982f89d815ca075825115821 ] When interface is attached while in promiscuous mode and with VLAN filtering turned off, both configurations are not respected and VLAN filtering is performed. There are 2 flows which add the any-vid rules during interface attach: VLAN creation table and set rx mode. Each is relaying on the other to add any-vid rules, eventually non of them does. Fix this by adding any-vid rules on VLAN creation regardless of promiscuous mode. 
Fixes: 9df30601c843 ("net/mlx5e: Restore vlan filter after seamless reset") Signed-off-by: Aya Levin Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index b8c3ceaed585..7ddacc9e4fe4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -217,6 +217,9 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, break; } + if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type)) + return 0; + *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(*rule_p)) { @@ -397,8 +400,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if (priv->fs.vlan.cvlan_filter_disabled && - !(priv->netdev->flags & IFF_PROMISC)) + if (priv->fs.vlan.cvlan_filter_disabled) mlx5e_add_any_vid_rules(priv); } -- GitLab From 5de841c5ac1e0cdb1ed40b5ec0b7e1b965da9c8b Mon Sep 17 00:00:00 2001 From: Marc Dionne Date: Fri, 4 Sep 2020 14:01:24 -0300 Subject: [PATCH 1215/1304] rxrpc: Fix rxkad token xdr encoding [ Upstream commit 56305118e05b2db8d0395bba640ac9a3aee92624 ] The session key should be encoded with just the 8 data bytes and no length; ENCODE_DATA precedes it with a 4 byte length, which confuses some existing tools that try to parse this format. Add an ENCODE_BYTES macro that does not include a length, and use it for the key. Also adjust the expected length. Note that commit 774521f353e1d ("rxrpc: Fix an assertion in rxrpc_read()") had fixed a BUG by changing the length rather than fixing the encoding. The original length was correct. 
Fixes: 99455153d067 ("RxRPC: Parse security index 5 keys (Kerberos 5)") Signed-off-by: Marc Dionne Signed-off-by: David Howells Signed-off-by: Sasha Levin --- net/rxrpc/key.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index ad9d1b21cb0b..fead67b42a99 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -1075,7 +1075,7 @@ static long rxrpc_read(const struct key *key, switch (token->security_index) { case RXRPC_SECURITY_RXKAD: - toksize += 9 * 4; /* viceid, kvno, key*2 + len, begin, + toksize += 8 * 4; /* viceid, kvno, key*2, begin, * end, primary, tktlen */ toksize += RND(token->kad->ticket_len); break; @@ -1141,6 +1141,14 @@ static long rxrpc_read(const struct key *key, memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ xdr += (_l + 3) >> 2; \ } while(0) +#define ENCODE_BYTES(l, s) \ + do { \ + u32 _l = (l); \ + memcpy(xdr, (s), _l); \ + if (_l & 3) \ + memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ + xdr += (_l + 3) >> 2; \ + } while(0) #define ENCODE64(x) \ do { \ __be64 y = cpu_to_be64(x); \ @@ -1168,7 +1176,7 @@ static long rxrpc_read(const struct key *key, case RXRPC_SECURITY_RXKAD: ENCODE(token->kad->vice_id); ENCODE(token->kad->kvno); - ENCODE_DATA(8, token->kad->session_key); + ENCODE_BYTES(8, token->kad->session_key); ENCODE(token->kad->start); ENCODE(token->kad->expiry); ENCODE(token->kad->primary_flag); -- GitLab From 4a0b2759c739422af74d0ebbeb77595e79806aad Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 8 Sep 2020 22:09:04 +0100 Subject: [PATCH 1216/1304] rxrpc: Downgrade the BUG() for unsupported token type in rxrpc_read() [ Upstream commit 9a059cd5ca7d9c5c4ca5a6e755cf72f230176b6a ] If rxrpc_read() (which allows KEYCTL_READ to read a key), sees a token of a type it doesn't recognise, it can BUG in a couple of places, which is unnecessary as it can easily get back to userspace. Fix this to print an error message instead. 
Fixes: 99455153d067 ("RxRPC: Parse security index 5 keys (Kerberos 5)") Signed-off-by: David Howells Signed-off-by: Sasha Levin --- net/rxrpc/key.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index fead67b42a99..1fe203c56faf 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -1110,7 +1110,8 @@ static long rxrpc_read(const struct key *key, break; default: /* we have a ticket we can't encode */ - BUG(); + pr_err("Unsupported key token type (%u)\n", + token->security_index); continue; } @@ -1226,7 +1227,6 @@ static long rxrpc_read(const struct key *key, break; default: - BUG(); break; } -- GitLab From 3995eed6f1672de84fbd153fcad5530cb9ae7f18 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 1 Oct 2020 11:57:40 +0100 Subject: [PATCH 1217/1304] rxrpc: Fix some missing _bh annotations on locking conn->state_lock [ Upstream commit fa1d113a0f96f9ab7e4fe4f8825753ba1e34a9d3 ] conn->state_lock may be taken in softirq mode, but a previous patch replaced an outer lock in the response-packet event handling code, and lost the _bh from that when doing so. Fix this by applying the _bh annotation to the state_lock locking. 
Fixes: a1399f8bb033 ("rxrpc: Call channels should have separate call number spaces") Signed-off-by: David Howells Signed-off-by: Sasha Levin --- net/rxrpc/conn_event.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 126154a97a59..04213afd7710 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -342,18 +342,18 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, return ret; spin_lock(&conn->channel_lock); - spin_lock(&conn->state_lock); + spin_lock_bh(&conn->state_lock); if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { conn->state = RXRPC_CONN_SERVICE; - spin_unlock(&conn->state_lock); + spin_unlock_bh(&conn->state_lock); for (loop = 0; loop < RXRPC_MAXCALLS; loop++) rxrpc_call_is_secure( rcu_dereference_protected( conn->channels[loop].call, lockdep_is_held(&conn->channel_lock))); } else { - spin_unlock(&conn->state_lock); + spin_unlock_bh(&conn->state_lock); } spin_unlock(&conn->channel_lock); -- GitLab From 4b00aa56d0e762c51c2b119bf8c6eb76c9fb3de1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 2 Oct 2020 14:04:51 +0100 Subject: [PATCH 1218/1304] rxrpc: Fix server keyring leak [ Upstream commit 38b1dc47a35ba14c3f4472138ea56d014c2d609b ] If someone calls setsockopt() twice to set a server key keyring, the first keyring is leaked. Fix it to return an error instead if the server key keyring is already set. 
Fixes: 17926a79320a ("[AF_RXRPC]: Provide secure RxRPC sockets for use by userspace and kernel both") Signed-off-by: David Howells Signed-off-by: Sasha Levin --- net/rxrpc/key.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 1fe203c56faf..2fe2add62a8e 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -905,7 +905,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen) _enter(""); - if (optlen <= 0 || optlen > PAGE_SIZE - 1) + if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities) return -EINVAL; description = memdup_user_nul(optval, optlen); -- GitLab From 80e745b6729ed41248442a687943cc7a48e5e66a Mon Sep 17 00:00:00 2001 From: Kajol Jain Date: Thu, 27 Aug 2020 12:17:32 +0530 Subject: [PATCH 1219/1304] perf: Fix task_function_call() error handling [ Upstream commit 6d6b8b9f4fceab7266ca03d194f60ec72bd4b654 ] The error handling introduced by commit: 2ed6edd33a21 ("perf: Add cond_resched() to task_function_call()") looses any return value from smp_call_function_single() that is not {0, -EINVAL}. This is a problem because it will return -EXNIO when the target CPU is offline. Worse, in that case it'll turn into an infinite loop. Fixes: 2ed6edd33a21 ("perf: Add cond_resched() to task_function_call()") Reported-by: Srikar Dronamraju Signed-off-by: Kajol Jain Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar Reviewed-by: Barret Rhoden Tested-by: Srikar Dronamraju Link: https://lkml.kernel.org/r/20200827064732.20860-1-kjain@linux.ibm.com Signed-off-by: Sasha Levin --- kernel/events/core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index a17e6302ded5..a35d742b0ba8 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -98,7 +98,7 @@ static void remote_function(void *data) * retry due to any failures in smp_call_function_single(), such as if the * task_cpu() goes offline concurrently. 
* - * returns @func return value or -ESRCH when the process isn't running + * returns @func return value or -ESRCH or -ENXIO when the process isn't running */ static int task_function_call(struct task_struct *p, remote_function_f func, void *info) @@ -114,7 +114,8 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) for (;;) { ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); - ret = !ret ? data.ret : -EAGAIN; + if (!ret) + ret = data.ret; if (ret != -EAGAIN) break; -- GitLab From abfe666def6d07677d3f8be513c6004e14b2b43c Mon Sep 17 00:00:00 2001 From: Coly Li Date: Fri, 2 Oct 2020 09:38:52 +0800 Subject: [PATCH 1220/1304] mmc: core: don't set limits.discard_granularity as 0 [ Upstream commit 4243219141b67d7c2fdb2d8073c17c539b9263eb ] In mmc_queue_setup_discard() the mmc driver queue's discard_granularity might be set as 0 (when card->pref_erase > max_discard) while the mmc device still declares to support discard operation. This is buggy and triggered the following kernel warning message, WARNING: CPU: 0 PID: 135 at __blkdev_issue_discard+0x200/0x294 CPU: 0 PID: 135 Comm: f2fs_discard-17 Not tainted 5.9.0-rc6 #1 Hardware name: Google Kevin (DT) pstate: 00000005 (nzcv daif -PAN -UAO BTYPE=--) pc : __blkdev_issue_discard+0x200/0x294 lr : __blkdev_issue_discard+0x54/0x294 sp : ffff800011dd3b10 x29: ffff800011dd3b10 x28: 0000000000000000 x27: ffff800011dd3cc4 x26: ffff800011dd3e18 x25: 000000000004e69b x24: 0000000000000c40 x23: ffff0000f1deaaf0 x22: ffff0000f2849200 x21: 00000000002734d8 x20: 0000000000000008 x19: 0000000000000000 x18: 0000000000000000 x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000000 x14: 0000000000000394 x13: 0000000000000000 x12: 0000000000000000 x11: 0000000000000000 x10: 00000000000008b0 x9 : ffff800011dd3cb0 x8 : 000000000004e69b x7 : 0000000000000000 x6 : ffff0000f1926400 x5 : ffff0000f1940800 x4 : 0000000000000000 x3 : 0000000000000c40 x2 : 0000000000000008 x1 : 00000000002734d8 
x0 : 0000000000000000 Call trace: __blkdev_issue_discard+0x200/0x294 __submit_discard_cmd+0x128/0x374 __issue_discard_cmd_orderly+0x188/0x244 __issue_discard_cmd+0x2e8/0x33c issue_discard_thread+0xe8/0x2f0 kthread+0x11c/0x120 ret_from_fork+0x10/0x1c ---[ end trace e4c8023d33dfe77a ]--- This patch fixes the issue by setting discard_granularity as SECTOR_SIZE instead of 0 when (card->pref_erase > max_discard) is true. Now no more complain from __blkdev_issue_discard() for the improper value of discard granularity. This issue is exposed after commit b35fd7422c2f ("block: check queue's limits.discard_granularity in __blkdev_issue_discard()"), a "Fixes:" tag is also added for the commit to make sure people won't miss this patch after applying the change of __blkdev_issue_discard(). Fixes: e056a1b5b67b ("mmc: queue: let host controllers specify maximum discard timeout") Fixes: b35fd7422c2f ("block: check queue's limits.discard_granularity in __blkdev_issue_discard()"). Reported-and-tested-by: Vicente Bergas Signed-off-by: Coly Li Acked-by: Adrian Hunter Cc: Ulf Hansson Link: https://lore.kernel.org/r/20201002013852.51968-1-colyli@suse.de Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin --- drivers/mmc/core/queue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 2a788169cbb8..9b31cd6b6062 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -185,7 +185,7 @@ static void mmc_queue_setup_discard(struct request_queue *q, q->limits.discard_granularity = card->pref_erase << 9; /* granularity must not be greater than max. 
discard */ if (card->pref_erase > max_discard) - q->limits.discard_granularity = 0; + q->limits.discard_granularity = SECTOR_SIZE; if (mmc_can_secure_erase_trim(card)) blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); } -- GitLab From 94c51675811267a1ccaa7f6dc336714a02e20246 Mon Sep 17 00:00:00 2001 From: Vijay Balakrishna Date: Sat, 10 Oct 2020 23:16:40 -0700 Subject: [PATCH 1221/1304] mm: khugepaged: recalculate min_free_kbytes after memory hotplug as expected by khugepaged commit 4aab2be0983031a05cb4a19696c9da5749523426 upstream. When memory is hotplug added or removed the min_free_kbytes should be recalculated based on what is expected by khugepaged. Currently after hotplug, min_free_kbytes will be set to a lower default and higher default set when THP enabled is lost. This change restores min_free_kbytes as expected for THP consumers. [vijayb@linux.microsoft.com: v5] Link: https://lkml.kernel.org/r/1601398153-5517-1-git-send-email-vijayb@linux.microsoft.com Fixes: f000565adb77 ("thp: set recommended min free kbytes") Signed-off-by: Vijay Balakrishna Signed-off-by: Andrew Morton Reviewed-by: Pavel Tatashin Acked-by: Michal Hocko Cc: Allen Pais Cc: Andrea Arcangeli Cc: "Kirill A. 
Shutemov" Cc: Oleg Nesterov Cc: Song Liu Cc: Link: https://lkml.kernel.org/r/1600305709-2319-2-git-send-email-vijayb@linux.microsoft.com Link: https://lkml.kernel.org/r/1600204258-13683-1-git-send-email-vijayb@linux.microsoft.com Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/khugepaged.h | 5 +++++ mm/khugepaged.c | 13 +++++++++++-- mm/page_alloc.c | 3 +++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 082d1d2a5216..dc9a2eecc8b8 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -15,6 +15,7 @@ extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags); +extern void khugepaged_min_free_kbytes_update(void); #define khugepaged_enabled() \ (transparent_hugepage_flags & \ @@ -73,6 +74,10 @@ static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma, { return 0; } + +static inline void khugepaged_min_free_kbytes_update(void) +{ +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_KHUGEPAGED_H */ diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 30553d7df402..9c7dc2276156 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -53,6 +53,9 @@ enum scan_result { #define CREATE_TRACE_POINTS #include +static struct task_struct *khugepaged_thread __read_mostly; +static DEFINE_MUTEX(khugepaged_mutex); + /* default scan 8*512 pte (or vmas) every 30 second */ static unsigned int khugepaged_pages_to_scan __read_mostly; static unsigned int khugepaged_pages_collapsed; @@ -1952,8 +1955,6 @@ static void set_recommended_min_free_kbytes(void) int start_stop_khugepaged(void) { - static struct task_struct *khugepaged_thread __read_mostly; - static DEFINE_MUTEX(khugepaged_mutex); int err = 0; mutex_lock(&khugepaged_mutex); @@ -1980,3 +1981,11 @@ int start_stop_khugepaged(void) 
mutex_unlock(&khugepaged_mutex); return err; } + +void khugepaged_min_free_kbytes_update(void) +{ + mutex_lock(&khugepaged_mutex); + if (khugepaged_enabled() && khugepaged_thread) + set_recommended_min_free_kbytes(); + mutex_unlock(&khugepaged_mutex); +} diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 545800433dfb..4325e7d58115 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include @@ -7399,6 +7400,8 @@ int __meminit init_per_zone_wmark_min(void) setup_min_slab_ratio(); #endif + khugepaged_min_free_kbytes_update(); + return 0; } postcore_initcall(init_per_zone_wmark_min) -- GitLab From d71f3fb99620532fee0d163bfb30f0cc7bcdbaee Mon Sep 17 00:00:00 2001 From: Anant Thazhemadam Date: Mon, 5 Oct 2020 18:59:58 +0530 Subject: [PATCH 1222/1304] net: usb: rtl8150: set random MAC address when set_ethernet_addr() fails commit f45a4248ea4cc13ed50618ff066849f9587226b2 upstream. When get_registers() fails in set_ethernet_addr(),the uninitialized value of node_id gets copied over as the address. So, check the return value of get_registers(). If get_registers() executed successfully (i.e., it returns sizeof(node_id)), copy over the MAC address using ether_addr_copy() (instead of using memcpy()). Else, if get_registers() failed instead, a randomly generated MAC address is set as the MAC address instead. Reported-by: syzbot+abbc768b560c84d92fd3@syzkaller.appspotmail.com Tested-by: syzbot+abbc768b560c84d92fd3@syzkaller.appspotmail.com Acked-by: Petko Manolov Signed-off-by: Anant Thazhemadam Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/rtl8150.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 80373a9171dd..933d1a74bcdb 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -277,12 +277,20 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg) return 1; } -static inline void set_ethernet_addr(rtl8150_t * dev) +static void set_ethernet_addr(rtl8150_t *dev) { - u8 node_id[6]; + u8 node_id[ETH_ALEN]; + int ret; + + ret = get_registers(dev, IDR, sizeof(node_id), node_id); - get_registers(dev, IDR, sizeof(node_id), node_id); - memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id)); + if (ret == sizeof(node_id)) { + ether_addr_copy(dev->netdev->dev_addr, node_id); + } else { + eth_hw_addr_random(dev->netdev); + netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n", + dev->netdev->dev_addr); + } } static int rtl8150_set_mac_address(struct net_device *netdev, void *p) -- GitLab From 47f6500403ea31aa0c8e329aeee607671d0f9086 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 14 Oct 2020 10:31:27 +0200 Subject: [PATCH 1223/1304] Linux 4.19.151 Tested-by: Jon Hunter Tested-by: Linux Kernel Functional Testing Tested-by: Guenter Roeck Tested-by: Pavel Machek (CIP) Tested-by: Shuah Khan Link: https://lore.kernel.org/r/20201012132629.469542486@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 65485185bec2..f2c9db9b4015 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 150 +SUBLEVEL = 151 EXTRAVERSION = NAME = "People's Front" -- GitLab From 71ca95b7e94938387b83c50cac061802f924bc68 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Mon, 12 Oct 2020 12:41:31 -0700 Subject: [PATCH 1224/1304] FROMLIST: arm64: 
vdso32: Allow ld.lld to properly link the VDSO As it stands now, the vdso32 Makefile hardcodes the linker to ld.bfd using -fuse-ld=bfd with $(CC). This was taken from the arm vDSO Makefile, as the comment notes, done in commit d2b30cd4b722 ("ARM: 8384/1: VDSO: force use of BFD linker"). Commit fe00e50b2db8 ("ARM: 8858/1: vdso: use $(LD) instead of $(CC) to link VDSO") changed that Makefile to use $(LD) directly instead of through $(CC), which matches how the rest of the kernel operates. Since then, LD=ld.lld means that the arm vDSO will be linked with ld.lld, which has shown no problems so far. Allow ld.lld to link this vDSO as we do the regular arm vDSO. To do this, we need to do a few things: * Add a LD_COMPAT variable, which defaults to $(CROSS_COMPILE_COMPAT)ld with gcc and $(LD) if LLVM is 1, which will be ld.lld, or $(CROSS_COMPILE_COMPAT)ld if not, which matches the logic of the main Makefile. It is overrideable for further customization and avoiding breakage. * Eliminate cc32-ldoption, which matches commit 055efab3120b ("kbuild: drop support for cc-ldoption"). With those, we can use $(LD_COMPAT) in cmd_ldvdso and change the flags from compiler linker flags to linker flags directly. We eliminate -mfloat-abi=soft because it is not handled by the linker. 
Link: https://github.com/ClangBuiltLinux/linux/issues/1033 Reported-by: Nick Desaulniers Signed-off-by: Nathan Chancellor Bug: 141693040 Test: BUILD_CONFIG=common/build.config.gki.aarch64 ./build/build.sh Link: https://lore.kernel.org/linux-arm-kernel/20201013033947.2257501-1-natechancellor@gmail.com/ Change-Id: I8a7e5736294a2cb9b60edcfdddb4937003fe6c01 Signed-off-by: Nick Desaulniers --- arch/arm64/kernel/vdso32/Makefile | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile index 9c5c5d6ccb07..8fddb291c0e2 100644 --- a/arch/arm64/kernel/vdso32/Makefile +++ b/arch/arm64/kernel/vdso32/Makefile @@ -22,16 +22,21 @@ endif CC_COMPAT ?= $(CC) CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS) + +ifeq ($(LLVM),1) +LD_COMPAT ?= $(LD) +else +LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld +endif else CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc +LD_COMPAT ?= $(CROSS_COMPILE_COMPAT)ld endif cc32-option = $(call try-run,\ $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) cc32-disable-warning = $(call try-run,\ $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) -cc32-ldoption = $(call try-run,\ - $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) cc32-as-instr = $(call try-run,\ printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) @@ -114,14 +119,10 @@ dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1) VDSO_CFLAGS += $(dmbinstr) VDSO_AFLAGS += $(dmbinstr) -VDSO_LDFLAGS := $(VDSO_CPPFLAGS) # From arm vDSO Makefile -VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 -VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 -VDSO_LDFLAGS += -nostdlib -shared -mfloat-abi=soft -VDSO_LDFLAGS += $(call cc32-ldoption,-Wl$(comma)--hash-style=sysv) -VDSO_LDFLAGS += $(call cc32-ldoption,-Wl$(comma)--build-id) -VDSO_LDFLAGS += $(call cc32-ldoption,-fuse-ld=bfd) 
+VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1 +VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096 +VDSO_LDFLAGS += -nostdlib -shared --hash-style=sysv --build-id # Borrow vdsomunge.c from the arm vDSO @@ -182,8 +183,8 @@ quiet_cmd_vdsold_and_vdso_check = LD32 $@ cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check) quiet_cmd_vdsold = LD32 $@ - cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \ - -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@ + cmd_vdsold = $(LD_COMPAT) $(VDSO_LDFLAGS) \ + -T $(filter %.lds,$^) $(filter %.o,$^) -o $@ quiet_cmd_vdsocc = CC32 $@ cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $< quiet_cmd_vdsocc_gettimeofday = CC32 $@ -- GitLab From 43f18d00cb162cdd79436a3518a7046f31927235 Mon Sep 17 00:00:00 2001 From: Manikandan Mohan Date: Thu, 30 Jul 2020 09:37:41 -0700 Subject: [PATCH 1225/1304] cnss2: Add support for PCIE gen switch Add support for PCIE gen switch request from wlan driver using QMI message to FW. 
Change-Id: I6ea4ee234a38162cb9093d36d580c872b4d62644 Signed-off-by: Manikandan Mohan --- drivers/net/wireless/cnss2/main.c | 20 ++++++++++ drivers/net/wireless/cnss2/main.h | 2 + drivers/net/wireless/cnss2/qmi.c | 62 +++++++++++++++++++++++++++++++ drivers/net/wireless/cnss2/qmi.h | 4 ++ include/net/cnss2.h | 2 +- 5 files changed, 89 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index 3d54e80eca4e..6a757e0d6e8d 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -357,6 +357,24 @@ int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode) } EXPORT_SYMBOL(cnss_set_fw_log_mode); +int cnss_set_pcie_gen_speed(struct device *dev, u8 pcie_gen_speed) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + + if (plat_priv->device_id != QCA6490_DEVICE_ID || + !plat_priv->fw_pcie_gen_switch) + return -ENOTSUPP; + + if (pcie_gen_speed < QMI_PCIE_GEN_SPEED_1_V01 || + pcie_gen_speed > QMI_PCIE_GEN_SPEED_3_V01) + return -EINVAL; + + cnss_pr_dbg("WLAN provided PCIE gen speed: %d\n", pcie_gen_speed); + plat_priv->pcie_gen_speed = pcie_gen_speed; + return 0; +} +EXPORT_SYMBOL(cnss_set_pcie_gen_speed); + static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv) { int ret = 0; @@ -433,6 +451,8 @@ static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv) set_bit(CNSS_FW_READY, &plat_priv->driver_state); clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state); + cnss_wlfw_send_pcie_gen_speed_sync(plat_priv); + if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) { clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state); clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state); diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h index c2b458fdedfc..b6d487d5de8c 100644 --- a/drivers/net/wireless/cnss2/main.h +++ b/drivers/net/wireless/cnss2/main.h @@ -403,6 +403,8 @@ struct cnss_plat_data { int 
(*get_info_cb)(void *ctx, void *event, int event_len); u8 use_nv_mac; u8 set_wlaon_pwr_ctrl; + u8 fw_pcie_gen_switch; + u8 pcie_gen_speed; }; #ifdef CONFIG_ARCH_QCOM diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c index ab1d1056465a..da9ba280f2e7 100644 --- a/drivers/net/wireless/cnss2/qmi.c +++ b/drivers/net/wireless/cnss2/qmi.c @@ -461,6 +461,10 @@ int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv) if (resp->otp_version_valid) plat_priv->otp_version = resp->otp_version; + if (resp->fw_caps_valid) + plat_priv->fw_pcie_gen_switch = + !!(resp->fw_caps & QMI_WLFW_HOST_PCIE_GEN_SWITCH_V01); + cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, fw_version: 0x%x, fw_build_timestamp: %s, fw_build_id: %s, otp_version: 0x%x\n", plat_priv->chip_info.chip_id, plat_priv->chip_info.chip_family, @@ -1296,6 +1300,64 @@ int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv, return ret; } +int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv) +{ + struct wlfw_pcie_gen_switch_req_msg_v01 req; + struct wlfw_pcie_gen_switch_resp_msg_v01 resp; + struct qmi_txn txn; + int ret = 0; + + if (!plat_priv) + return -ENODEV; + + if (plat_priv->pcie_gen_speed == QMI_PCIE_GEN_SPEED_INVALID_V01 || + !plat_priv->fw_pcie_gen_switch) { + cnss_pr_dbg("PCIE Gen speed not setup\n"); + return 0; + } + + cnss_pr_dbg("Sending PCIE Gen speed: %d state: 0x%lx\n", + plat_priv->pcie_gen_speed, plat_priv->driver_state); + req.pcie_speed = (enum wlfw_pcie_gen_speed_v01) + plat_priv->pcie_gen_speed; + + ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn, + wlfw_pcie_gen_switch_resp_msg_v01_ei, &resp); + if (ret < 0) { + cnss_pr_err("Failed to initialize txn for PCIE speed switch err: %d\n", + ret); + goto out; + } + + ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, + QMI_WLFW_PCIE_GEN_SWITCH_REQ_V01, + WLFW_PCIE_GEN_SWITCH_REQ_MSG_V01_MAX_MSG_LEN, + wlfw_pcie_gen_switch_req_msg_v01_ei, 
&req); + if (ret < 0) { + qmi_txn_cancel(&txn); + cnss_pr_err("Failed to send PCIE speed switch, err: %d\n", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF); + if (ret < 0) { + cnss_pr_err("Failed to wait for PCIE Gen switch resp, err: %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("PCIE Gen Switch req failed, Speed: %d, result: %d, err: %d\n", + plat_priv->pcie_gen_speed, resp.resp.result, + resp.resp.error); + ret = -resp.resp.result; + } +out: + /* Reset PCIE Gen speed after one time use */ + plat_priv->pcie_gen_speed = QMI_PCIE_GEN_SPEED_INVALID_V01; + return ret; +} + int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv) { struct wlfw_antenna_switch_req_msg_v01 *req; diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h index baa5811d835f..533a0affc47b 100644 --- a/drivers/net/wireless/cnss2/qmi.h +++ b/drivers/net/wireless/cnss2/qmi.h @@ -71,6 +71,7 @@ int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv); int cnss_register_ims_service(struct cnss_plat_data *plat_priv); void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv); void cnss_ignore_qmi_failure(bool ignore); +int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv); #else #define QMI_WLFW_TIMEOUT_MS 10000 @@ -226,6 +227,9 @@ static inline void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv) {} void cnss_ignore_qmi_failure(bool ignore) {}; +static inline +int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv) {} + #endif /* CONFIG_CNSS2_QMI */ #endif /* _CNSS_QMI_H */ diff --git a/include/net/cnss2.h b/include/net/cnss2.h index d928451f33c6..52202fdafdd0 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -240,5 +240,5 @@ extern int cnss_athdiag_write(struct device *dev, uint32_t offset, uint32_t mem_type, uint32_t data_len, uint8_t *input); extern int cnss_set_fw_log_mode(struct device 
*dev, uint8_t fw_log_mode); - +extern int cnss_set_pcie_gen_speed(struct device *dev, u8 pcie_gen_speed); #endif /* _NET_CNSS2_H */ -- GitLab From b801d568c7d85f816f1afb6e869f069c3ebe8b66 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Tue, 5 May 2020 21:36:42 +0800 Subject: [PATCH 1226/1304] perf cs-etm: Move definition of 'traceid_list' global variable from header file commit 168200b6d6ea0cb5765943ec5da5b8149701f36a upstream. The variable 'traceid_list' is defined in the header file cs-etm.h, if multiple C files include cs-etm.h the compiler might complaint for multiple definition of 'traceid_list'. To fix multiple definition error, move the definition of 'traceid_list' into cs-etm.c. Fixes: cd8bfd8c973e ("perf tools: Add processing of coresight metadata") Reported-by: Thomas Backlund Signed-off-by: Leo Yan Reviewed-by: Mathieu Poirier Reviewed-by: Mike Leach Tested-by: Mike Leach Tested-by: Thomas Backlund Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Suzuki Poulouse Cc: Tor Jeremiassen Cc: linux-arm-kernel@lists.infradead.org Link: http://lore.kernel.org/lkml/20200505133642.4756-1-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo Cc: Paul Barker Signed-off-by: Greg Kroah-Hartman --- tools/perf/util/cs-etm.c | 3 +++ tools/perf/util/cs-etm.h | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 7b5e15cc6b71..ad33b99f5d21 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -87,6 +87,9 @@ struct cs_etm_queue { struct cs_etm_packet *packet; }; +/* RB tree for quick conversion between traceID and metadata pointers */ +static struct intlist *traceid_list; + static int cs_etm__update_queues(struct cs_etm_auxtrace *etm); static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm, pid_t tid, u64 time_); diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h index 37f8d48179ca..c7ef97b198c7 100644 --- 
Users will be able to use their favorite linker (e.g. lld instead of bfd) by passing LD= from the command line.
Signed-off-by: Masahiro Yamada Reviewed-by: Nick Desaulniers Signed-off-by: Russell King Signed-off-by: Sasha Levin --- arch/arm/vdso/Makefile | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile index f4efff9d3afb..fadf554d9391 100644 --- a/arch/arm/vdso/Makefile +++ b/arch/arm/vdso/Makefile @@ -10,12 +10,12 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector ccflags-y += -DDISABLE_BRANCH_PROFILING -VDSO_LDFLAGS := -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 -VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 -VDSO_LDFLAGS += -nostdlib -shared -VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) -VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id) -VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd) +ldflags-y = -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ + -z max-page-size=4096 -z common-page-size=4096 \ + -nostdlib -shared \ + $(call ld-option, --hash-style=sysv) \ + $(call ld-option, --build-id) \ + -T obj-$(CONFIG_VDSO) += vdso.o extra-$(CONFIG_VDSO) += vdso.lds @@ -37,8 +37,8 @@ KCOV_INSTRUMENT := n $(obj)/vdso.o : $(obj)/vdso.so # Link rule for the .so file -$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE - $(call if_changed,vdsold) +$(obj)/vdso.so.raw: $(obj)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,ld) $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE $(call if_changed,vdsomunge) @@ -48,11 +48,6 @@ $(obj)/%.so: OBJCOPYFLAGS := -S $(obj)/%.so: $(obj)/%.so.dbg FORCE $(call if_changed,objcopy) -# Actual build commands -quiet_cmd_vdsold = VDSO $@ - cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \ - -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@ - quiet_cmd_vdsomunge = MUNGE $@ cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@ -- GitLab From 444a6d0fe930838444abf7c9ff5175f93e1a5d72 Mon Sep 17 00:00:00 2001 From: Dmitry Golovin 
Date: Tue, 19 Nov 2019 16:39:42 +0100 Subject: [PATCH 1228/1304] ARM: 8939/1: kbuild: use correct nm executable [ Upstream commit 29c623d64f0dcd6aa10e0eabd16233e77114090b ] Since $(NM) variable can be easily overridden for the whole build, it's better to use it instead of $(CROSS_COMPILE)nm. The use of $(CROSS_COMPILE) prefixed variables where their calculated equivalents can be used is incorrect. This fixes issues with builds where $(NM) is set to llvm-nm. Link: https://github.com/ClangBuiltLinux/linux/issues/766 Signed-off-by: Dmitry Golovin Suggested-by: Nick Desaulniers Cc: Matthias Maennich Reviewed-by: Nathan Chancellor Tested-by: Nathan Chancellor Reviewed-by: Masahiro Yamada Reviewed-by: Nick Desaulniers Signed-off-by: Russell King Signed-off-by: Sasha Levin --- arch/arm/boot/compressed/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 1f5a5ffe7fcf..c762004572ef 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -120,7 +120,7 @@ ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj) asflags-y := -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. -KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \ +KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \ sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \ -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) ) LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) @@ -166,7 +166,7 @@ $(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S # The .data section is already discarded by the linker script so no need # to bother about it here. 
check_for_bad_syms = \ -bad_syms=$$($(CROSS_COMPILE)nm $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \ +bad_syms=$$($(NM) $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \ [ -z "$$bad_syms" ] || \ ( echo "following symbols must have non local/private scope:" >&2; \ echo "$$bad_syms" >&2; rm -f $@; false ) -- GitLab From 23759ba06279602c611a4349096ac0699c949b64 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Mon, 17 Jun 2019 13:29:19 +0100 Subject: [PATCH 1229/1304] ARM: 8867/1: vdso: pass --be8 to linker if necessary [ Upstream commit c5d0e49e8d8f1a23034fdf8e935afc0c8f7ae27d ] The commit fe00e50b2db8 ("ARM: 8858/1: vdso: use $(LD) instead of $(CC) to link VDSO") removed the passing of CFLAGS, since ld doesn't take those directly. However, prior, big-endian ARM was relying on gcc to translate its -mbe8 option into ld's --be8 option. Lacking this, ld generated be32 code, making the VDSO generate SIGILL when called by userspace. This commit passes --be8 if CONFIG_CPU_ENDIAN_BE8 is enabled. Signed-off-by: Jason A. 
Donenfeld Cc: Masahiro Yamada Cc: Arnd Bergmann Cc: Ard Biesheuvel Signed-off-by: Russell King Signed-off-by: Sasha Levin --- arch/arm/vdso/Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile index fadf554d9391..1f5ec9741e6d 100644 --- a/arch/arm/vdso/Makefile +++ b/arch/arm/vdso/Makefile @@ -10,9 +10,10 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector ccflags-y += -DDISABLE_BRANCH_PROFILING -ldflags-y = -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ +ldflags-$(CONFIG_CPU_ENDIAN_BE8) := --be8 +ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ -z max-page-size=4096 -z common-page-size=4096 \ - -nostdlib -shared \ + -nostdlib -shared $(ldflags-y) \ $(call ld-option, --hash-style=sysv) \ $(call ld-option, --build-id) \ -T -- GitLab From 128278f444ab3a0d38759c4935092d256edc77d0 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Thu, 6 Aug 2020 11:17:11 -0700 Subject: [PATCH 1230/1304] Bluetooth: A2MP: Fix not initializing all members commit eddb7732119d53400f48a02536a84c509692faa8 upstream. This fixes various places where a stack variable is used uninitialized. 
Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- net/bluetooth/a2mp.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c index 51c2cf2d8923..be9640e9ca00 100644 --- a/net/bluetooth/a2mp.c +++ b/net/bluetooth/a2mp.c @@ -233,6 +233,9 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_info_req req; found = true; + + memset(&req, 0, sizeof(req)); + req.id = cl->id; a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr), sizeof(req), &req); @@ -312,6 +315,8 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, if (!hdev || hdev->dev_type != HCI_AMP) { struct a2mp_info_rsp rsp; + memset(&rsp, 0, sizeof(rsp)); + rsp.id = req->id; rsp.status = A2MP_STATUS_INVALID_CTRL_ID; @@ -355,6 +360,8 @@ static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb, if (!ctrl) return -ENOMEM; + memset(&req, 0, sizeof(req)); + req.id = rsp->id; a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req), &req); @@ -383,6 +390,8 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_amp_assoc_rsp rsp; rsp.id = req->id; + memset(&rsp, 0, sizeof(rsp)); + if (tmp) { rsp.status = A2MP_STATUS_COLLISION_OCCURED; amp_mgr_put(tmp); @@ -471,7 +480,6 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, struct a2mp_cmd *hdr) { struct a2mp_physlink_req *req = (void *) skb->data; - struct a2mp_physlink_rsp rsp; struct hci_dev *hdev; struct hci_conn *hcon; @@ -482,6 +490,8 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id); + memset(&rsp, 0, sizeof(rsp)); + rsp.local_id = req->remote_id; rsp.remote_id = req->local_id; @@ -560,6 +570,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, BT_DBG("local_id %d remote_id %d", 
req->local_id, req->remote_id); + memset(&rsp, 0, sizeof(rsp)); + rsp.local_id = req->remote_id; rsp.remote_id = req->local_id; rsp.status = A2MP_STATUS_SUCCESS; @@ -682,6 +694,8 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) if (err) { struct a2mp_cmd_rej rej; + memset(&rej, 0, sizeof(rej)); + rej.reason = cpu_to_le16(0); hdr = (void *) skb->data; @@ -905,6 +919,8 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev) BT_DBG("%s mgr %p", hdev->name, mgr); + memset(&rsp, 0, sizeof(rsp)); + rsp.id = hdev->id; rsp.status = A2MP_STATUS_INVALID_CTRL_ID; @@ -1002,6 +1018,8 @@ void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status) if (!mgr) return; + memset(&rsp, 0, sizeof(rsp)); + hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT); if (!hs_hcon) { rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; @@ -1034,6 +1052,8 @@ void a2mp_discover_amp(struct l2cap_chan *chan) mgr->bredr_chan = chan; + memset(&req, 0, sizeof(req)); + req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); req.ext_feat = 0; a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req); -- GitLab From 360f80e34292dbe91c23e893f90cd357aff8b68a Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Thu, 6 Aug 2020 11:17:12 -0700 Subject: [PATCH 1231/1304] Bluetooth: L2CAP: Fix calling sk_filter on non-socket based channel commit f19425641cb2572a33cb074d5e30283720bd4d22 upstream. Only sockets will have the chan->data set to an actual sk, channels like A2MP would have its own data which would likely cause a crash when calling sk_filter, in order to fix this a new callback has been introduced so channels can implement their own filtering if necessary. 
Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- include/net/bluetooth/l2cap.h | 2 ++ net/bluetooth/l2cap_core.c | 7 ++++--- net/bluetooth/l2cap_sock.c | 14 ++++++++++++++ 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 0697fd413087..21dbd38f724d 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -619,6 +619,8 @@ struct l2cap_ops { struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb); + int (*filter) (struct l2cap_chan * chan, + struct sk_buff *skb); }; struct l2cap_conn { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index c04107d44601..f1ff83321023 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -6683,9 +6683,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) goto drop; } - if ((chan->mode == L2CAP_MODE_ERTM || - chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb)) - goto drop; + if (chan->ops->filter) { + if (chan->ops->filter(chan, skb)) + goto drop; + } if (!control->sframe) { int err; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 5572042f0453..2a85dc3be8bf 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1476,6 +1476,19 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan) sk->sk_state_change(sk); } +static int l2cap_sock_filter(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct sock *sk = chan->data; + + switch (chan->mode) { + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + return sk_filter(sk, skb); + } + + return 0; +} + static const struct l2cap_ops l2cap_chan_ops = { .name = "L2CAP Socket Interface", .new_connection = l2cap_sock_new_connection_cb, @@ -1490,6 +1503,7 @@ static const struct l2cap_ops l2cap_chan_ops = { .set_shutdown = l2cap_sock_set_shutdown_cb, 
This checks if BT_HS is enabled reflecting it on MGMT_SETTING_HS instead of always reporting it as supported.
Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann Cc: Hans-Christian Noren Egtvedt Signed-off-by: Greg Kroah-Hartman --- include/net/bluetooth/hci_core.h | 20 ++++++++++++++++++-- net/bluetooth/hci_event.c | 28 +++------------------------- 2 files changed, 21 insertions(+), 27 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index cc2d0c3b475b..d869d25693f2 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1287,10 +1287,26 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) conn->security_cfm_cb(conn, status); } -static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, - __u8 encrypt) +static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; + __u8 encrypt; + + if (conn->state == BT_CONFIG) { + if (status) + conn->state = BT_CONNECTED; + + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + return; + } + + if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + encrypt = 0x00; + else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) + encrypt = 0x02; + else + encrypt = 0x01; if (conn->sec_level == BT_SECURITY_SDP) conn->sec_level = BT_SECURITY_LOW; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 310622086f74..2d707e0b74ed 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2756,7 +2756,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) &cp); } else { clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); - hci_encrypt_cfm(conn, ev->status, 0x00); + hci_encrypt_cfm(conn, ev->status); } } @@ -2841,22 +2841,7 @@ static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, conn->enc_key_size = rp->key_size; } - if (conn->state == BT_CONFIG) { - conn->state = BT_CONNECTED; - hci_connect_cfm(conn, 0); - hci_conn_drop(conn); - } else { - u8 encrypt; - - if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) - encrypt = 0x00; - else if 
So let's fix the issue by reversing the condition.
Fixes: 3ca44c16b0dc ("Bluetooth: Consolidate encryption handling in hci_encrypt_cfm") Signed-off-by: Patrick Steinhardt Acked-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann Signed-off-by: Greg Kroah-Hartman --- include/net/bluetooth/hci_core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index d869d25693f2..8bca07406620 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1293,7 +1293,7 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) __u8 encrypt; if (conn->state == BT_CONFIG) { - if (status) + if (!status) conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); -- GitLab From c1ad9bb3b04725c5c9a593abb1657e8a89c62fd7 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Wed, 20 May 2020 14:20:14 -0700 Subject: [PATCH 1235/1304] Bluetooth: Disconnect if E0 is used for Level 4 commit 8746f135bb01872ff412d408ea1aa9ebd328c1f5 upstream. 
E0 is not allowed with Level 4: BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C page 1319: '128-bit equivalent strength for link and encryption keys required using FIPS approved algorithms (E0 not allowed, SAFER+ not allowed, and P-192 not allowed; encryption key not shortened' SC enabled: > HCI Event: Read Remote Extended Features (0x23) plen 13 Status: Success (0x00) Handle: 256 Page: 1/2 Features: 0x0b 0x00 0x00 0x00 0x00 0x00 0x00 0x00 Secure Simple Pairing (Host Support) LE Supported (Host) Secure Connections (Host Support) > HCI Event: Encryption Change (0x08) plen 4 Status: Success (0x00) Handle: 256 Encryption: Enabled with AES-CCM (0x02) SC disabled: > HCI Event: Read Remote Extended Features (0x23) plen 13 Status: Success (0x00) Handle: 256 Page: 1/2 Features: 0x03 0x00 0x00 0x00 0x00 0x00 0x00 0x00 Secure Simple Pairing (Host Support) LE Supported (Host) > HCI Event: Encryption Change (0x08) plen 4 Status: Success (0x00) Handle: 256 Encryption: Enabled with E0 (0x01) [May 8 20:23] Bluetooth: hci0: Invalid security: expect AES but E0 was used < HCI Command: Disconnect (0x01|0x0006) plen 3 Handle: 256 Reason: Authentication Failure (0x05) Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann Cc: Hans-Christian Noren Egtvedt Signed-off-by: Greg Kroah-Hartman --- include/net/bluetooth/hci_core.h | 10 ++++++---- net/bluetooth/hci_conn.c | 17 +++++++++++++++++ net/bluetooth/hci_event.c | 20 ++++++++------------ 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 8bca07406620..319572809575 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1308,11 +1308,13 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) else encrypt = 0x01; - if (conn->sec_level == BT_SECURITY_SDP) - conn->sec_level = BT_SECURITY_LOW; + if (!status) { + if (conn->sec_level == BT_SECURITY_SDP) + conn->sec_level = 
BT_SECURITY_LOW; - if (conn->pending_sec_level > conn->sec_level) - conn->sec_level = conn->pending_sec_level; + if (conn->pending_sec_level > conn->sec_level) + conn->sec_level = conn->pending_sec_level; + } mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index db735d0d931e..1b50e4ef2c68 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1282,6 +1282,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn) return 0; } + /* AES encryption is required for Level 4: + * + * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C + * page 1319: + * + * 128-bit equivalent strength for link and encryption keys + * required using FIPS approved algorithms (E0 not allowed, + * SAFER+ not allowed, and P-192 not allowed; encryption key + * not shortened) + */ + if (conn->sec_level == BT_SECURITY_FIPS && + !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { + bt_dev_err(conn->hdev, + "Invalid security: Missing AES-CCM usage"); + return 0; + } + if (hci_conn_ssp_enabled(conn) && !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) return 0; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 2d707e0b74ed..d98d8e78b736 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2890,27 +2890,23 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + /* Check link security requirements are met */ + if (!hci_conn_check_link_mode(conn)) + ev->status = HCI_ERROR_AUTH_FAILURE; + if (ev->status && conn->state == BT_CONNECTED) { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); + /* Notify upper layers so they can cleanup before + * disconnecting. 
+ */ + hci_encrypt_cfm(conn, ev->status); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } - /* In Secure Connections Only mode, do not allow any connections - * that are not encrypted with AES-CCM using a P-256 authenticated - * combination key. - */ - if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && - (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || - conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { - hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); - hci_conn_drop(conn); - goto unlock; - } - /* Try reading the encryption key size for encrypted ACL links */ if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { struct hci_cp_read_enc_key_size cp; -- GitLab From d4503a3fd3bbf8ddecddb4d71be7110d7651eb8a Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Thu, 24 Sep 2020 11:14:10 +0200 Subject: [PATCH 1236/1304] media: usbtv: Fix refcounting mixup commit bf65f8aabdb37bc1a785884374e919477fe13e10 upstream. The premature free in the error path is blocked by V4L refcounting, not USB refcounting. Thanks to Ben Hutchings for review. 
[v2] corrected attributions Signed-off-by: Oliver Neukum Fixes: 50e704453553 ("media: usbtv: prevent double free in error case") CC: stable@vger.kernel.org Reported-by: Ben Hutchings Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/usbtv/usbtv-core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index ee9c656d121f..2308c0b4f5e7 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -113,7 +113,8 @@ static int usbtv_probe(struct usb_interface *intf, usbtv_audio_fail: /* we must not free at this point */ - usb_get_dev(usbtv->udev); + v4l2_device_get(&usbtv->v4l2_dev); + /* this will undo the v4l2_device_get() */ usbtv_video_free(usbtv); usbtv_video_fail: -- GitLab From 32f09ec40aa0c7e511c97ac40a96d727c3708a0f Mon Sep 17 00:00:00 2001 From: Wilken Gottwalt Date: Sat, 3 Oct 2020 11:40:29 +0200 Subject: [PATCH 1237/1304] USB: serial: option: add Cellient MPL200 card commit 3e765cab8abe7f84cb80d4a7a973fc97d5742647 upstream. Add usb ids of the Cellient MPL200 card. Signed-off-by: Wilken Gottwalt Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/3db5418fe9e516f4b290736c5a199c9796025e3c.1601715478.git.wilken.gottwalt@mailbox.org Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 810f1010ab13..12a5c2b13a62 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -528,6 +528,7 @@ static void option_instat_callback(struct urb *urb); /* Cellient products */ #define CELLIENT_VENDOR_ID 0x2692 #define CELLIENT_PRODUCT_MEN200 0x9005 +#define CELLIENT_PRODUCT_MPL200 0x9025 /* Hyundai Petatel Inc. 
products */ #define PETATEL_VENDOR_ID 0x1ff4 @@ -1982,6 +1983,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, + { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200), + .driver_info = RSVD(1) | RSVD(4) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */ -- GitLab From aa803a62b3e2c0d7e5d6a50c2a24724721df0b39 Mon Sep 17 00:00:00 2001 From: Leonid Bloch Date: Sun, 4 Oct 2020 18:58:13 +0300 Subject: [PATCH 1238/1304] USB: serial: option: Add Telit FT980-KS composition commit 924a9213358fb92fa3c3225d6d042aa058167405 upstream. This commit adds the following Telit FT980-KS composition: 0x1054: rndis, diag, adb, nmea, modem, modem, aux AT commands can be sent to /dev/ttyUSB2. 
Signed-off-by: Leonid Bloch Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/ce86bc05-f4e2-b199-0cdc-792715e3f275@asocscloud.com Link: https://lore.kernel.org/r/20201004155813.2342-1-lb.workbox@gmail.com Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 12a5c2b13a62..c773db129bf9 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1187,6 +1187,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff), /* Telit FN980 (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */ + .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), -- GitLab From a9a19989b45a183925cd7790f25d44bc7f181d6d Mon Sep 17 00:00:00 2001 From: Anant Thazhemadam Date: Sat, 10 Oct 2020 13:59:32 +0530 Subject: [PATCH 1239/1304] staging: comedi: check validity of wMaxPacketSize of usb endpoints found commit e1f13c879a7c21bd207dc6242455e8e3a1e88b40 upstream. While finding usb endpoints in vmk80xx_find_usb_endpoints(), check if wMaxPacketSize = 0 for the endpoints found. Some devices have isochronous endpoints that have wMaxPacketSize = 0 (as required by the USB-2 spec). However, since this doesn't apply here, wMaxPacketSize = 0 can be considered to be invalid. 
Reported-by: syzbot+009f546aa1370056b1c2@syzkaller.appspotmail.com Tested-by: syzbot+009f546aa1370056b1c2@syzkaller.appspotmail.com Signed-off-by: Anant Thazhemadam Cc: stable Link: https://lore.kernel.org/r/20201010082933.5417-1-anant.thazhemadam@gmail.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Greg Kroah-Hartman --- drivers/staging/comedi/drivers/vmk80xx.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c index 65dc6c51037e..7956abcbae22 100644 --- a/drivers/staging/comedi/drivers/vmk80xx.c +++ b/drivers/staging/comedi/drivers/vmk80xx.c @@ -667,6 +667,9 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev) if (!devpriv->ep_rx || !devpriv->ep_tx) return -ENODEV; + if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx)) + return -EINVAL; + return 0; } -- GitLab From cd86e2ee197dc2065ad84b73cb1db44f8e920dfd Mon Sep 17 00:00:00 2001 From: Scott Chen Date: Thu, 24 Sep 2020 14:27:45 +0800 Subject: [PATCH 1240/1304] USB: serial: pl2303: add device-id for HP GC device commit 031f9664f8f9356cee662335bc56c93d16e75665 upstream. This is adds a device id for HP LD381 which is a pl2303GC-base device. 
Signed-off-by: Scott Chen Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/pl2303.c | 1 + drivers/usb/serial/pl2303.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 7751b94ac7f5..2d78ad2842a4 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -94,6 +94,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) }, + { USB_DEVICE(HP_VENDOR_ID, HP_LD381GC_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index c98db6b650a5..a897680473a7 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -121,6 +121,7 @@ /* Hewlett-Packard POS Pole Displays */ #define HP_VENDOR_ID 0x03f0 +#define HP_LD381GC_PRODUCT_ID 0x0183 #define HP_LM920_PRODUCT_ID 0x026b #define HP_TD620_PRODUCT_ID 0x0956 #define HP_LD960_PRODUCT_ID 0x0b39 -- GitLab From 2b00a51112f47080471c6e576ae33a54fcee715c Mon Sep 17 00:00:00 2001 From: "Mychaela N. Falconia" Date: Wed, 16 Sep 2020 01:56:29 +0000 Subject: [PATCH 1241/1304] USB: serial: ftdi_sio: add support for FreeCalypso JTAG+UART adapters commit 6cf87e5edd9944e1d3b6efd966ea401effc304ee upstream. There exist many FT2232-based JTAG+UART adapter designs in which FT2232 Channel A is used for JTAG and Channel B is used for UART. 
The best way to handle them in Linux is to have the ftdi_sio driver create a ttyUSB device only for Channel B and not for Channel A: a ttyUSB device for Channel A would be bogus and will disappear as soon as the user runs OpenOCD or other applications that access Channel A for JTAG from userspace, causing undesirable noise for users. The ftdi_sio driver already has a dedicated quirk for such JTAG+UART FT2232 adapters, and it requires assigning custom USB IDs to such adapters and adding these IDs to the driver with the ftdi_jtag_quirk applied. Boutique hardware manufacturer Falconia Partners LLC has created a couple of JTAG+UART adapter designs (one buffered, one unbuffered) as part of FreeCalypso project, and this hardware is specifically made to be used with Linux hosts, with the intent that Channel A will be accessed only from userspace via appropriate applications, and that Channel B will be supported by the ftdi_sio kernel driver, presenting a standard ttyUSB device to userspace. Toward this end the hardware manufacturer will be programming FT2232 EEPROMs with custom USB IDs, specifically with the intent that these IDs will be recognized by the ftdi_sio driver with the ftdi_jtag_quirk applied. Signed-off-by: Mychaela N. 
Falconia [johan: insert in PID order and drop unused define] Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/ftdi_sio.c | 5 +++++ drivers/usb/serial/ftdi_sio_ids.h | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index f0f630e1cf1c..b2364e379429 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1027,6 +1027,11 @@ static const struct usb_device_id id_table_combined[] = { /* U-Blox devices */ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, + /* FreeCalypso USB adapters */ + { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_BUF_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index b5ca17a5967a..3d47c6d72256 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -39,6 +39,13 @@ #define FTDI_LUMEL_PD12_PID 0x6002 +/* + * Custom USB adapters made by Falconia Partners LLC + * for FreeCalypso project, ID codes allocated to Falconia by FTDI. + */ +#define FTDI_FALCONIA_JTAG_BUF_PID 0x7150 +#define FTDI_FALCONIA_JTAG_UNBUF_PID 0x7151 + /* Sienna Serial Interface by Secyourit GmbH */ #define FTDI_SIENNA_PID 0x8348 -- GitLab From 0364aee683c37679ae91f7bf9e399c5cd6eba126 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 21 Sep 2020 15:08:50 +0200 Subject: [PATCH 1242/1304] reiserfs: Initialize inode keys properly commit 4443390e08d34d5771ab444f601cf71b3c9634a4 upstream. reiserfs_read_locked_inode() didn't initialize key length properly. Use _make_cpu_key() macro for key initialization so that all key member are properly initialized. 
CC: stable@vger.kernel.org Reported-by: syzbot+d94d02749498bb7bab4b@syzkaller.appspotmail.com Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/reiserfs/inode.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 6419e6dacc39..70387650436c 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1553,11 +1553,7 @@ void reiserfs_read_locked_inode(struct inode *inode, * set version 1, version 2 could be used too, because stat data * key is the same in both versions */ - key.version = KEY_FORMAT_3_5; - key.on_disk_key.k_dir_id = dirino; - key.on_disk_key.k_objectid = inode->i_ino; - key.on_disk_key.k_offset = 0; - key.on_disk_key.k_type = 0; + _make_cpu_key(&key, KEY_FORMAT_3_5, dirino, inode->i_ino, 0, 0, 3); /* look for the object's stat data */ retval = search_item(inode->i_sb, &key, &path_to_sd); -- GitLab From 9fd231b255b7e81330969cb6d97782bbf0888f13 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 30 Sep 2020 17:08:20 +0200 Subject: [PATCH 1243/1304] reiserfs: Fix oops during mount commit c2bb80b8bdd04dfe32364b78b61b6a47f717af52 upstream. 
With suitably crafted reiserfs image and mount command reiserfs will crash when trying to verify that XATTR_ROOT directory can be looked up in / as that recurses back to xattr code like: xattr_lookup+0x24/0x280 fs/reiserfs/xattr.c:395 reiserfs_xattr_get+0x89/0x540 fs/reiserfs/xattr.c:677 reiserfs_get_acl+0x63/0x690 fs/reiserfs/xattr_acl.c:209 get_acl+0x152/0x2e0 fs/posix_acl.c:141 check_acl fs/namei.c:277 [inline] acl_permission_check fs/namei.c:309 [inline] generic_permission+0x2ba/0x550 fs/namei.c:353 do_inode_permission fs/namei.c:398 [inline] inode_permission+0x234/0x4a0 fs/namei.c:463 lookup_one_len+0xa6/0x200 fs/namei.c:2557 reiserfs_lookup_privroot+0x85/0x1e0 fs/reiserfs/xattr.c:972 reiserfs_fill_super+0x2b51/0x3240 fs/reiserfs/super.c:2176 mount_bdev+0x24f/0x360 fs/super.c:1417 Fix the problem by bailing from reiserfs_xattr_get() when xattrs are not yet initialized. CC: stable@vger.kernel.org Reported-by: syzbot+9b33c9b118d77ff59b6f@syzkaller.appspotmail.com Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/reiserfs/xattr.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index ee216925a709..0a397f179fd6 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -665,6 +665,13 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer, if (get_inode_sd_version(inode) == STAT_DATA_V1) return -EOPNOTSUPP; + /* + * priv_root needn't be initialized during mount so allow initial + * lookups to succeed. + */ + if (!REISERFS_SB(inode->i_sb)->priv_root) + return 0; + dentry = xattr_lookup(inode, name, XATTR_REPLACE); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); -- GitLab From bc9c21f81faf4df23dcc11b2bc1b6d3bc00b0d84 Mon Sep 17 00:00:00 2001 From: Arnaud Patard Date: Fri, 2 Aug 2019 10:32:40 +0200 Subject: [PATCH 1244/1304] drivers/net/ethernet/marvell/mvmdio.c: Fix non OF case commit d934423ac26ed373dfe089734d505dca5ff679b6 upstream. 
Orion5.x systems are still using machine files and not device-tree. Commit 96cb4342382290c9 ("net: mvmdio: allow up to three clocks to be specified for orion-mdio") has replaced devm_clk_get() with of_clk_get(), leading to a oops at boot and not working network, as reported in https://lists.debian.org/debian-arm/2019/07/msg00088.html and possibly in https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=908712. Link: https://lists.debian.org/debian-arm/2019/07/msg00088.html Fixes: 96cb4342382290c9 ("net: mvmdio: allow up to three clocks to be specified for orion-mdio") Signed-off-by: Arnaud Patard Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvmdio.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index ee7857298361..cf7e10fbab0e 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -319,15 +319,25 @@ static int orion_mdio_probe(struct platform_device *pdev) init_waitqueue_head(&dev->smi_busy_wait); - for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { - dev->clk[i] = of_clk_get(pdev->dev.of_node, i); - if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { + if (pdev->dev.of_node) { + for (i = 0; i < ARRAY_SIZE(dev->clk); i++) { + dev->clk[i] = of_clk_get(pdev->dev.of_node, i); + if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk; + } + if (IS_ERR(dev->clk[i])) + break; + clk_prepare_enable(dev->clk[i]); + } + } else { + dev->clk[0] = clk_get(&pdev->dev, NULL); + if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto out_clk; } - if (IS_ERR(dev->clk[i])) - break; - clk_prepare_enable(dev->clk[i]); + if (!IS_ERR(dev->clk[0])) + clk_prepare_enable(dev->clk[0]); } dev->err_interrupt = platform_get_irq(pdev, 0); -- GitLab From fc86d27bdb53384b97f317272efdff70efb0c839 Mon Sep 17 00:00:00 2001 From: Herbert 
Xu Date: Fri, 2 Oct 2020 17:55:22 +1000 Subject: [PATCH 1245/1304] crypto: bcm - Verify GCM/CCM key length in setkey MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 10a2f0b311094ffd45463a529a410a51ca025f27 upstream. The setkey function for GCM/CCM algorithms didn't verify the key length before copying the key and subtracting the salt length. This patch delays the copying of the key til after the verification has been done. It also adds checks on the key length to ensure that it's at least as long as the salt. Fixes: 9d12ba86f818 ("crypto: brcm - Add Broadcom SPU driver") Cc: Reported-by: kiyin(尹亮) Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/bcm/cipher.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 0b1fc5664b1d..c2736274ad63 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -2980,7 +2980,6 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, ctx->enckeylen = keylen; ctx->authkeylen = 0; - memcpy(ctx->enckey, key, ctx->enckeylen); switch (ctx->enckeylen) { case AES_KEYSIZE_128: @@ -2996,6 +2995,8 @@ static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, goto badkey; } + memcpy(ctx->enckey, key, ctx->enckeylen); + flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, ctx->authkeylen); flow_dump(" enc: ", ctx->enckey, ctx->enckeylen); @@ -3056,6 +3057,10 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher, struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); + + if (keylen < GCM_ESP_SALT_SIZE) + return -EINVAL; + ctx->salt_len = GCM_ESP_SALT_SIZE; ctx->salt_offset = GCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); @@ -3084,6 +3089,10 @@ static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher, struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", 
__func__); + + if (keylen < GCM_ESP_SALT_SIZE) + return -EINVAL; + ctx->salt_len = GCM_ESP_SALT_SIZE; ctx->salt_offset = GCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); @@ -3113,6 +3122,10 @@ static int aead_ccm_esp_setkey(struct crypto_aead *cipher, struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); + + if (keylen < CCM_ESP_SALT_SIZE) + return -EINVAL; + ctx->salt_len = CCM_ESP_SALT_SIZE; ctx->salt_offset = CCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE); -- GitLab From e2c5f02307795960c595bf5a9fef4fc3b0cb858d Mon Sep 17 00:00:00 2001 From: Dominik Przychodni Date: Mon, 31 Aug 2020 11:59:59 +0100 Subject: [PATCH 1246/1304] crypto: qat - check cipher length for aead AES-CBC-HMAC-SHA commit 45cb6653b0c355fc1445a8069ba78a4ce8720511 upstream. Return -EINVAL for authenc(hmac(sha1),cbc(aes)), authenc(hmac(sha256),cbc(aes)) and authenc(hmac(sha512),cbc(aes)) if the cipher length is not multiple of the AES block. This is to prevent an undefined device behaviour. 
Fixes: d370cec32194 ("crypto: qat - Intel(R) QAT crypto interface") Cc: Signed-off-by: Dominik Przychodni [giovanni.cabiddu@intel.com: reworded commit message] Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- drivers/crypto/qat/qat_common/qat_algs.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index 1138e41d6805..883342a45be7 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -828,6 +828,11 @@ static int qat_alg_aead_dec(struct aead_request *areq) struct icp_qat_fw_la_bulk_req *msg; int digst_size = crypto_aead_authsize(aead_tfm); int ret, ctr = 0; + u32 cipher_len; + + cipher_len = areq->cryptlen - digst_size; + if (cipher_len % AES_BLOCK_SIZE != 0) + return -EINVAL; ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); if (unlikely(ret)) @@ -842,7 +847,7 @@ static int qat_alg_aead_dec(struct aead_request *areq) qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp; cipher_param = (void *)&qat_req->req.serv_specif_rqpars; - cipher_param->cipher_length = areq->cryptlen - digst_size; + cipher_param->cipher_length = cipher_len; cipher_param->cipher_offset = areq->assoclen; memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE); auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); @@ -871,6 +876,9 @@ static int qat_alg_aead_enc(struct aead_request *areq) uint8_t *iv = areq->iv; int ret, ctr = 0; + if (areq->cryptlen % AES_BLOCK_SIZE != 0) + return -EINVAL; + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); if (unlikely(ret)) return ret; -- GitLab From ad326970d25cc85128cd22d62398751ad072efff Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 17 Oct 2020 10:12:58 +0200 Subject: [PATCH 1247/1304] Linux 4.19.152 Tested-by: 
Pavel Machek (CIP) Tested-by: Jon Hunter Tested-by: Guenter Roeck Tested-by: Linux Kernel Functional Testing Link: https://lore.kernel.org/r/20201016090437.301376476@linuxfoundation.org Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f2c9db9b4015..aa79ce7bfdc7 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 4 PATCHLEVEL = 19 -SUBLEVEL = 151 +SUBLEVEL = 152 EXTRAVERSION = NAME = "People's Front" -- GitLab From b8d22b3e2f46cb3531a97a06f6752420c12ef947 Mon Sep 17 00:00:00 2001 From: Praveen Kurapati Date: Wed, 23 Sep 2020 22:57:20 +0530 Subject: [PATCH 1248/1304] msm: ipa3: Add change to not reset HOLB timer For Q6 endpoints add change to not reset the HOLB timer value to zero instead of default value. Add change to enable HOLB twice for IPA 4.x targets. Change-Id: Ic9596e711b037d24ae25835cb6dd193ec040d723 Signed-off-by: Praveen Kurapati --- drivers/platform/msm/ipa/ipa_v3/ipa.c | 10 +++++----- drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 6 ++++-- drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 4 ++-- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 9e87612219c5..77654e7f0b48 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -3059,16 +3059,16 @@ static void ipa3_q6_avoid_holb(void) * setting HOLB on Q6 pipes, and from APPS perspective * they are not valid, therefore, the above function * will fail. + * Also don't reset the HOLB timer to 0 for Q6 pipes. */ - ipahal_write_reg_n_fields( - IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, - ep_idx, &ep_holb); ipahal_write_reg_n_fields( IPA_ENDP_INIT_HOL_BLOCK_EN_n, ep_idx, &ep_holb); - /* IPA4.5 issue requires HOLB_EN to be written twice */ - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) + /* For targets > IPA_4.0 issue requires HOLB_EN to be + * written twice. 
+ */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) ipahal_write_reg_n_fields( IPA_ENDP_INIT_HOL_BLOCK_EN_n, ep_idx, &ep_holb); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 5aa0dc413668..4d771cb56b6a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -1341,8 +1341,10 @@ int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset, IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe_idx, &ep_holb); - /* IPA4.5 issue requires HOLB_EN to be written twice */ - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) + /* For targets > IPA_4.0 issue requires HOLB_EN to be + * written twice. + */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) ipahal_write_reg_n_fields( IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe_idx, &ep_holb); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 1afda31add22..e40f17098115 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -5294,8 +5294,8 @@ int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) ipa3_ctx->ep[clnt_hdl].holb.en = IPA_HOLB_TMR_EN; ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl, ep_holb); - /* IPA4.5 issue requires HOLB_EN to be written twice */ - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) + /* For targets > IPA_4.0 issue requires HOLB_EN to be written twice */ + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl, ep_holb); -- GitLab From 46ae8ddee75318c51f57ce33aadd7ad94fe16f2a Mon Sep 17 00:00:00 2001 From: Mukesh Kumar Savaliya Date: Fri, 19 Jun 2020 17:34:18 +0530 Subject: [PATCH 1249/1304] i3c: i3c-master-qcom-geni: Fix IBI and Hot join related issues This patch fixes two issues observed during recent test with SN220. Fix IBI issue to set the payload len based on clients config and configures the same for master. 
Also fix the hot join issue observed during unregistering IBI which basically creates race between generation of hot join event and unregistering of IBI. Hence avoid disabling the interrupt and resetting IBI controller. Change-Id: I7c151a90375d8c65f2332462332ef576945fb4fd Signed-off-by: Mukesh Kumar Savaliya --- drivers/i3c/master/i3c-master-qcom-geni.c | 199 ++++++++++++++-------- 1 file changed, 125 insertions(+), 74 deletions(-) diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c index b45dff609b35..ee00b2936cfa 100644 --- a/drivers/i3c/master/i3c-master-qcom-geni.c +++ b/drivers/i3c/master/i3c-master-qcom-geni.c @@ -151,12 +151,17 @@ #define CFG_FAIL_STALL_DIFF_EN BIT(20) #define ADDR_ASSOCIATED_W_OTHER_GPII_EN BIT(21) +/* Enable bits for GPIIn, n:[0-11] */ +#define GPIIn_IBI_EN(n) BIT(n) + /* IBI_CMD fields */ #define IBI_CMD_OPCODE BIT(0) #define I3C_SLAVE_RW BIT(15) #define STALL BIT(21) #define I3C_SLAVE_ADDR_SHIFT 8 #define I3C_SLAVE_MASK 0x7f +#define NUM_OF_MDB_SHIFT 16 +#define IBI_NUM_OF_MDB_MSK GENMASK(18, 16) /* IBI_GEN_CONFIG fields */ #define IBI_C_ENABLE BIT(0) @@ -319,6 +324,9 @@ struct geni_i3c_clk_fld { u32 i2c_t_cycle_cnt; }; +static void geni_i3c_enable_ibi_ctrl(struct geni_i3c_dev *gi3c, bool enable); +static void geni_i3c_enable_ibi_irq(struct geni_i3c_dev *gi3c, bool enable); + static struct geni_i3c_dev* to_geni_i3c_master(struct i3c_master_controller *master) { @@ -1430,38 +1438,14 @@ static int geni_i3c_master_disable_ibi(struct i3c_dev_desc *dev) static void qcom_geni_i3c_ibi_conf(struct geni_i3c_dev *gi3c) { - u32 val, timeout; - gi3c->ibi.err = 0; reinit_completion(&gi3c->ibi.done); /* set the configuration for 100Khz OD speed */ geni_write_reg(0x5FD74322, gi3c->se.ibi_base, IBI_SCL_PP_TIMING_CONFIG); - /* Enable I3C IBI controller */ - val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG); - val |= IBI_C_ENABLE; - geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG); - - /* enable 
ENABLE_CHANGE */ - val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - val |= IBI_C_ENABLE; - geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - - /* wait for ENABLE_CHANGE */ - timeout = wait_for_completion_timeout(&gi3c->ibi.done, XFER_TIMEOUT); - if (!timeout) { - GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, - "timeout while ENABLE_CHANGE bit\n"); - return; - } - - /* enable manager interrupts */ - geni_write_reg(0x1B, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - - /* Enable GPII0 interrupts */ - geni_write_reg(0x1, gi3c->se.ibi_base, IBI_GPII_IBI_EN); - geni_write_reg(~0u, gi3c->se.ibi_base, IBI_IRQ_EN(0)); + geni_i3c_enable_ibi_ctrl(gi3c, true); + geni_i3c_enable_ibi_irq(gi3c, true); gi3c->ibi.is_init = true; } @@ -1472,6 +1456,7 @@ static int geni_i3c_master_request_ibi(struct i3c_dev_desc *dev, struct geni_i3c_dev *gi3c = to_geni_i3c_master(m); struct geni_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); unsigned long i, flags; + unsigned int payload_len = req->max_payload_len; if (!gi3c->ibi.hw_support) return -EPERM; @@ -1505,6 +1490,7 @@ static int geni_i3c_master_request_ibi(struct i3c_dev_desc *dev, cmd = ((dev->info.dyn_addr & I3C_SLAVE_MASK) << I3C_SLAVE_ADDR_SHIFT) | I3C_SLAVE_RW | STALL; + cmd |= ((payload_len << NUM_OF_MDB_SHIFT) & IBI_NUM_OF_MDB_MSK); geni_write_reg(cmd, gi3c->se.ibi_base, IBI_CMD(0)); /* wait for adding slave IBI */ @@ -1565,50 +1551,110 @@ static int qcom_deallocate_ibi_table_entry(struct geni_i3c_dev *gi3c) return 0; } -static void qcom_geni_i3c_ibi_unconf(struct geni_i3c_dev *gi3c) +static void geni_i3c_enable_hotjoin_irq(struct geni_i3c_dev *gi3c, bool enable) { - u32 val, timeout; - int ret = 0; + u32 val; - val = geni_read_reg(gi3c->se.ibi_base, IBI_ALLOCATED_ENTRIES_GPII(0)); - if (val) { - ret = qcom_deallocate_ibi_table_entry(gi3c); - if (ret) - return; - } + //Disable hot-join, until next probe happens + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + if (enable) + val |= HOT_JOIN_IRQ_EN; + else + 
val &= ~HOT_JOIN_IRQ_EN; + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - /* disable interrupts */ - geni_write_reg(0, gi3c->se.ibi_base, IBI_GPII_IBI_EN); - geni_write_reg(0, gi3c->se.ibi_base, IBI_IRQ_EN(0)); + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s:%s\n", __func__, (enable) ? "Enabled" : "Disabled"); +} - /* check if any IBI is enabled, if not then reset HW */ - val = geni_read_reg(gi3c->se.ibi_base, IBI_GPII_IBI_EN); - if (!val) { +static void geni_i3c_enable_ibi_irq(struct geni_i3c_dev *gi3c, bool enable) +{ + u32 val; - gi3c->ibi.err = 0; + if (enable) { + /* enable manager interrupts : HPG sec 4.1 */ + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + val |= (val & 0x1B); + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + + /* Enable GPII0 interrupts */ + geni_write_reg(GPIIn_IBI_EN(0), gi3c->se.ibi_base, + IBI_GPII_IBI_EN); + geni_write_reg(~0u, gi3c->se.ibi_base, IBI_IRQ_EN(0)); + } else { + geni_write_reg(0, gi3c->se.ibi_base, IBI_GPII_IBI_EN); + geni_write_reg(0, gi3c->se.ibi_base, IBI_IRQ_EN(0)); + geni_write_reg(0, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + } +} + +static void geni_i3c_enable_ibi_ctrl(struct geni_i3c_dev *gi3c, bool enable) +{ + u32 val, timeout; + + if (enable) { reinit_completion(&gi3c->ibi.done); - val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG); - val &= ~IBI_C_ENABLE; - geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG); + /* enable ENABLE_CHANGE */ + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + val |= IBI_C_ENABLE; + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + /* Enable I3C IBI controller, if not in enabled state */ + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG); + if (!(val & IBI_C_ENABLE)) { + val |= IBI_C_ENABLE; + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG); - /* wait for ENABLE change */ - timeout = wait_for_completion_timeout(&gi3c->ibi.done, - XFER_TIMEOUT); - if (!timeout) { - GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, - "timeout 
while disabling IBI controller\n"); + /* wait for ENABLE_CHANGE */ + timeout = wait_for_completion_timeout(&gi3c->ibi.done, + XFER_TIMEOUT); + if (!timeout) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "timeout while ENABLE_CHANGE bit\n"); return; + } + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s: IBI ctrl enabled\n", __func__); } + } else { + /* Disable IBI controller */ - if (gi3c->ibi.err) { + /* check if any IBI is enabled, if not then disable IBI ctrl */ + val = geni_read_reg(gi3c->se.ibi_base, IBI_GPII_IBI_EN); + if (!val) { + gi3c->ibi.err = 0; + reinit_completion(&gi3c->ibi.done); + + val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_CONFIG); + val &= ~IBI_C_ENABLE; + geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_CONFIG); + + /* wait for ENABLE change */ + timeout = wait_for_completion_timeout(&gi3c->ibi.done, + XFER_TIMEOUT); + if (!timeout) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "timeout disabling IBI: 0x%x\n", gi3c->ibi.err); + return; + } GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, - "error while disabling IBI controller 0x%x\n", - gi3c->ibi.err); - return; + "%s: IBI ctrl disabled\n", __func__); } + } +} + + +static void qcom_geni_i3c_ibi_unconf(struct geni_i3c_dev *gi3c) +{ + u32 val; + int ret = 0; + val = geni_read_reg(gi3c->se.ibi_base, IBI_ALLOCATED_ENTRIES_GPII(0)); + if (val) { + ret = qcom_deallocate_ibi_table_entry(gi3c); + if (ret) + return; } gi3c->ibi.is_init = false; @@ -1966,17 +2012,6 @@ static int geni_i3c_probe(struct platform_device *pdev) geni_se_init(gi3c->se.base, gi3c->tx_wm, tx_depth); se_config_packing(gi3c->se.base, BITS_PER_BYTE, PACKING_BYTES_PW, true); - gi3c->hj_wl = wakeup_source_register(gi3c->se.dev, - dev_name(gi3c->se.dev)); - if (!gi3c->hj_wl) { - GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, "wakeup source registration failed\n"); - se_geni_resources_off(&gi3c->se.i3c_rsc); - return -ENOMEM; - } - - INIT_WORK(&gi3c->hj_wd, geni_i3c_hotjoin); - gi3c->hj_wq = alloc_workqueue("%s", 0, 0, 
dev_name(gi3c->se.dev)); - ret = i3c_ibi_rsrcs_init(gi3c, pdev); if (ret) { se_geni_resources_off(&gi3c->se.i3c_rsc); @@ -1999,8 +2034,19 @@ static int geni_i3c_probe(struct platform_device *pdev) GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, "i3c_master_register failed:%d\n", ret); - //enable hot-join IRQ also - geni_write_reg(~0u, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + // hot-join + gi3c->hj_wl = wakeup_source_register(gi3c->se.dev, + dev_name(gi3c->se.dev)); + if (!gi3c->hj_wl) { + GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, + "wakeup source registration failed\n"); + se_geni_resources_off(&gi3c->se.i3c_rsc); + return -ENOMEM; + } + + INIT_WORK(&gi3c->hj_wd, geni_i3c_hotjoin); + gi3c->hj_wq = alloc_workqueue("%s", 0, 0, dev_name(gi3c->se.dev)); + geni_i3c_enable_hotjoin_irq(gi3c, true); GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "I3C probed\n"); @@ -2010,17 +2056,20 @@ static int geni_i3c_probe(struct platform_device *pdev) static int geni_i3c_remove(struct platform_device *pdev) { struct geni_i3c_dev *gi3c = platform_get_drvdata(pdev); - int ret = 0, val = 0; + int ret = 0; //Disable hot-join, until next probe happens - val = geni_read_reg(gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - val &= ~HOT_JOIN_IRQ_EN; - geni_write_reg(val, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); + geni_i3c_enable_hotjoin_irq(gi3c, false); + destroy_workqueue(gi3c->hj_wq); + wakeup_source_unregister(gi3c->hj_wl); if (gi3c->ibi.is_init) qcom_geni_i3c_ibi_unconf(gi3c); - destroy_workqueue(gi3c->hj_wq); - wakeup_source_unregister(gi3c->hj_wl); + geni_i3c_enable_ibi_ctrl(gi3c, false); + + /* Potentially to be done before pinctrl change */ + geni_i3c_enable_ibi_irq(gi3c, false); + /*force suspend to avoid the auto suspend caused by driver removal*/ pm_runtime_force_suspend(gi3c->se.dev); ret = pinctrl_select_state(gi3c->se.i3c_rsc.geni_pinctrl, @@ -2028,7 +2077,9 @@ static int geni_i3c_remove(struct platform_device *pdev) if (ret) GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, " i3c: pinctrl_select_state 
failed\n"); + ret = i3c_master_unregister(&gi3c->ctrlr); + /* TBD : If we need debug for previous session, Don't delete logs */ if (gi3c->ipcl) ipc_log_context_destroy(gi3c->ipcl); return ret; -- GitLab From 3ce8314562b5604b2fe660dbfc9fd6738385247e Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Tue, 20 Oct 2020 14:04:50 +0530 Subject: [PATCH 1250/1304] icnss: check SKIP_QMI test bit for exported qmi messages SKIP_QMI QUIRK gets set in QMI bypass build flavor. Check SKIP_QMI test bit for exported qmi messages when SKIP_QMI QUIRK is set. Change-Id: Id26574614c312f4fcfa7c5091ddf332baa037ae1 Signed-off-by: Govind Singh --- drivers/soc/qcom/icnss.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 74ce019939c4..439df9208385 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -2270,6 +2270,9 @@ int icnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode) if (!dev) return -ENODEV; + if (test_bit(SKIP_QMI, &quirks)) + return 0; + if (test_bit(ICNSS_FW_DOWN, &penv->state) || !test_bit(ICNSS_FW_READY, &penv->state)) { icnss_pr_err("FW down, ignoring fw_log_mode state: 0x%lx\n", @@ -2363,6 +2366,9 @@ int icnss_wlan_enable(struct device *dev, struct icnss_wlan_enable_cfg *config, enum icnss_driver_mode mode, const char *host_version) { + if (test_bit(SKIP_QMI, &quirks)) + return 0; + if (test_bit(ICNSS_FW_DOWN, &penv->state) || !test_bit(ICNSS_FW_READY, &penv->state)) { icnss_pr_err("FW down, ignoring wlan_enable state: 0x%lx\n", @@ -2382,6 +2388,9 @@ EXPORT_SYMBOL(icnss_wlan_enable); int icnss_wlan_disable(struct device *dev, enum icnss_driver_mode mode) { + if (test_bit(SKIP_QMI, &quirks)) + return 0; + if (test_bit(ICNSS_FW_DOWN, &penv->state)) { icnss_pr_dbg("FW down, ignoring wlan_disable state: 0x%lx\n", penv->state); -- GitLab From 7ced4b974d4f0009fbf961b040b7e327a7242f46 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Tue, 20 Oct 2020 02:11:44 -0700 Subject: [PATCH 1251/1304] 
scsi: ufs: Add back a missing sanity check to ufshcd_read_desc_param() Commit a78751da13c9e ("scsi: ufs: Fix unexpected values get from ufshcd_read_desc_param()") wrongly deletes a condition check for tmp buffer allocation, this change adds it back. Change-Id: I0f27f30c90fd6592e10af039ce8227d48a85c790 Signed-off-by: Can Guo --- drivers/scsi/ufs/ufshcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index d3fd46eb6ca6..133316c0139f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -4569,7 +4569,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, } /* Check whether we need temp memory */ - if (param_offset != 0) { + if (param_offset != 0 || param_size < buff_len) { desc_buf = kzalloc(buff_len, GFP_KERNEL); if (!desc_buf) return -ENOMEM; -- GitLab From 1bbefa38096244568eacdc298512873d4a9a0e9e Mon Sep 17 00:00:00 2001 From: Nirmal Abraham Date: Fri, 16 Oct 2020 16:15:38 +0530 Subject: [PATCH 1252/1304] fbdev: msm: disable cpu sync during dma_buf_map_attachment In some usecases, the CPU_SYNC attribute is causing around 2 - 2.5ms execution time per call to dma_buf_map_attachment while mapping the display buffers causing frame drops. Disable CPU SYNC to resolve this. 
Change-Id: I5bdf2049b265d7ed8b0a42ac92bea57296aca9f6 Signed-off-by: Nirmal Abraham --- drivers/video/fbdev/msm/mdss_mdp_util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c index 2af08beb4fcb..176024d06c0e 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_util.c +++ b/drivers/video/fbdev/msm/mdss_mdp_util.c @@ -1027,7 +1027,7 @@ static int mdss_mdp_get_img(struct msmfb_data *img, data->srcp_attachment->dma_map_attrs |= - DMA_ATTR_DELAYED_UNMAP; + (DMA_ATTR_DELAYED_UNMAP | DMA_ATTR_SKIP_CPU_SYNC); data->srcp_table = dma_buf_map_attachment(data->srcp_attachment, -- GitLab From d4f9da65111363a06b23f507293f100a0f3ba3d2 Mon Sep 17 00:00:00 2001 From: Mukesh Kumar Savaliya Date: Fri, 19 Jun 2020 23:53:08 +0530 Subject: [PATCH 1253/1304] i3c: i3c-master-qcom-geni: Manage probe time resources This change enhances the probe routine and cleanup the resources properly if the failure is observed on previous operations. Also add proper logging to exactly highlight the failure. Change-Id: Idedc9e424a739758c3b8aa08675fb3e06bf67bb2 Signed-off-by: Mukesh Kumar Savaliya --- drivers/i3c/master/i3c-master-qcom-geni.c | 90 ++++++++++++++--------- 1 file changed, 56 insertions(+), 34 deletions(-) diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c index b45dff609b35..3c2afb286c95 100644 --- a/drivers/i3c/master/i3c-master-qcom-geni.c +++ b/drivers/i3c/master/i3c-master-qcom-geni.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -1298,8 +1299,11 @@ static int geni_i3c_master_bus_init(struct i3c_master_controller *m) /* Get an address for the master. 
*/ ret = i3c_master_get_free_addr(m, 0); - if (ret < 0) + if (ret < 0) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s: error No free addr:%d\n", __func__, ret); goto err_cleanup; + } info.dyn_addr = ret; info.dcr = I3C_DCR_GENERIC_DEVICE; @@ -1742,20 +1746,19 @@ static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c, (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH)); if (ret) { GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, - "geni_se_resources_init\n"); + "geni_se_resources_init Failed:%d\n", ret); return ret; } ret = device_property_read_u32(&pdev->dev, "se-clock-frequency", - &gi3c->clk_src_freq); + &gi3c->clk_src_freq); if (ret) { GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "SE clk freq not specified, default to 100 MHz.\n"); gi3c->clk_src_freq = 100000000; } - ret = device_property_read_u32(&pdev->dev, "dfs-index", - &gi3c->dfs_idx); + ret = device_property_read_u32(&pdev->dev, "dfs-index", &gi3c->dfs_idx); if (ret) gi3c->dfs_idx = 0xf; @@ -1848,8 +1851,7 @@ static int i3c_ibi_rsrcs_init(struct geni_i3c_dev *gi3c, IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), gi3c); if (ret) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "Request_irq failed:%d: err:%d\n", - gi3c->ibi.mngr_irq, ret); + "Request_irq:%d: err:%d\n", gi3c->ibi.mngr_irq, ret); return ret; } @@ -1871,12 +1873,11 @@ static int i3c_ibi_rsrcs_init(struct geni_i3c_dev *gi3c, } ret = devm_request_irq(&pdev->dev, gi3c->ibi.gpii_irq[0], - geni_i3c_ibi_irq, IRQF_TRIGGER_HIGH, - dev_name(&pdev->dev), gi3c); + geni_i3c_ibi_irq, IRQF_TRIGGER_HIGH, + dev_name(&pdev->dev), gi3c); if (ret) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "Request_irq failed:%d: err:%d\n", - gi3c->ibi.gpii_irq[0], ret); + "Request_irq failed:%d: err:%d\n", gi3c->ibi.gpii_irq[0], ret); return ret; } @@ -1907,58 +1908,68 @@ static int geni_i3c_probe(struct platform_device *pdev) gi3c->se.dev = &pdev->dev; gi3c->ipcl = ipc_log_context_create(4, dev_name(gi3c->se.dev), 0); + if (!gi3c->ipcl) + dev_info(&pdev->dev, "Error creating IPC Log\n"); 
ret = i3c_geni_rsrcs_init(gi3c, pdev); - if (ret) - return ret; + if (ret) { + GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, + "Error:%d i3c_geni_rsrcs_init\n", ret); + goto cleanup_init; + } ret = i3c_geni_rsrcs_clk_init(gi3c); - if (ret) - return ret; + if (ret) { + GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, + "Error:%d i3c_geni_rsrcs_clk_init\n", ret); + goto cleanup_init; + } gi3c->irq = platform_get_irq(pdev, 0); if (gi3c->irq < 0) { + ret = gi3c->irq; GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "IRQ error for i3c-master-geni\n"); - return gi3c->irq; + "IRQ error=%d for i3c-master-geni\n", ret); + goto cleanup_init; } init_completion(&gi3c->done); mutex_init(&gi3c->lock); spin_lock_init(&gi3c->spinlock); platform_set_drvdata(pdev, gi3c); + + /* Keep interrupt disabled so the system can enter low-power mode */ + irq_set_status_flags(gi3c->irq, IRQ_NOAUTOEN); ret = devm_request_irq(&pdev->dev, gi3c->irq, geni_i3c_irq, IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), gi3c); if (ret) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "Request_irq failed:%d: err:%d\n", - gi3c->irq, ret); - return ret; + "i3c irq failed:%d: err:%d\n", gi3c->irq, ret); + goto cleanup_init; } - /* Disable the interrupt so that the system can enter low-power mode */ - disable_irq(gi3c->irq); ret = se_geni_resources_on(&gi3c->se.i3c_rsc); if (ret) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, "Error turning on resources %d\n", ret); - return ret; + goto cleanup_init; } proto = get_se_proto(gi3c->se.base); if (proto != I3C) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, "Invalid proto %d\n", proto); - se_geni_resources_off(&gi3c->se.i3c_rsc); - return -ENXIO; + ret = -ENXIO; + goto geni_resources_off; } se_mode = geni_read_reg(gi3c->se.base, GENI_IF_FIFO_DISABLE_RO); if (se_mode) { + /* GSI mode not supported */ GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, "Non supported mode %d\n", se_mode); - se_geni_resources_off(&gi3c->se.i3c_rsc); - return -ENXIO; + ret = -ENXIO; + goto 
geni_resources_off; } tx_depth = get_tx_fifo_depth(gi3c->se.base); @@ -1979,8 +1990,9 @@ static int geni_i3c_probe(struct platform_device *pdev) ret = i3c_ibi_rsrcs_init(gi3c, pdev); if (ret) { - se_geni_resources_off(&gi3c->se.i3c_rsc); - return ret; + GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, + "Error: %d, i3c_ibi_rsrcs_init\n", ret); + goto geni_resources_off; } se_geni_resources_off(&gi3c->se.i3c_rsc); @@ -1995,16 +2007,26 @@ static int geni_i3c_probe(struct platform_device *pdev) ret = i3c_master_register(&gi3c->ctrlr, &pdev->dev, &geni_i3c_master_ops, false); - if (ret) - GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, - "i3c_master_register failed:%d\n", ret); + if (ret) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "I3C master registration failed=%d, continue\n", ret); + + /* NOTE : This may fail on 7E NACK, but should return 0 */ + ret = 0; + } //enable hot-join IRQ also geni_write_reg(~0u, gi3c->se.ibi_base, IBI_GEN_IRQ_EN); - GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "I3C probed\n"); + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, "I3C probed:%d\n", ret); + return ret; - return 0; +geni_resources_off: + se_geni_resources_off(&gi3c->se.i3c_rsc); + +cleanup_init: + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, "I3C probe failed\n"); + return ret; } static int geni_i3c_remove(struct platform_device *pdev) -- GitLab From 25c1e316e0ba69394675eb6a532b6b25a8f56b1c Mon Sep 17 00:00:00 2001 From: Naman Padhiar Date: Tue, 20 Oct 2020 23:44:51 +0530 Subject: [PATCH 1254/1304] icnss2: Avoid race between SOC WAKE REQ/RESP There is a race between soc wake release in event queue and soc wake request called from driver. Due to which ref count is not updated properly and results in number of soc wake release send to FW is more than number of soc wake request. Fix it by atomically incrementing ref count. 
Change-Id: I1e134af8a1170d804aeaea53ea6bd288bbcb42b8 Signed-off-by: Naman Padhiar --- drivers/soc/qcom/icnss2/main.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c index a24323780645..f6e988686632 100644 --- a/drivers/soc/qcom/icnss2/main.c +++ b/drivers/soc/qcom/icnss2/main.c @@ -2329,7 +2329,6 @@ EXPORT_SYMBOL(icnss_set_fw_log_mode); int icnss_force_wake_request(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); - int count = 0; if (!dev) return -ENODEV; @@ -2339,9 +2338,9 @@ int icnss_force_wake_request(struct device *dev) return -EINVAL; } - if (atomic_read(&priv->soc_wake_ref_count)) { - count = atomic_inc_return(&priv->soc_wake_ref_count); - icnss_pr_dbg("SOC already awake, Ref count: %d", count); + if (atomic_inc_not_zero(&priv->soc_wake_ref_count)) { + icnss_pr_dbg("SOC already awake, Ref count: %d", + atomic_read(&priv->soc_wake_ref_count)); return 0; } -- GitLab From f549b9e1ff001eebdec5fad8885f9f51a528048f Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Mon, 24 Aug 2020 19:44:06 +0530 Subject: [PATCH 1255/1304] serial: msm_geni_serial: memset RX buffer to Zero Currently driver allocating RX buffer once in port open and use the same buffer for each RX transfer. This buffer is not cleared in driver. In some scenarios when SW got DMA_DONE interrupt and the RX buffer we are still seeing the previously completed RX data in DDR buffer. DMA_DONE from HW doesn't confirm that the DATA is copied to DDR, sometimes we are queuing the stale data from previous transfer to tty flip_buffer. This change is to memset RX buffer to zero after tty_flip_buffer_push so that memset change will easily help us to identify suck scenarios. 
Change-Id: If14bdf0377cd6764280f515b1b8e67f5629e32ce Signed-off-by: Chandana Kishori Chiluveru --- drivers/tty/serial/msm_geni_serial.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index ec7534cd49ef..ae2285f681f9 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -1743,6 +1743,14 @@ static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx) tty_flip_buffer_push(tport); dump_ipc(msm_port->ipc_log_rx, "DMA Rx", (char *)msm_port->rx_buf, 0, rx_bytes); + + /* + * DMA_DONE interrupt doesn't confirm that the DATA is copied to + * DDR memory, sometimes we are queuing the stale data from previous + * transfer to tty flip_buffer, adding memset to zero + * change to idenetify such scenario. + */ + memset(msm_port->rx_buf, 0, rx_bytes); exit_handle_dma_rx: return ret; -- GitLab From d7a0d1e124f2246a0b84ea0dc221560b76c83f52 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Thu, 24 Sep 2020 09:05:53 -0600 Subject: [PATCH 1256/1304] msm: kgsl: Don't allow re-importing memory owned by KGSL Don't allow IOCTL_KGSL_MAP_USER_MEM to import user memory that was already allocated and mapped by KGSL in the first place. Remapping memory never makes sense and it messes up reference counting in the pools. 
Change-Id: Ic0dedbade96ac6b30dcbbb794bf57a597f1bb351 Signed-off-by: Jordan Crouse Signed-off-by: Pranav Patel --- drivers/gpu/msm/kgsl.c | 56 +++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 9f1eaa28f3c0..07ee99671d36 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -2402,14 +2402,6 @@ long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid( return ret; } -static inline int _check_region(unsigned long start, unsigned long size, - uint64_t len) -{ - uint64_t end = ((uint64_t) start) + size; - - return (end > len); -} - static int check_vma_flags(struct vm_area_struct *vma, unsigned int flags) { @@ -2424,23 +2416,27 @@ static int check_vma_flags(struct vm_area_struct *vma, return -EFAULT; } -static int check_vma(struct vm_area_struct *vma, struct file *vmfile, - struct kgsl_memdesc *memdesc) +static int check_vma(unsigned long hostptr, u64 size) { - if (vma == NULL || vma->vm_file != vmfile) - return -EINVAL; + struct vm_area_struct *vma; + unsigned long cur = hostptr; - /* userspace may not know the size, in which case use the whole vma */ - if (memdesc->size == 0) - memdesc->size = vma->vm_end - vma->vm_start; - /* range checking */ - if (vma->vm_start != memdesc->useraddr || - (memdesc->useraddr + memdesc->size) != vma->vm_end) - return -EINVAL; - return check_vma_flags(vma, memdesc->flags); + while (cur < (hostptr + size)) { + vma = find_vma(current->mm, cur); + if (!vma) + return false; + + /* Don't remap memory that we already own */ + if (vma->vm_file && vma->vm_file->f_op == &kgsl_fops) + return false; + + cur = vma->vm_end; + } + + return true; } -static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile) +static int memdesc_sg_virt(struct kgsl_memdesc *memdesc) { int ret = 0; long npages = 0, i; @@ -2463,18 +2459,16 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, struct file *vmfile) } 
down_read(¤t->mm->mmap_sem); - /* If we have vmfile, make sure we map the correct vma and map it all */ - if (vmfile != NULL) - ret = check_vma(find_vma(current->mm, memdesc->useraddr), - vmfile, memdesc); - - if (ret == 0) { - npages = get_user_pages(memdesc->useraddr, - sglen, write, pages, NULL); - ret = (npages < 0) ? (int)npages : 0; + if (!check_vma(memdesc->useraddr, memdesc->size)) { + up_read(¤t->mm->mmap_sem); + ret = -EFAULT; + goto out; } + + npages = get_user_pages(memdesc->useraddr, sglen, write, pages, NULL); up_read(¤t->mm->mmap_sem); + ret = (npages < 0) ? (int)npages : 0; if (ret) goto out; @@ -2525,7 +2519,7 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable, entry->memdesc.gpuaddr = (uint64_t) entry->memdesc.useraddr; } - return memdesc_sg_virt(&entry->memdesc, NULL); + return memdesc_sg_virt(&entry->memdesc); } #ifdef CONFIG_DMA_SHARED_BUFFER -- GitLab From 7265cb7f4b210c7c9294f5144a013d33db91489f Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Tue, 6 Oct 2020 18:58:55 +0530 Subject: [PATCH 1257/1304] msm_geni_serial: Add ioctl for adding new IPC log in uart Add ioctl for adding new IPC log buffer in UART driver. If BT host driver observes a fault in BT transfers this ioctl needs to be called. On this ioctl call uart driver will change the logging to new file so that the issue scenario could be preserved in old UART IPC log files. 
Change-Id: I0da719159d20f60602922f482467178d038b443c Signed-off-by: Prudhvi Yarlagadda Signed-off-by: Chandana Kishori Chiluveru --- drivers/platform/msm/qcom-geni-se.c | 11 ++++++----- drivers/tty/serial/msm_geni_serial.c | 22 ++++++++++++++++++++++ include/uapi/asm-generic/ioctls.h | 1 + 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c index d31e9bd4a781..16e799ba9367 100644 --- a/drivers/platform/msm/qcom-geni-se.c +++ b/drivers/platform/msm/qcom-geni-se.c @@ -1576,7 +1576,8 @@ void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base, geni_se_dev->bus_bw_noc))) return; if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list))) { - GENI_SE_DBG(ipc, false, NULL, "%s: Clocks not on\n", __func__); + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "%s: Clocks not on\n", + __func__); return; } m_cmd0 = geni_read_reg(base, SE_GENI_M_CMD0); @@ -1596,16 +1597,16 @@ void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base, se_dma_tx_len = geni_read_reg(base, SE_DMA_TX_LEN); se_dma_tx_len_in = geni_read_reg(base, SE_DMA_TX_LEN_IN); - GENI_SE_DBG(ipc, false, NULL, + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "%s: m_cmd0:0x%x, m_irq_status:0x%x, s_irq_status:0x%x, geni_status:0x%x, geni_ios:0x%x\n", __func__, m_cmd0, m_irq_status, s_irq_status, geni_status, geni_ios); - GENI_SE_DBG(ipc, false, NULL, + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "dma_rx_irq:0x%x, dma_tx_irq:0x%x, rx_fifo_sts:0x%x, tx_fifo_sts:0x%x\n" , dma_rx_irq, dma_tx_irq, rx_fifo_status, tx_fifo_status); - GENI_SE_DBG(ipc, false, NULL, + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "se_dma_dbg:0x%x, m_cmd_ctrl:0x%x, dma_rxlen:0x%x, dma_rxlen_in:0x%x\n", se_dma_dbg, m_cmd_ctrl, se_dma_rx_len, se_dma_rx_len_in); - GENI_SE_DBG(ipc, false, NULL, + GENI_SE_ERR(ipc, true, rsc->ctrl_dev, "dma_txlen:0x%x, dma_txlen_in:0x%x\n", se_dma_tx_len, se_dma_tx_len_in); } EXPORT_SYMBOL(geni_se_dump_dbg_regs); diff --git 
a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index ae2285f681f9..6e21f3a4d232 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -188,6 +188,7 @@ struct msm_geni_serial_port { void *ipc_log_pwr; void *ipc_log_misc; void *console_log; + void *ipc_log_single; unsigned int cur_baud; int ioctl_count; int edge_count; @@ -568,6 +569,7 @@ static int msm_geni_serial_ioctl(struct uart_port *uport, unsigned int cmd, unsigned long arg) { int ret = -ENOIOCTLCMD; + struct msm_geni_serial_port *port = GET_DEV_PORT(uport); switch (cmd) { case TIOCPMGET: { @@ -582,6 +584,16 @@ static int msm_geni_serial_ioctl(struct uart_port *uport, unsigned int cmd, ret = !pm_runtime_status_suspended(uport->dev); break; } + case TIOCFAULT: { + geni_se_dump_dbg_regs(&port->serial_rsc, + uport->membase, port->ipc_log_misc); + port->ipc_log_rx = port->ipc_log_single; + port->ipc_log_tx = port->ipc_log_single; + port->ipc_log_misc = port->ipc_log_single; + port->ipc_log_pwr = port->ipc_log_single; + ret = 0; + break; + } default: break; } @@ -2888,6 +2900,16 @@ static void msm_geni_serial_debug_init(struct uart_port *uport, bool console) if (!msm_port->ipc_log_misc) dev_info(uport->dev, "Err in Misc IPC Log\n"); } + /* New set of UART IPC log for RX Invalid case */ + memset(name, 0, sizeof(name)); + if (!msm_port->ipc_log_single) { + scnprintf(name, sizeof(name), "%s%s", + dev_name(uport->dev), "_single"); + msm_port->ipc_log_single = ipc_log_context_create( + IPC_LOG_MISC_PAGES, name, 0); + if (!msm_port->ipc_log_single) + dev_info(uport->dev, "Err in single IPC Log\n"); + } } else { memset(name, 0, sizeof(name)); if (!msm_port->console_log) { diff --git a/include/uapi/asm-generic/ioctls.h b/include/uapi/asm-generic/ioctls.h index 457479af9916..44ccfd8818f2 100644 --- a/include/uapi/asm-generic/ioctls.h +++ b/include/uapi/asm-generic/ioctls.h @@ -79,6 +79,7 @@ #define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state 
*/ #define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */ #define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */ +#define TIOCFAULT 0x544C /* Uart fault */ #define TIOCPMGET 0x544D /* PM get */ #define TIOCPMPUT 0x544E /* PM put */ #define TIOCPMACT 0x544F /* PM is active */ -- GitLab From 660cc57548e3c68666d31b0fd4e9885664ebe3b0 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Thu, 10 Sep 2020 13:29:46 +0530 Subject: [PATCH 1258/1304] serial: msm_geni_serial: Add delay for rx invalid transfer Check for RX data in rx buffer for faulty transfer and add delay of 2 msecs delay in order for dma rx transfer to be actually completed. Also dump required qup registers when the uart dma rx data is seen as all zeros. Change-Id: Ifaa0cb7d22b8419ffacfee7212b0a28ed8ebf2a1 Signed-off-by: Chandana Kishori Chiluveru --- drivers/tty/serial/msm_geni_serial.c | 38 ++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 6e21f3a4d232..99d4e5915e0c 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -1712,6 +1712,40 @@ static int msm_geni_serial_handle_tx(struct uart_port *uport, bool done, return 0; } +static void check_rx_buf(char *buf, struct uart_port *uport, int size) +{ + struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); + unsigned int rx_data; + bool fault = false; + + rx_data = *(u32 *)buf; + /* check for first 4 bytes of RX data for faulty zero pattern */ + if (rx_data == 0x0) { + if (size <= 4) { + fault = true; + } else { + /* + * check for last 4 bytes of data in RX buffer for + * faulty pattern + */ + if (memcmp(buf+(size-4), "\x0\x0\x0\x0", 4) == 0) + fault = true; + } + + if (fault) { + IPC_LOG_MSG(msm_port->ipc_log_rx, + "RX Invalid packet %s\n", __func__); + geni_se_dump_dbg_regs(&msm_port->serial_rsc, + uport->membase, msm_port->ipc_log_misc); + /* + * Add 2 msecs delay in order 
for dma rx transfer + * to be actually completed. + */ + udelay(2000); + } + } +} + static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx) { struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); @@ -1740,6 +1774,10 @@ static int msm_geni_serial_handle_dma_rx(struct uart_port *uport, bool drop_rx) __func__, rx_bytes); goto exit_handle_dma_rx; } + + /* Check RX buffer data for faulty pattern*/ + check_rx_buf((char *)msm_port->rx_buf, uport, rx_bytes); + if (drop_rx) goto exit_handle_dma_rx; -- GitLab From 91703cf8c654454cfca4f8dc6f8ced9c9c6e7079 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Thu, 4 Jun 2020 09:12:26 +0530 Subject: [PATCH 1259/1304] serial: msm_geni_serial: Add new UART IPC log file in DMA mode This patch adds new IPC log file to dump IRQ registers to know the interrupt status in dma mode. This will help in debugging interrupt storm issues in dma mode. Also increase the IPC log buffer size. Change-Id: I33a32e8d237769808a9c7553c9c3e6d1024d9f50 Signed-off-by: Chandana Kishori Chiluveru --- drivers/tty/serial/msm_geni_serial.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 99d4e5915e0c..deb2c0266baa 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -114,9 +114,9 @@ #define WAIT_XFER_MAX_ITER (2) #define WAIT_XFER_MAX_TIMEOUT_US (10000) #define WAIT_XFER_MIN_TIMEOUT_US (9000) -#define IPC_LOG_PWR_PAGES (6) -#define IPC_LOG_MISC_PAGES (10) -#define IPC_LOG_TX_RX_PAGES (10) +#define IPC_LOG_PWR_PAGES (10) +#define IPC_LOG_MISC_PAGES (30) +#define IPC_LOG_TX_RX_PAGES (30) #define DATA_BYTES_PER_LINE (32) #define M_IRQ_BITS (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN |\ @@ -189,6 +189,7 @@ struct msm_geni_serial_port { void *ipc_log_misc; void *console_log; void *ipc_log_single; + void *ipc_log_irqstatus; unsigned int cur_baud; int 
ioctl_count; int edge_count; @@ -1944,6 +1945,12 @@ static void msm_geni_serial_handle_isr(struct uart_port *uport, dma_rx_status = geni_read_reg_nolog(uport->membase, SE_DMA_RX_IRQ_STAT); + if (m_irq_status || s_irq_status || + dma_tx_status || dma_rx_status) + IPC_LOG_MSG(msm_port->ipc_log_irqstatus, + "%s: sirq:0x%x mirq:0x%x dma_txirq:0x%x dma_rxirq:0x%x\n", + __func__, s_irq_status, m_irq_status, + dma_tx_status, dma_rx_status); if (dma_tx_status) { geni_write_reg_nolog(dma_tx_status, uport->membase, @@ -2895,7 +2902,7 @@ static void console_unregister(struct uart_driver *drv) static void msm_geni_serial_debug_init(struct uart_port *uport, bool console) { struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport); - char name[30]; + char name[35]; msm_port->dbg = debugfs_create_dir(dev_name(uport->dev), NULL); if (IS_ERR_OR_NULL(msm_port->dbg)) @@ -2948,6 +2955,15 @@ static void msm_geni_serial_debug_init(struct uart_port *uport, bool console) if (!msm_port->ipc_log_single) dev_info(uport->dev, "Err in single IPC Log\n"); } + memset(name, 0, sizeof(name)); + if (!msm_port->ipc_log_irqstatus) { + scnprintf(name, sizeof(name), "%s%s", + dev_name(uport->dev), "_irqstatus"); + msm_port->ipc_log_irqstatus = ipc_log_context_create( + IPC_LOG_MISC_PAGES, name, 0); + if (!msm_port->ipc_log_irqstatus) + dev_info(uport->dev, "Err in irqstatus IPC Log\n"); + } } else { memset(name, 0, sizeof(name)); if (!msm_port->console_log) { -- GitLab From 2a4b18237acfb92592dc6e68fe51c5b8750f9408 Mon Sep 17 00:00:00 2001 From: Ziqi Chen Date: Wed, 21 Oct 2020 16:22:33 +0800 Subject: [PATCH 1260/1304] scsi: ufs: reomove Rst_N pulling up action in ufshcd_resume() There are two times H/W reset during UFS power on. It will impact NAND lifetime. We just need to keep one and remove the other one. 
Change-Id: Ie0ddbc458d36d0b7fb7e3786775432efea850e10 Signed-off-by: Ziqi Chen --- drivers/scsi/ufs/ufshcd.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index d3fd46eb6ca6..f958da8e25df 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -10540,13 +10540,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) /* enable the host irq as host controller would be active soon */ ufshcd_enable_irq(hba); - /* Pull up RST_n before device reset */ - if (ufshcd_is_link_off(hba)) { - ret = ufshcd_deassert_device_reset(hba); - if (ret) - goto disable_irq_and_vops_clks; - } - /* * Call vendor specific resume callback. As these callbacks may access * vendor specific host controller register space call them when the @@ -10554,7 +10547,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) */ ret = ufshcd_vops_resume(hba, pm_op); if (ret) - goto assert_device_reset; + goto disable_irq_and_vops_clks; if (ufshcd_is_link_hibern8(hba)) { ret = ufshcd_uic_hibern8_exit(hba); @@ -10645,9 +10638,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) hba->hibern8_on_idle.state = HIBERN8_ENTERED; vendor_suspend: ufshcd_vops_suspend(hba, pm_op); -assert_device_reset: - if (ufshcd_is_link_off(hba)) - ufshcd_assert_device_reset(hba); disable_irq_and_vops_clks: ufshcd_disable_irq(hba); if (hba->clk_scaling.is_allowed) -- GitLab From 670dc321d02b34b9a58f01cabc8762c70e9c25cd Mon Sep 17 00:00:00 2001 From: Ashok Raj Deenadayalan Date: Sun, 30 Aug 2020 16:04:17 +0530 Subject: [PATCH 1261/1304] msm: ipa: Add support of IPA2 driver Add support of IPAv2 driver for sdm660 target Make following changes: - Fix copyright info. 
- Fix checkpatch warnings and errors (Require some changes in indentation) Change-Id: I831beb3f9b6e376803001a37d022914735dc7bf8 Signed-off-by: Praveen Kurapati Signed-off-by: Ashok Raj Deenadayalan Signed-off-by: Swetha Chikkaboraiah --- drivers/platform/msm/ipa/Makefile | 1 + drivers/platform/msm/ipa/ipa_v2/Makefile | 9 + drivers/platform/msm/ipa/ipa_v2/ipa.c | 5120 ++++++++++++++++ drivers/platform/msm/ipa/ipa_v2/ipa_client.c | 930 +++ drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 2327 ++++++++ drivers/platform/msm/ipa/ipa_v2/ipa_dma.c | 902 +++ drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 3736 ++++++++++++ drivers/platform/msm/ipa/ipa_v2/ipa_flt.c | 1549 +++++ drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c | 1580 +++++ drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h | 443 ++ drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 1973 ++++++ .../platform/msm/ipa/ipa_v2/ipa_interrupts.c | 380 ++ drivers/platform/msm/ipa/ipa_v2/ipa_intf.c | 835 +++ drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c | 336 ++ drivers/platform/msm/ipa/ipa_v2/ipa_nat.c | 878 +++ .../platform/msm/ipa/ipa_v2/ipa_qmi_service.c | 1274 ++++ .../platform/msm/ipa/ipa_v2/ipa_qmi_service.h | 333 ++ .../msm/ipa/ipa_v2/ipa_qmi_service_v01.c | 2418 ++++++++ .../platform/msm/ipa/ipa_v2/ipa_ram_mmap.h | 553 ++ drivers/platform/msm/ipa/ipa_v2/ipa_reg.h | 312 + drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 1669 ++++++ drivers/platform/msm/ipa/ipa_v2/ipa_trace.h | 145 + drivers/platform/msm/ipa/ipa_v2/ipa_uc.c | 940 +++ drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c | 959 +++ drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c | 448 ++ .../msm/ipa/ipa_v2/ipa_uc_offload_i.h | 608 ++ drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 1892 ++++++ drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 5271 +++++++++++++++++ drivers/platform/msm/ipa/ipa_v2/ipa_wdi3_i.c | 557 ++ drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 3237 ++++++++++ .../msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c | 414 ++ drivers/platform/msm/ipa/ipa_v2/teth_bridge.c | 233 + 32 files changed, 
42262 insertions(+) create mode 100644 drivers/platform/msm/ipa/ipa_v2/Makefile create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_client.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_dma.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_dp.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_flt.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_intf.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_nat.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_reg.h create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_rt.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_trace.h create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_uc.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_utils.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/ipa_wdi3_i.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c create mode 100644 drivers/platform/msm/ipa/ipa_v2/teth_bridge.c diff 
--git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile index d1c26b6bed6d..d1e4d52dcb80 100644 --- a/drivers/platform/msm/ipa/Makefile +++ b/drivers/platform/msm/ipa/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common obj-$(CONFIG_IPA_UT) += test/ diff --git a/drivers/platform/msm/ipa/ipa_v2/Makefile b/drivers/platform/msm/ipa/ipa_v2/Makefile new file mode 100644 index 000000000000..b70f49a9e1e6 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_IPA) += ipat.o +ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \ + ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \ + ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \ + ipa_wdi3_i.o + +obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c new file mode 100644 index 000000000000..5425d1d57a6a --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -0,0 +1,5120 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "../ipa_rm_i.h" + +#define CREATE_TRACE_POINTS +#include "ipa_trace.h" + +#define IPA_SUMMING_THRESHOLD (0x10) +#define IPA_PIPE_MEM_START_OFST (0x0) +#define IPA_PIPE_MEM_SIZE (0x0) +#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \ + x == IPA_MODE_MOBILE_AP_WAN || \ + x == IPA_MODE_MOBILE_AP_WLAN) +#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL) +#define IPA_A5_MUX_HEADER_LENGTH (8) +#define IPA_ROUTING_RULE_BYTE_SIZE (4) +#define IPA_BAM_CNFG_BITS_VALv1_1 (0x7FFFE004) +#define IPA_BAM_CNFG_BITS_VALv2_0 (0xFFFFE004) +#define IPA_STATUS_CLEAR_OFST (0x3f28) +#define IPA_STATUS_CLEAR_SIZE (32) + +#define IPA_AGGR_MAX_STR_LENGTH (10) + +#define CLEANUP_TAG_PROCESS_TIMEOUT 150 + +#define IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048 + +#define IPA2_ACTIVE_CLIENT_LOG_TYPE_EP 0 +#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1 +#define IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2 +#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3 + +#define MAX_POLLING_ITERATION 40 +#define MIN_POLLING_ITERATION 1 +#define ONE_MSEC 1 + +#define IPA_AGGR_STR_IN_BYTES(str) \ + (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1) + +#define IPA_SPS_PROD_TIMEOUT_MSEC 100 + +#ifdef CONFIG_COMPAT +#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR, \ + compat_uptr_t) +#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_RT_RULE, \ + compat_uptr_t) +#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + 
IPA_IOCTL_DEL_FLT_RULE, \ + compat_uptr_t) +#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_RT_TBL, \ + compat_uptr_t) +#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_COPY_HDR, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_TX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_RX_PROPS, \ + compat_uptr_t) +#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_INTF_EXT_PROPS, \ + compat_uptr_t) +#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_HDR, \ + compat_uptr_t) +#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ALLOC_NAT_MEM, \ + compat_uptr_t) +#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_INIT_NAT, \ + compat_uptr_t) +#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NAT_DMA, \ + compat_uptr_t) +#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_V4_DEL_NAT, \ + compat_uptr_t) +#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GET_NAT_OFFSET, \ + compat_uptr_t) +#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_PULL_MSG, \ + compat_uptr_t) +#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_ADD_DEPENDENCY, \ + compat_uptr_t) +#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_RM_DEL_DEPENDENCY, \ + compat_uptr_t) +#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_GENERATE_FLT_EQ, \ + compat_uptr_t) +#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_QUERY_RT_TBL_INDEX, \ + compat_uptr_t) +#define IPA_IOC_WRITE_QMAPID32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_WRITE_QMAPID, \ + compat_uptr_t) +#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_FLT_RULE, \ + compat_uptr_t) +#define 
IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \ + compat_uptr_t) +#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \ + compat_uptr_t) +#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_HDR_PROC_CTX, \ + compat_uptr_t) +#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_MDFY_RT_RULE, \ + compat_uptr_t) + +/** + * struct ipa_ioc_nat_alloc_mem32 - nat table memory allocation + * properties + * @dev_name: input parameter, the name of table + * @size: input parameter, size of table in bytes + * @offset: output parameter, offset into page in case of system memory + */ +struct ipa_ioc_nat_alloc_mem32 { + char dev_name[IPA_RESOURCE_NAME_MAX]; + compat_size_t size; + compat_off_t offset; +}; +#endif + +static void ipa_start_tag_process(struct work_struct *work); +static DECLARE_WORK(ipa_tag_work, ipa_start_tag_process); + +static void ipa_sps_release_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa_sps_release_resource_work, + ipa_sps_release_resource); + +static struct ipa_plat_drv_res ipa_res = {0, }; + +struct msm_bus_scale_pdata *bus_scale_table; + +static struct clk *ipa_clk_src; +static struct clk *ipa_clk; +static struct clk *smmu_clk; +static struct clk *sys_noc_ipa_axi_clk; +static struct clk *ipa_cnoc_clk; +static struct clk *ipa_inactivity_clk; + +struct ipa_context *ipa_ctx; +static struct device *master_dev; +struct platform_device *ipa_pdev; +static struct { + bool present; + bool arm_smmu; + bool fast_map; + bool s1_bypass; + u32 ipa_base; + u32 ipa_size; +} smmu_info; + +static char *active_clients_table_buf; + +int 
ipa2_active_clients_log_print_buffer(char *buf, int size) +{ + int i; + int nbytes; + int cnt = 0; + int start_idx; + int end_idx; + + start_idx = (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) % + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + end_idx = ipa_ctx->ipa2_active_clients_logging.log_head; + for (i = start_idx; i != end_idx; + i = (i + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) { + nbytes = scnprintf(buf + cnt, size - cnt, "%s\n", + ipa_ctx->ipa2_active_clients_logging + .log_buffer[i]); + cnt += nbytes; + } + + return cnt; +} + +int ipa2_active_clients_log_print_table(char *buf, int size) +{ + int i; + struct ipa2_active_client_htable_entry *iterator; + int cnt = 0; + + cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n"); + hash_for_each(ipa_ctx->ipa2_active_clients_logging.htable, i, + iterator, list) { + switch (iterator->type) { + case IPA2_ACTIVE_CLIENT_LOG_TYPE_EP: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d ENDPOINT\n", + iterator->id_string, iterator->count); + break; + case IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d SIMPLE\n", + iterator->id_string, iterator->count); + break; + case IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d RESOURCE\n", + iterator->id_string, iterator->count); + break; + case IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL: + cnt += scnprintf(buf + cnt, size - cnt, + "%-40s %-3d SPECIAL\n", + iterator->id_string, iterator->count); + break; + default: + IPAERR("Trying to print illegal active_clients type"); + break; + } + } + cnt += scnprintf(buf + cnt, size - cnt, + "\nTotal active clients count: %d\n", + ipa_ctx->ipa_active_clients.cnt); + + return cnt; +} + + +static int ipa2_clean_modem_rule(void) +{ + struct ipa_install_fltr_rule_req_msg_v01 *req; + int val = 0; + + req = kzalloc( + sizeof(struct ipa_install_fltr_rule_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAERR("mem allocated failed!\n"); + return 
-ENOMEM; + } + req->filter_spec_list_valid = false; + req->filter_spec_list_len = 0; + req->source_pipe_index_valid = 0; + val = qmi_filter_request_send(req); + kfree(req); + + return val; +} + +static int ipa2_active_clients_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + ipa_active_clients_lock(); + ipa2_active_clients_log_print_table(active_clients_table_buf, + IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE); + IPAERR("%s", active_clients_table_buf); + ipa_active_clients_unlock(); + + return NOTIFY_DONE; +} + +static struct notifier_block ipa2_active_clients_panic_blk = { + .notifier_call = ipa2_active_clients_panic_notifier, +}; + +static int ipa2_active_clients_log_insert(const char *string) +{ + int head; + int tail; + + head = ipa_ctx->ipa2_active_clients_logging.log_head; + tail = ipa_ctx->ipa2_active_clients_logging.log_tail; + + if (!ipa_ctx->ipa2_active_clients_logging.log_rdy) + return -EPERM; + memset(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], '_', + IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN); + strlcpy(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], string, + (size_t)IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN); + head = (head + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + if (tail == head) + tail = (tail + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; + + ipa_ctx->ipa2_active_clients_logging.log_tail = tail; + ipa_ctx->ipa2_active_clients_logging.log_head = head; + + return 0; +} + +static int ipa2_active_clients_log_init(void) +{ + int i; + + ipa_ctx->ipa2_active_clients_logging.log_buffer[0] = kcalloc( + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES, + sizeof(char[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]), + GFP_KERNEL); + active_clients_table_buf = kzalloc(sizeof( + char[IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL); + if (ipa_ctx->ipa2_active_clients_logging.log_buffer == NULL) { + IPAERR("Active Clients Logging memory allocation failed"); + goto bail; + } + for (i = 0; i < IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) { + 
ipa_ctx->ipa2_active_clients_logging.log_buffer[i] = + ipa_ctx->ipa2_active_clients_logging.log_buffer[0] + + (IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN * i); + } + ipa_ctx->ipa2_active_clients_logging.log_head = 0; + ipa_ctx->ipa2_active_clients_logging.log_tail = + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + hash_init(ipa_ctx->ipa2_active_clients_logging.htable); + atomic_notifier_chain_register(&panic_notifier_list, + &ipa2_active_clients_panic_blk); + ipa_ctx->ipa2_active_clients_logging.log_rdy = true; + + return 0; + +bail: + return -ENOMEM; +} + +void ipa2_active_clients_log_clear(void) +{ + ipa_active_clients_lock(); + ipa_ctx->ipa2_active_clients_logging.log_head = 0; + ipa_ctx->ipa2_active_clients_logging.log_tail = + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; + ipa_active_clients_unlock(); +} + +static void ipa2_active_clients_log_destroy(void) +{ + ipa_ctx->ipa2_active_clients_logging.log_rdy = false; + kfree(active_clients_table_buf); + active_clients_table_buf = NULL; + kfree(ipa_ctx->ipa2_active_clients_logging.log_buffer[0]); + ipa_ctx->ipa2_active_clients_logging.log_head = 0; + ipa_ctx->ipa2_active_clients_logging.log_tail = + IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; +} + +enum ipa_smmu_cb_type { + IPA_SMMU_CB_AP, + IPA_SMMU_CB_WLAN, + IPA_SMMU_CB_UC, + IPA_SMMU_CB_MAX + +}; + +static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX]; + +struct iommu_domain *ipa2_get_smmu_domain(void) +{ + if (smmu_cb[IPA_SMMU_CB_AP].valid) + return smmu_cb[IPA_SMMU_CB_AP].mapping->domain; + + IPAERR("CB not valid\n"); + + return NULL; +} + +struct iommu_domain *ipa2_get_uc_smmu_domain(void) +{ + if (smmu_cb[IPA_SMMU_CB_UC].valid) + return smmu_cb[IPA_SMMU_CB_UC].mapping->domain; + + IPAERR("CB not valid\n"); + + return NULL; +} + +struct iommu_domain *ipa2_get_wlan_smmu_domain(void) +{ + if (smmu_cb[IPA_SMMU_CB_WLAN].valid) + return smmu_cb[IPA_SMMU_CB_WLAN].iommu; + + IPAERR("CB not valid\n"); + + return NULL; +} + +struct device *ipa2_get_dma_dev(void) +{ + 
return ipa_ctx->pdev; +} + +/** + * ipa2_get_smmu_ctx()- Return the smmu context + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void) +{ + return &smmu_cb[IPA_SMMU_CB_AP]; +} + + +/** + * ipa2_get_wlan_smmu_ctx()- Return the wlan smmu context + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void) +{ + return &smmu_cb[IPA_SMMU_CB_WLAN]; +} + +/** + * ipa2_get_uc_smmu_ctx()- Return the uc smmu context + * + * Return value: pointer to smmu context address + */ +struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void) +{ + return &smmu_cb[IPA_SMMU_CB_UC]; +} + +static int ipa_open(struct inode *inode, struct file *filp) +{ + struct ipa_context *ctx = NULL; + + IPADBG_LOW("ENTER\n"); + ctx = container_of(inode->i_cdev, struct ipa_context, cdev); + filp->private_data = ctx; + + return 0; +} + +/** + * ipa_flow_control() - Enable/Disable flow control on a particular client. + * Return codes: + * None + */ +void ipa_flow_control(enum ipa_client_type ipa_client, + bool enable, uint32_t qmap_id) +{ + struct ipa_ep_cfg_ctrl ep_ctrl = {0}; + int ep_idx; + struct ipa_ep_context *ep; + + /* Check if tethered flow control is needed or not.*/ + if (!ipa_ctx->tethered_flow_control) { + IPADBG("Apps flow control is not needed\n"); + return; + } + + /* Check if ep is valid. */ + ep_idx = ipa2_get_ep_mapping(ipa_client); + if (ep_idx == -1) { + IPADBG("Invalid IPA client\n"); + return; + } + + ep = &ipa_ctx->ep[ep_idx]; + if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) { + IPADBG("EP not valid/Not applicable for client.\n"); + return; + } + + spin_lock(&ipa_ctx->disconnect_lock); + /* Check if the QMAP_ID matches. 
*/ + if (ep->cfg.meta.qmap_id != qmap_id) { + IPADBG("Flow control ind not for same flow: %u %u\n", + ep->cfg.meta.qmap_id, qmap_id); + spin_unlock(&ipa_ctx->disconnect_lock); + return; + } + if (!ep->disconnect_in_progress) { + if (enable) { + IPADBG("Enabling Flow\n"); + ep_ctrl.ipa_ep_delay = false; + IPA_STATS_INC_CNT(ipa_ctx->stats.flow_enable); + } else { + IPADBG("Disabling Flow\n"); + ep_ctrl.ipa_ep_delay = true; + IPA_STATS_INC_CNT(ipa_ctx->stats.flow_disable); + } + ep_ctrl.ipa_ep_suspend = false; + ipa2_cfg_ep_ctrl(ep_idx, &ep_ctrl); + } else { + IPADBG("EP disconnect is in progress\n"); + } + spin_unlock(&ipa_ctx->disconnect_lock); +} + +static void ipa_wan_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + if (type != WAN_UPSTREAM_ROUTE_ADD && + type != WAN_UPSTREAM_ROUTE_DEL && + type != WAN_EMBMS_CONNECT) { + IPAERR("Wrong type given. buff %p type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, + bool is_cache) +{ + int retval; + struct ipa_wan_msg *wan_msg; + struct ipa_msg_meta msg_meta; + struct ipa_wan_msg cache_wan_msg; + + wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL); + if (!wan_msg) { + IPAERR("no memory\n"); + return -ENOMEM; + } + + if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param, + sizeof(struct ipa_wan_msg))) { + kfree(wan_msg); + return -EFAULT; + } + + memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg)); + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct ipa_wan_msg); + retval = ipa2_send_msg(&msg_meta, wan_msg, ipa_wan_msg_free_cb); + if (retval) { + IPAERR("ipa2_send_msg failed: %d\n", retval); + kfree(wan_msg); + return retval; + } + + if (is_cache) { + mutex_lock(&ipa_ctx->ipa_cne_evt_lock); + + /* cache the cne event */ + memcpy(&ipa_ctx->ipa_cne_evt_req_cache[ + ipa_ctx->num_ipa_cne_evt_req].wan_msg, + 
&cache_wan_msg, + sizeof(cache_wan_msg)); + + memcpy(&ipa_ctx->ipa_cne_evt_req_cache[ + ipa_ctx->num_ipa_cne_evt_req].msg_meta, + &msg_meta, + sizeof(struct ipa_msg_meta)); + + ipa_ctx->num_ipa_cne_evt_req++; + ipa_ctx->num_ipa_cne_evt_req %= IPA_MAX_NUM_REQ_CACHE; + mutex_unlock(&ipa_ctx->ipa_cne_evt_lock); + } + + return 0; +} + + +static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int retval = 0; + u32 pyld_sz; + u8 header[128] = { 0 }; + u8 *param = NULL; + struct ipa_ioc_nat_alloc_mem nat_mem; + struct ipa_ioc_v4_nat_init nat_init; + struct ipa_ioc_v4_nat_del nat_del; + struct ipa_ioc_rm_dependency rm_depend; + size_t sz; + int pre_entry; + + IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd)); + + if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC) + return -ENOTTY; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + switch (cmd) { + case IPA_IOC_ALLOC_NAT_MEM: + if (copy_from_user((u8 *)&nat_mem, (u8 *)arg, + sizeof(struct ipa_ioc_nat_alloc_mem))) { + retval = -EFAULT; + break; + } + /* null terminate the string */ + nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0'; + + if (ipa2_allocate_nat_device(&nat_mem)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, (u8 *)&nat_mem, + sizeof(struct ipa_ioc_nat_alloc_mem))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_V4_INIT_NAT: + if (copy_from_user((u8 *)&nat_init, (u8 *)arg, + sizeof(struct ipa_ioc_v4_nat_init))) { + retval = -EFAULT; + break; + } + if (ipa2_nat_init_cmd(&nat_init)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_NAT_DMA: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_nat_dma_cmd))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_nat_dma_cmd *)header)->entries; + pyld_sz = + sizeof(struct ipa_ioc_nat_dma_cmd) + + pre_entry * sizeof(struct ipa_ioc_nat_dma_one); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + 
retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_nat_dma_cmd *)param)->entries, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_V4_DEL_NAT: + if (copy_from_user((u8 *)&nat_del, (u8 *)arg, + sizeof(struct ipa_ioc_v4_nat_del))) { + retval = -EFAULT; + break; + } + if (ipa2_nat_del_cmd(&nat_del)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_HDR: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_hdr))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_hdr *)header)->num_hdrs; + pyld_sz = + sizeof(struct ipa_ioc_add_hdr) + + pre_entry * sizeof(struct ipa_hdr_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_hdr *)param)->num_hdrs, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_add_hdr_usr((struct ipa_ioc_add_hdr *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_HDR: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_del_hdr))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_hdr *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_hdr) + + pre_entry * sizeof(struct ipa_hdr_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, 
pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_hdr *)param)->num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_del_hdr_by_user((struct ipa_ioc_del_hdr *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_RT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_rt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_rt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_add_rt_rule_usr((struct ipa_ioc_add_rt_rule *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_MDFY_RT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_mdfy_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_mdfy_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_mdfy); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in 
case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_rt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_RT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_del_rt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_rt_rule *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_rt_rule) + + pre_entry * sizeof(struct ipa_rt_rule_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_rt_rule *)param)->num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_FLT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_flt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_add_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct 
ipa_ioc_add_flt_rule *)param)->num_rules + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_flt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_add_flt_rule_usr((struct ipa_ioc_add_flt_rule *)param, + true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_FLT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_del_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_flt_rule *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_flt_rule *)param)-> + num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_MDFY_FLT_RULE: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_mdfy_flt_rule))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules; + pyld_sz = + sizeof(struct ipa_ioc_mdfy_flt_rule) + + pre_entry * sizeof(struct ipa_flt_rule_mdfy); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules + 
!= pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_mdfy_flt_rule *)param)-> + num_rules, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_COMMIT_HDR: + retval = ipa2_commit_hdr(); + break; + case IPA_IOC_RESET_HDR: + retval = ipa2_reset_hdr(false); + break; + case IPA_IOC_COMMIT_RT: + retval = ipa2_commit_rt(arg); + break; + case IPA_IOC_RESET_RT: + retval = ipa2_reset_rt(arg, false); + break; + case IPA_IOC_COMMIT_FLT: + retval = ipa2_commit_flt(arg); + break; + case IPA_IOC_RESET_FLT: + retval = ipa2_reset_flt(arg, false); + break; + case IPA_IOC_GET_RT_TBL: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_get_rt_tbl))) { + retval = -EFAULT; + break; + } + if (ipa2_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_get_rt_tbl))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PUT_RT_TBL: + retval = ipa2_put_rt_tbl(arg); + break; + case IPA_IOC_GET_HDR: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_get_hdr))) { + retval = -EFAULT; + break; + } + if (ipa2_get_hdr((struct ipa_ioc_get_hdr *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_get_hdr))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PUT_HDR: + retval = ipa2_put_hdr(arg); + break; + case IPA_IOC_SET_FLT: + retval = ipa_cfg_filter(arg); + break; + case IPA_IOC_COPY_HDR: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_copy_hdr))) { + retval = -EFAULT; + break; + } + if (ipa2_copy_hdr((struct ipa_ioc_copy_hdr *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_copy_hdr))) { + retval = -EFAULT; + break; + } + 
break; + case IPA_IOC_QUERY_INTF: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_query_intf))) { + retval = -EFAULT; + break; + } + if (ipa_query_intf((struct ipa_ioc_query_intf *)header)) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_query_intf))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_TX_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_tx_props); + if (copy_from_user(header, (u8 *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props + > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_tx_props *) + header)->num_tx_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_tx_intf_prop); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_tx_props *) + param)->num_tx_props + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_tx_props *) + param)->num_tx_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa_query_intf_tx_props( + (struct ipa_ioc_query_intf_tx_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_RX_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_rx_props); + if (copy_from_user(header, (u8 *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props + > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_rx_props *) + header)->num_rx_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_rx_intf_prop); + param = memdup_user((const void __user *)arg, pyld_sz); + if 
(!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_rx_props *) + param)->num_rx_props != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_rx_props *) + param)->num_rx_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa_query_intf_rx_props( + (struct ipa_ioc_query_intf_rx_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_QUERY_INTF_EXT_PROPS: + sz = sizeof(struct ipa_ioc_query_intf_ext_props); + if (copy_from_user(header, (u8 *)arg, sz)) { + retval = -EFAULT; + break; + } + + if (((struct ipa_ioc_query_intf_ext_props *) + header)->num_ext_props > IPA_NUM_PROPS_MAX) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_query_intf_ext_props *) + header)->num_ext_props; + pyld_sz = sz + pre_entry * + sizeof(struct ipa_ioc_ext_intf_prop); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_query_intf_ext_props *) + param)->num_ext_props != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_query_intf_ext_props *) + param)->num_ext_props, pre_entry); + retval = -EFAULT; + break; + } + if (ipa_query_intf_ext_props( + (struct ipa_ioc_query_intf_ext_props *)param)) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_PULL_MSG: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_msg_meta))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_msg_meta *)header)->msg_len; + pyld_sz = sizeof(struct ipa_msg_meta) 
+ + pre_entry; + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_msg_meta *)param)->msg_len + != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_msg_meta *)param)->msg_len, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa_pull_msg((struct ipa_msg_meta *)param, + (char *)param + sizeof(struct ipa_msg_meta), + ((struct ipa_msg_meta *)param)->msg_len) != + ((struct ipa_msg_meta *)param)->msg_len) { + retval = -1; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_RM_ADD_DEPENDENCY: + if (copy_from_user((u8 *)&rm_depend, (u8 *)arg, + sizeof(struct ipa_ioc_rm_dependency))) { + retval = -EFAULT; + break; + } + retval = ipa_rm_add_dependency_from_ioctl( + rm_depend.resource_name, rm_depend.depends_on_name); + break; + case IPA_IOC_RM_DEL_DEPENDENCY: + if (copy_from_user((u8 *)&rm_depend, (u8 *)arg, + sizeof(struct ipa_ioc_rm_dependency))) { + retval = -EFAULT; + break; + } + retval = ipa_rm_delete_dependency_from_ioctl( + rm_depend.resource_name, rm_depend.depends_on_name); + break; + case IPA_IOC_GENERATE_FLT_EQ: + { + struct ipa_ioc_generate_flt_eq flt_eq; + + if (copy_from_user(&flt_eq, (u8 *)arg, + sizeof(struct ipa_ioc_generate_flt_eq))) { + retval = -EFAULT; + break; + } + if (ipa_generate_flt_eq(flt_eq.ip, &flt_eq.attrib, + &flt_eq.eq_attrib)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, &flt_eq, + sizeof(struct ipa_ioc_generate_flt_eq))) { + retval = -EFAULT; + break; + } + break; + } + case IPA_IOC_QUERY_EP_MAPPING: + { + retval = ipa2_get_ep_mapping(arg); + break; + } + case IPA_IOC_QUERY_RT_TBL_INDEX: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_get_rt_tbl_indx))) { + retval = -EFAULT; + break; + } + if 
(ipa2_query_rt_index( + (struct ipa_ioc_get_rt_tbl_indx *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_get_rt_tbl_indx))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_WRITE_QMAPID: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_write_qmapid))) { + retval = -EFAULT; + break; + } + if (ipa2_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, header, + sizeof(struct ipa_ioc_write_qmapid))) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD: + retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD, true); + if (retval) { + IPAERR("ipa_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL: + retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL, true); + if (retval) { + IPAERR("ipa_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED: + retval = ipa_send_wan_msg(arg, WAN_EMBMS_CONNECT, false); + if (retval) { + IPAERR("ipa_send_wan_msg failed: %d\n", retval); + break; + } + break; + case IPA_IOC_ADD_HDR_PROC_CTX: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_add_hdr_proc_ctx))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_add_hdr_proc_ctx *) + header)->num_proc_ctxs; + pyld_sz = + sizeof(struct ipa_ioc_add_hdr_proc_ctx) + + pre_entry * sizeof(struct ipa_hdr_proc_ctx_add); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *) + param)->num_proc_ctxs != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_add_hdr_proc_ctx *) + param)->num_proc_ctxs, pre_entry); + retval = -EFAULT; + 
break; + } + if (ipa2_add_hdr_proc_ctx( + (struct ipa_ioc_add_hdr_proc_ctx *)param, true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case IPA_IOC_DEL_HDR_PROC_CTX: + if (copy_from_user(header, (u8 *)arg, + sizeof(struct ipa_ioc_del_hdr_proc_ctx))) { + retval = -EFAULT; + break; + } + pre_entry = + ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls; + pyld_sz = + sizeof(struct ipa_ioc_del_hdr_proc_ctx) + + pre_entry * sizeof(struct ipa_hdr_proc_ctx_del); + param = memdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + /* add check in case user-space module compromised */ + if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *) + param)->num_hdls != pre_entry)) { + IPAERR_RL("current %d pre %d\n", + ((struct ipa_ioc_del_hdr_proc_ctx *)param)-> + num_hdls, + pre_entry); + retval = -EFAULT; + break; + } + if (ipa2_del_hdr_proc_ctx_by_user( + (struct ipa_ioc_del_hdr_proc_ctx *)param, true)) { + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_GET_HW_VERSION: + pyld_sz = sizeof(enum ipa_hw_type); + param = kmemdup(&ipa_ctx->ipa_hw_type, pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + memcpy(param, &ipa_ctx->ipa_hw_type, pyld_sz); + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_CLEANUP: + /*Route and filter rules will also be clean*/ + IPADBG("Got IPA_IOC_CLEANUP\n"); + retval = ipa2_reset_hdr(true); + memset(&nat_del, 0, sizeof(nat_del)); + nat_del.table_index = 0; + retval = ipa2_nat_del_cmd(&nat_del); + retval = ipa2_clean_modem_rule(); + break; + + case IPA_IOC_QUERY_WLAN_CLIENT: + IPADBG("Got IPA_IOC_QUERY_WLAN_CLIENT\n"); + retval = ipa2_resend_wlan_msg(); + break; + + default: + 
IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -ENOTTY; + } + kfree(param); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return retval; +} + +/** + * ipa_setup_dflt_rt_tables() - Setup default routing tables + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +int ipa_setup_dflt_rt_tables(void) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_rt_rule_add *rt_rule_entry; + + rt_rule = + kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_add), GFP_KERNEL); + if (!rt_rule) { + IPAERR("fail to alloc mem\n"); + return -ENOMEM; + } + /* setup a default v4 route to point to Apps */ + rt_rule->num_rules = 1; + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + + rt_rule_entry = &rt_rule->rules[0]; + rt_rule_entry->at_rear = 1; + rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS; + rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl; + + if (ipa2_add_rt_rule(rt_rule)) { + IPAERR("fail to add dflt v4 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl; + + /* setup a default v6 route to point to A5 */ + rt_rule->ip = IPA_IP_v6; + if (ipa2_add_rt_rule(rt_rule)) { + IPAERR("fail to add dflt v6 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl; + + /* + * because these tables are the very first to be added, they will both + * have the same index (0) which is essential for programming the + * "route" end-point config + */ + + kfree(rt_rule); + + return 0; +} + +static int ipa_setup_exception_path(void) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + struct ipa_route route = { 0 }; + int ret; + + /* install the basic exception header */ + hdr = 
kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add), GFP_KERNEL); + if (!hdr) { + IPAERR("fail to alloc exception hdr\n"); + return -ENOMEM; + } + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) { + strlcpy(hdr_entry->name, IPA_A5_MUX_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + /* set template for the A5_MUX hdr in header addition block */ + hdr_entry->hdr_len = IPA_A5_MUX_HEADER_LENGTH; + } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) { + strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH; + } else { + WARN_ON(1); + } + + if (ipa2_add_hdr(hdr)) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl; + + /* set the route register to pass exception packets to Apps */ + route.route_def_pipe = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + route.route_frag_def_pipe = ipa2_get_ep_mapping( + IPA_CLIENT_APPS_LAN_CONS); + route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl; + + if (ipa_cfg_route(&route)) { + IPAERR("fail to add exception hdr\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + kfree(hdr); + return ret; +} + +static int ipa_init_smem_region(int memory_region_size, + int memory_region_offset) +{ + struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; + struct ipa_desc desc; + struct ipa_mem_buffer mem; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + int rc; + + if (memory_region_size == 0) + return 0; + + memset(&desc, 0, sizeof(desc)); + memset(&mem, 0, sizeof(mem)); + + mem.size = memory_region_size; + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, + &mem.phys_base, GFP_KERNEL); + if (!mem.base) { + IPAERR("failed to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd = kzalloc(sizeof(*cmd), + flag); + if (cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd->size = mem.size; + cmd->system_addr = mem.phys_base; + cmd->local_addr = ipa_ctx->smem_restricted_bytes + + memory_region_offset; + desc.opcode = IPA_DMA_SHARED_MEM; + desc.pyld = cmd; + desc.len = sizeof(*cmd); + desc.type = IPA_IMM_CMD_DESC; + + rc = ipa_send_cmd(1, &desc); + if (rc) { + IPAERR("failed to send immediate command (error %d)\n", rc); + rc = -EFAULT; + } + + kfree(cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + + return rc; +} + +/** + * ipa_init_q6_smem() - Initialize Q6 general memory and + * header memory regions in IPA. 
+ * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate dma memory + * -EFAULT: failed to send IPA command to initialize the memory + */ +int ipa_init_q6_smem(void) +{ + int rc; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0) + rc = ipa_init_smem_region(IPA_MEM_PART(modem_size) - + IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE, + IPA_MEM_PART(modem_ofst)); + else + rc = ipa_init_smem_region(IPA_MEM_PART(modem_size), + IPA_MEM_PART(modem_ofst)); + + if (rc) { + IPAERR("failed to initialize Modem RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_size), + IPA_MEM_PART(modem_hdr_ofst)); + if (rc) { + IPAERR("failed to initialize Modem HDRs RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size), + IPA_MEM_PART(modem_hdr_proc_ctx_ofst)); + if (rc) { + IPAERR("failed to initialize Modem proc ctx RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + rc = ipa_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size), + IPA_MEM_PART(modem_comp_decomp_ofst)); + if (rc) { + IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n"); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return rc; + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return rc; +} + +static void ipa_free_buffer(void *user1, int user2) +{ + kfree(user1); +} + +int ipa_q6_pipe_delay(bool zip_pipes) +{ + u32 reg_val = 0; + int client_idx; + int ep_idx; + + /* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + /* Skip the processing for non Q6 pipes. */ + if (!IPA_CLIENT_IS_Q6_PROD(client_idx)) + continue; + /* Skip the processing for NON-ZIP pipes. */ + else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx)) + continue; + /* Skip the processing for ZIP pipes. 
*/ + else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) + continue; + + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + IPA_SETFIELD_IN_REG(reg_val, 1, + IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CTRL_N_OFST(ep_idx), reg_val); + } + + return 0; +} + +int ipa_q6_monitor_holb_mitigation(bool enable) +{ + int ep_idx; + int client_idx; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx)) { + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + /* Send a command to Uc to enable/disable + * holb monitoring. + */ + ipa_uc_monitor_holb(client_idx, enable); + } + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +static int ipa_q6_avoid_holb(bool zip_pipes) +{ + u32 reg_val; + int ep_idx; + int client_idx; + struct ipa_ep_cfg_ctrl avoid_holb; + + memset(&avoid_holb, 0, sizeof(avoid_holb)); + avoid_holb.ipa_ep_suspend = true; + + /* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + /* Skip the processing for non Q6 pipes. */ + if (!IPA_CLIENT_IS_Q6_CONS(client_idx)) + continue; + /* Skip the processing for NON-ZIP pipes. */ + else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx)) + continue; + /* Skip the processing for ZIP pipes. */ + else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx)) + continue; + + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + /* + * ipa2_cfg_ep_holb is not used here because we are + * setting HOLB on Q6 pipes, and from APPS perspective + * they are not valid, therefore, the above function + * will fail. 
+ */ + reg_val = 0; + IPA_SETFIELD_IN_REG(reg_val, 0, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(ep_idx), + reg_val); + + reg_val = 0; + IPA_SETFIELD_IN_REG(reg_val, 1, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(ep_idx), + reg_val); + + ipa2_cfg_ep_ctrl(ep_idx, &avoid_holb); + } + + return 0; +} + +static u32 ipa_get_max_flt_rt_cmds(u32 num_pipes) +{ + u32 max_cmds = 0; + + /* As many filter tables as there are pipes, x2 for IPv4 and IPv6 */ + max_cmds += num_pipes * 2; + + /* For each of the Modem routing tables */ + max_cmds += (IPA_MEM_PART(v4_modem_rt_index_hi) - + IPA_MEM_PART(v4_modem_rt_index_lo) + 1); + + max_cmds += (IPA_MEM_PART(v6_modem_rt_index_hi) - + IPA_MEM_PART(v6_modem_rt_index_lo) + 1); + + return max_cmds; +} + +static int ipa_q6_clean_q6_tables(void) +{ + struct ipa_desc *desc; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; + int pipe_idx; + int num_cmds = 0; + int index; + int retval; + struct ipa_mem_buffer mem = { 0 }; + u32 *entry; + u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes); + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base, + GFP_ATOMIC); + if (!mem.base) { + IPAERR("failed to alloc DMA buff of size 4\n"); + return -ENOMEM; + } + + mem.size = 4; + entry = mem.base; + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + + desc = kcalloc(max_cmds, sizeof(struct ipa_desc), GFP_KERNEL); + if (!desc) { + IPAERR("failed to allocate memory\n"); + retval = -ENOMEM; + goto bail_dma; + } + + cmd = kcalloc(max_cmds, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), + flag); + if (!cmd) { + IPAERR("failed to allocate memory\n"); + retval = -ENOMEM; + goto bail_desc; + } + + /* + * Iterating over all the pipes which are either invalid but connected + * or connected but not configured by AP. + */ + for (pipe_idx = 0; pipe_idx < ipa_ctx->ipa_num_pipes; pipe_idx++) { + if (!ipa_ctx->ep[pipe_idx].valid || + ipa_ctx->ep[pipe_idx].skip_ep_cfg) { + /* + * Need to point v4 and v6 fltr tables to an empty + * table + */ + cmd[num_cmds].size = mem.size; + cmd[num_cmds].system_addr = mem.phys_base; + cmd[num_cmds].local_addr = + ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst) + 8 + pipe_idx * 4; + + desc[num_cmds].opcode = IPA_DMA_SHARED_MEM; + desc[num_cmds].pyld = &cmd[num_cmds]; + desc[num_cmds].len = sizeof(*cmd); + desc[num_cmds].type = IPA_IMM_CMD_DESC; + num_cmds++; + + cmd[num_cmds].size = mem.size; + cmd[num_cmds].system_addr = mem.phys_base; + cmd[num_cmds].local_addr = + ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst) + 8 + pipe_idx * 4; + + desc[num_cmds].opcode = IPA_DMA_SHARED_MEM; + desc[num_cmds].pyld = &cmd[num_cmds]; + desc[num_cmds].len = sizeof(*cmd); + desc[num_cmds].type = IPA_IMM_CMD_DESC; + num_cmds++; + } + } + + /* Need to point v4/v6 modem routing tables to an empty table */ + for (index = IPA_MEM_PART(v4_modem_rt_index_lo); + index <= IPA_MEM_PART(v4_modem_rt_index_hi); + index++) { + cmd[num_cmds].size = mem.size; + cmd[num_cmds].system_addr = mem.phys_base; + 
cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_ofst) + index * 4; + + desc[num_cmds].opcode = IPA_DMA_SHARED_MEM; + desc[num_cmds].pyld = &cmd[num_cmds]; + desc[num_cmds].len = sizeof(*cmd); + desc[num_cmds].type = IPA_IMM_CMD_DESC; + num_cmds++; + } + + for (index = IPA_MEM_PART(v6_modem_rt_index_lo); + index <= IPA_MEM_PART(v6_modem_rt_index_hi); + index++) { + cmd[num_cmds].size = mem.size; + cmd[num_cmds].system_addr = mem.phys_base; + cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_ofst) + index * 4; + + desc[num_cmds].opcode = IPA_DMA_SHARED_MEM; + desc[num_cmds].pyld = &cmd[num_cmds]; + desc[num_cmds].len = sizeof(*cmd); + desc[num_cmds].type = IPA_IMM_CMD_DESC; + num_cmds++; + } + + retval = ipa_send_cmd(num_cmds, desc); + if (retval) { + IPAERR("failed to send immediate command (error %d)\n", retval); + retval = -EFAULT; + } + + kfree(cmd); + +bail_desc: + kfree(desc); + +bail_dma: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + return retval; +} + +static void ipa_q6_disable_agg_reg(struct ipa_register_write *reg_write, + int ep_idx) +{ + reg_write->skip_pipeline_clear = 0; + + reg_write->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(ep_idx); + reg_write->value = + (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) << + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; + reg_write->value_mask = + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK << + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; + + reg_write->value |= + ((0 & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) << + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT); + reg_write->value_mask |= + ((IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK << + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT)); +} + +static int ipa_q6_set_ex_path_dis_agg(void) +{ + int ep_idx; + int client_idx; + struct ipa_desc *desc; + int num_descs = 0; + int index; + struct ipa_register_write *reg_write; + int retval; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + desc = kcalloc(ipa_ctx->ipa_num_pipes, sizeof(struct ipa_desc), + GFP_KERNEL); + if (!desc) { + IPAERR("failed to allocate memory\n"); + return -ENOMEM; + } + + /* Set the exception path to AP */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + + if (ipa_ctx->ep[ep_idx].valid && + ipa_ctx->ep[ep_idx].skip_ep_cfg) { + ipa_assert_on(num_descs >= ipa_ctx->ipa_num_pipes); + reg_write = kzalloc(sizeof(*reg_write), flag); + + if (!reg_write) { + IPAERR("failed to allocate memory\n"); + return -ENOMEM; + } + reg_write->skip_pipeline_clear = 0; + reg_write->offset = IPA_ENDP_STATUS_n_OFST(ep_idx); + reg_write->value = + (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) & + IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) << + IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT; + reg_write->value_mask = + IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK << + IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT; + + desc[num_descs].opcode = IPA_REGISTER_WRITE; + desc[num_descs].pyld = reg_write; + desc[num_descs].len = sizeof(*reg_write); + desc[num_descs].type = IPA_IMM_CMD_DESC; + desc[num_descs].callback = ipa_free_buffer; + desc[num_descs].user1 = reg_write; + num_descs++; + } + } + + /* Disable AGGR on IPA->Q6 pipes */ + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { + ep_idx = ipa2_get_ep_mapping(client_idx); + if (ep_idx == -1) + continue; + if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) || + IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx)) { + reg_write = kzalloc(sizeof(*reg_write), flag); + + if (!reg_write) { + IPAERR("failed to allocate memory\n"); + return -ENOMEM; + } + + ipa_q6_disable_agg_reg(reg_write, ep_idx); + + desc[num_descs].opcode = IPA_REGISTER_WRITE; + desc[num_descs].pyld = reg_write; + desc[num_descs].len = sizeof(*reg_write); + desc[num_descs].type = IPA_IMM_CMD_DESC; + desc[num_descs].callback = ipa_free_buffer; + desc[num_descs].user1 = reg_write; + num_descs++; + } + } + + /* Will wait 
150msecs for IPA tag process completion */ + retval = ipa_tag_process(desc, num_descs, + msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT)); + if (retval) { + IPAERR("TAG process failed! (error %d)\n", retval); + /* For timeout error ipa_free_buffer cb will free user1 */ + if (retval != -ETIME) { + for (index = 0; index < num_descs; index++) + kfree(desc[index].user1); + retval = -EINVAL; + } + } + + kfree(desc); + + return retval; +} + +/** + * ipa_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration + * in IPA HW before modem shutdown. This is performed in + * case of SSR. + * + * Return codes: + * 0: success + * This is a mandatory procedure, in case one of the steps fails, the + * AP needs to restart. + */ +int ipa_q6_pre_shutdown_cleanup(void) +{ + /* If uC has notified the APPS upon a ZIP engine error, + * APPS need to assert (This is a non recoverable error). + */ + if (ipa_ctx->uc_ctx.uc_zip_error) + ipa_assert(); + + IPA_ACTIVE_CLIENTS_INC_SPECIAL("Q6"); + + /* + * Do not delay Q6 pipes here. This may result in IPA reading a + * DMA_TASK with lock bit set and then Q6 pipe delay is set. In this + * situation IPA will be remain locked as the DMA_TASK with unlock + * bit will not be read by IPA as pipe delay is enabled. IPA uC will + * wait for pipe to be empty before issuing a BAM pipe reset. + */ + + if (ipa_q6_monitor_holb_mitigation(false)) { + IPAERR("Failed to disable HOLB monitroing on Q6 pipes\n"); + ipa_assert(); + } + + if (ipa_q6_avoid_holb(false)) { + IPAERR("Failed to set HOLB on Q6 pipes\n"); + ipa_assert(); + } + if (ipa_q6_clean_q6_tables()) { + IPAERR("Failed to clean Q6 tables\n"); + ipa_assert(); + } + if (ipa_q6_set_ex_path_dis_agg()) { + IPAERR("Failed to disable aggregation on Q6 pipes\n"); + ipa_assert(); + } + + ipa_ctx->q6_proxy_clk_vote_valid = true; + return 0; +} + +/** + * ipa_q6_post_shutdown_cleanup() - A cleanup for the Q6 pipes + * in IPA HW after modem shutdown. This is performed + * in case of SSR. 
+ * + * Return codes: + * 0: success + * This is a mandatory procedure, in case one of the steps fails, the + * AP needs to restart. + */ +int ipa_q6_post_shutdown_cleanup(void) +{ + int client_idx; + int res; + + /* + * Do not delay Q6 pipes here. This may result in IPA reading a + * DMA_TASK with lock bit set and then Q6 pipe delay is set. In this + * situation IPA will be remain locked as the DMA_TASK with unlock + * bit will not be read by IPA as pipe delay is enabled. IPA uC will + * wait for pipe to be empty before issuing a BAM pipe reset. + */ + + if (ipa_q6_avoid_holb(true)) { + IPAERR("Failed to set HOLB on Q6 ZIP pipes\n"); + ipa_assert(); + } + + if (!ipa_ctx->uc_ctx.uc_loaded) { + IPAERR("uC is not loaded, won't reset Q6 pipes\n"); + return 0; + } + + for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) + if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) || + IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) || + IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) || + IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) { + res = ipa_uc_reset_pipe(client_idx); + if (res) + ipa_assert(); + } + return 0; +} + +int _ipa_init_sram_v2(void) +{ + u32 *ipa_sram_mmio; + unsigned long phys_addr; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; + struct ipa_desc desc = {0}; + struct ipa_mem_buffer mem; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + int rc = 0; + + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0( + ipa_ctx->smem_restricted_bytes / 4); + + ipa_sram_mmio = ioremap(phys_addr, + ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + +#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val) + + IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(uc_info_ofst), IPA_MEM_CANARY_VAL); + + iounmap(ipa_sram_mmio); + + mem.size = IPA_STATUS_CLEAR_SIZE; + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd = kzalloc(sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd->size = mem.size; + cmd->system_addr = mem.phys_base; + cmd->local_addr = IPA_STATUS_CLEAR_OFST; + desc.opcode = IPA_DMA_SHARED_MEM; + desc.pyld = (void *)cmd; + desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc.type = IPA_IMM_CMD_DESC; + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_sram_v2_5(void) +{ + u32 *ipa_sram_mmio; + unsigned long phys_addr; + + 
phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_SW_FIRST_v2_5; + + ipa_sram_mmio = ioremap(phys_addr, + ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + +#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val) + + IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4, + IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL); + IPA_SRAM_SET(IPA_MEM_PART(end_ofst), IPA_MEM_CANARY_VAL); + + iounmap(ipa_sram_mmio); + + return 0; +} + +static inline void ipa_sram_set_canary(u32 *sram_mmio, int offset) +{ + /* Set 4 bytes of CANARY before the offset */ + sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL; +} + +int _ipa_init_sram_v2_6L(void) +{ + u32 *ipa_sram_mmio; + unsigned long phys_addr; + + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_SW_FIRST_v2_5; + + ipa_sram_mmio = ioremap(phys_addr, + ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes); + if (!ipa_sram_mmio) { + IPAERR("fail to ioremap IPA SRAM\n"); + return -ENOMEM; + } + + /* Consult with ipa_ram_mmap.h on the location of the CANARY values */ + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst) - 4); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, 
IPA_MEM_PART(v6_flt_ofst) - 4); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst) - 4); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_comp_decomp_ofst) - 4); + ipa_sram_set_canary(ipa_sram_mmio, + IPA_MEM_PART(modem_comp_decomp_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst)); + ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst)); + + iounmap(ipa_sram_mmio); + + return 0; +} + +int _ipa_init_hdr_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_hdr_init_local *cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + int rc = 0; + + mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd = kzalloc(sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("Failed to alloc header init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd->hdr_table_src_addr = mem.phys_base; + cmd->size_hdr_table = mem.size; + cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_ofst); + + desc.opcode = IPA_HDR_INIT_LOCAL; + desc.pyld = (void *)cmd; + desc.len = sizeof(struct ipa_hdr_init_local); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_hdr_v2_5(void) +{ + struct ipa_desc desc = { 0 }; + struct 
ipa_mem_buffer mem; + struct ipa_hdr_init_local *cmd = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + cmd = kzalloc(sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("Failed to alloc header init command object\n"); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + return -ENOMEM; + } + + cmd->hdr_table_src_addr = mem.phys_base; + cmd->size_hdr_table = mem.size; + cmd->hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_ofst); + + desc.opcode = IPA_HDR_INIT_LOCAL; + desc.pyld = (void *)cmd; + desc.len = sizeof(struct ipa_hdr_init_local); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + kfree(cmd); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + return -EFAULT; + } + + kfree(cmd); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) + + IPA_MEM_PART(apps_hdr_proc_ctx_size); + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + memset(&desc, 0, sizeof(desc)); + + dma_cmd = kzalloc(sizeof(*dma_cmd), flag); + if (dma_cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + dma_free_coherent(ipa_ctx->pdev, + mem.size, + mem.base, + mem.phys_base); + return -ENOMEM; + } + + dma_cmd->system_addr = mem.phys_base; + dma_cmd->local_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(modem_hdr_proc_ctx_ofst); + 
dma_cmd->size = mem.size; + desc.opcode = IPA_DMA_SHARED_MEM; + desc.pyld = (void *)dma_cmd; + desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + kfree(dma_cmd); + dma_free_coherent(ipa_ctx->pdev, + mem.size, + mem.base, + mem.phys_base); + return -EFAULT; + } + + ipa_write_reg(ipa_ctx->mmio, + IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST, + dma_cmd->local_addr); + + kfree(dma_cmd); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + return 0; +} + +int _ipa_init_hdr_v2_6L(void) +{ + /* Same implementation as IPAv2 */ + return _ipa_init_hdr_v2(); +} + +int _ipa_init_rt4_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_ip_v4_routing_init *v4_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + u32 *entry; + int i; + int rc = 0; + + for (i = IPA_MEM_PART(v4_modem_rt_index_lo); + i <= IPA_MEM_PART(v4_modem_rt_index_hi); + i++) + ipa_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i); + IPADBG("v4 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v4]); + + mem.size = IPA_MEM_PART(v4_rt_size); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + entry = mem.base; + for (i = 0; i < IPA_MEM_PART(v4_num_index); i++) { + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + entry++; + } + + v4_cmd = kzalloc(sizeof(*v4_cmd), flag); + if (v4_cmd == NULL) { + IPAERR("Failed to alloc v4 routing init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + desc.opcode = IPA_IP_V4_ROUTING_INIT; + v4_cmd->ipv4_rules_addr = mem.phys_base; + v4_cmd->size_ipv4_rules = mem.size; + v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_ofst); + IPADBG("putting Routing IPv4 rules to phys 
0x%x", + v4_cmd->ipv4_addr); + + desc.pyld = (void *)v4_cmd; + desc.len = sizeof(struct ipa_ip_v4_routing_init); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(v4_cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_rt6_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_ip_v6_routing_init *v6_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + u32 *entry; + int i; + int rc = 0; + + for (i = IPA_MEM_PART(v6_modem_rt_index_lo); + i <= IPA_MEM_PART(v6_modem_rt_index_hi); + i++) + ipa_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i); + IPADBG("v6 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v6]); + + mem.size = IPA_MEM_PART(v6_rt_size); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + entry = mem.base; + for (i = 0; i < IPA_MEM_PART(v6_num_index); i++) { + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + entry++; + } + + v6_cmd = kzalloc(sizeof(*v6_cmd), flag); + if (v6_cmd == NULL) { + IPAERR("Failed to alloc v6 routing init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + desc.opcode = IPA_IP_V6_ROUTING_INIT; + v6_cmd->ipv6_rules_addr = mem.phys_base; + v6_cmd->size_ipv6_rules = mem.size; + v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_ofst); + IPADBG("putting Routing IPv6 rules to phys 0x%x", + v6_cmd->ipv6_addr); + + desc.pyld = (void *)v6_cmd; + desc.len = sizeof(struct ipa_ip_v6_routing_init); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(v6_cmd); 
+fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_flt4_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_ip_v4_filter_init *v4_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + u32 *entry; + int i; + int rc = 0; + + mem.size = IPA_MEM_PART(v4_flt_size); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + entry = mem.base; + + *entry = ((0xFFFFF << 1) | 0x1); + entry++; + + for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) { + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + entry++; + } + + v4_cmd = kzalloc(sizeof(*v4_cmd), flag); + if (v4_cmd == NULL) { + IPAERR("Failed to alloc v4 fliter init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + desc.opcode = IPA_IP_V4_FILTER_INIT; + v4_cmd->ipv4_rules_addr = mem.phys_base; + v4_cmd->size_ipv4_rules = mem.size; + v4_cmd->ipv4_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst); + IPADBG("putting Filtering IPv4 rules to phys 0x%x", + v4_cmd->ipv4_addr); + + desc.pyld = (void *)v4_cmd; + desc.len = sizeof(struct ipa_ip_v4_filter_init); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(v4_cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +int _ipa_init_flt6_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_ip_v6_filter_init *v6_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + u32 *entry; + int i; + int rc = 0; + + mem.size = IPA_MEM_PART(v6_flt_size); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + + entry = mem.base; + + *entry = (0xFFFFF << 1) | 0x1; + entry++; + + for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) { + *entry = ipa_ctx->empty_rt_tbl_mem.phys_base; + entry++; + } + + v6_cmd = kzalloc(sizeof(*v6_cmd), flag); + if (v6_cmd == NULL) { + IPAERR("Failed to alloc v6 fliter init command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + desc.opcode = IPA_IP_V6_FILTER_INIT; + v6_cmd->ipv6_rules_addr = mem.phys_base; + v6_cmd->size_ipv6_rules = mem.size; + v6_cmd->ipv6_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst); + IPADBG("putting Filtering IPv6 rules to phys 0x%x", + v6_cmd->ipv6_addr); + + desc.pyld = (void *)v6_cmd; + desc.len = sizeof(struct ipa_ip_v6_filter_init); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + } + + kfree(v6_cmd); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return rc; +} + +static int ipa_setup_apps_pipes(void) +{ + struct ipa_sys_connect_params sys_in; + int result = 0; + + /* CMD OUT (A5->IPA) */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_CMD_PROD; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS; + sys_in.skip_ep_cfg = true; + if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) { + IPAERR(":setup sys pipe failed.\n"); + result = -EPERM; + goto fail_cmd; + } + IPADBG("Apps to IPA cmd pipe is connected\n"); + + ipa_ctx->ctrl->ipa_init_sram(); + IPADBG("SRAM initialized\n"); + + 
ipa_ctx->ctrl->ipa_init_hdr(); + IPADBG("HDR initialized\n"); + + ipa_ctx->ctrl->ipa_init_rt4(); + IPADBG("V4 RT initialized\n"); + + ipa_ctx->ctrl->ipa_init_rt6(); + IPADBG("V6 RT initialized\n"); + + ipa_ctx->ctrl->ipa_init_flt4(); + IPADBG("V4 FLT initialized\n"); + + ipa_ctx->ctrl->ipa_init_flt6(); + IPADBG("V6 FLT initialized\n"); + + if (ipa_setup_exception_path()) { + IPAERR(":fail to setup excp path\n"); + result = -EPERM; + goto fail_schedule_delayed_work; + } + IPADBG("Exception path was successfully set"); + + if (ipa_setup_dflt_rt_tables()) { + IPAERR(":fail to setup dflt routes\n"); + result = -EPERM; + goto fail_schedule_delayed_work; + } + IPADBG("default routing was set\n"); + + /* LAN IN (IPA->A5) */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_LAN_CONS; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) { + sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1; + sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_A5_MUX_HEADER_LENGTH; + } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) { + sys_in.notify = ipa_lan_rx_cb; + sys_in.priv = NULL; + sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH; + sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD; + sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false; + sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0; + sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2; + sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL; + } else { + WARN_ON(1); + } + + /** + * ipa_lan_rx_cb() intended to notify the source EP about packet + * being received on the LAN_CONS via calling the source EP call-back. + * There could be a race condition with calling this call-back. Other + * thread may nullify it - e.g. on EP disconnect. 
+ * This lock intended to protect the access to the source EP call-back + */ + spin_lock_init(&ipa_ctx->disconnect_lock); + if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) { + IPAERR(":setup sys pipe failed.\n"); + result = -EPERM; + goto fail_schedule_delayed_work; + } + + /* LAN-WAN OUT (A5->IPA) */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_APPS_LAN_WAN_PROD; + sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) { + IPAERR(":setup sys pipe failed.\n"); + result = -EPERM; + goto fail_data_out; + } + + return 0; + +fail_data_out: + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in); +fail_schedule_delayed_work: + if (ipa_ctx->dflt_v6_rt_rule_hdl) + __ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl); + if (ipa_ctx->dflt_v4_rt_rule_hdl) + __ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl); + if (ipa_ctx->excp_hdr_hdl) + __ipa_del_hdr(ipa_ctx->excp_hdr_hdl, false); + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd); +fail_cmd: + return result; +} + +static void ipa_teardown_apps_pipes(void) +{ + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out); + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in); + __ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl); + __ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl); + __ipa_del_hdr(ipa_ctx->excp_hdr_hdl, false); + ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd); +} + +#ifdef CONFIG_COMPAT +long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int retval = 0; + struct ipa_ioc_nat_alloc_mem32 nat_mem32; + struct ipa_ioc_nat_alloc_mem nat_mem; + + switch (cmd) { + case IPA_IOC_ADD_HDR32: + cmd = IPA_IOC_ADD_HDR; + break; + case IPA_IOC_DEL_HDR32: + cmd = IPA_IOC_DEL_HDR; + break; + case IPA_IOC_ADD_RT_RULE32: + cmd = IPA_IOC_ADD_RT_RULE; + break; + case IPA_IOC_DEL_RT_RULE32: + cmd = IPA_IOC_DEL_RT_RULE; + break; + case IPA_IOC_ADD_FLT_RULE32: + cmd = 
IPA_IOC_ADD_FLT_RULE; + break; + case IPA_IOC_DEL_FLT_RULE32: + cmd = IPA_IOC_DEL_FLT_RULE; + break; + case IPA_IOC_GET_RT_TBL32: + cmd = IPA_IOC_GET_RT_TBL; + break; + case IPA_IOC_COPY_HDR32: + cmd = IPA_IOC_COPY_HDR; + break; + case IPA_IOC_QUERY_INTF32: + cmd = IPA_IOC_QUERY_INTF; + break; + case IPA_IOC_QUERY_INTF_TX_PROPS32: + cmd = IPA_IOC_QUERY_INTF_TX_PROPS; + break; + case IPA_IOC_QUERY_INTF_RX_PROPS32: + cmd = IPA_IOC_QUERY_INTF_RX_PROPS; + break; + case IPA_IOC_QUERY_INTF_EXT_PROPS32: + cmd = IPA_IOC_QUERY_INTF_EXT_PROPS; + break; + case IPA_IOC_GET_HDR32: + cmd = IPA_IOC_GET_HDR; + break; + case IPA_IOC_ALLOC_NAT_MEM32: + if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg, + sizeof(struct ipa_ioc_nat_alloc_mem32))) { + retval = -EFAULT; + goto ret; + } + memcpy(nat_mem.dev_name, nat_mem32.dev_name, + IPA_RESOURCE_NAME_MAX); + nat_mem.size = (size_t)nat_mem32.size; + nat_mem.offset = (off_t)nat_mem32.offset; + + /* null terminate the string */ + nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0'; + + if (ipa2_allocate_nat_device(&nat_mem)) { + retval = -EFAULT; + goto ret; + } + nat_mem32.offset = (compat_off_t)nat_mem.offset; + if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32, + sizeof(struct ipa_ioc_nat_alloc_mem32))) { + retval = -EFAULT; + } +ret: + return retval; + case IPA_IOC_V4_INIT_NAT32: + cmd = IPA_IOC_V4_INIT_NAT; + break; + case IPA_IOC_NAT_DMA32: + cmd = IPA_IOC_NAT_DMA; + break; + case IPA_IOC_V4_DEL_NAT32: + cmd = IPA_IOC_V4_DEL_NAT; + break; + case IPA_IOC_GET_NAT_OFFSET32: + cmd = IPA_IOC_GET_NAT_OFFSET; + break; + case IPA_IOC_PULL_MSG32: + cmd = IPA_IOC_PULL_MSG; + break; + case IPA_IOC_RM_ADD_DEPENDENCY32: + cmd = IPA_IOC_RM_ADD_DEPENDENCY; + break; + case IPA_IOC_RM_DEL_DEPENDENCY32: + cmd = IPA_IOC_RM_DEL_DEPENDENCY; + break; + case IPA_IOC_GENERATE_FLT_EQ32: + cmd = IPA_IOC_GENERATE_FLT_EQ; + break; + case IPA_IOC_QUERY_RT_TBL_INDEX32: + cmd = IPA_IOC_QUERY_RT_TBL_INDEX; + break; + case IPA_IOC_WRITE_QMAPID32: + cmd = 
IPA_IOC_WRITE_QMAPID; + break; + case IPA_IOC_MDFY_FLT_RULE32: + cmd = IPA_IOC_MDFY_FLT_RULE; + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32: + cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD; + break; + case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32: + cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL; + break; + case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32: + cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED; + break; + case IPA_IOC_MDFY_RT_RULE32: + cmd = IPA_IOC_MDFY_RT_RULE; + break; + case IPA_IOC_COMMIT_HDR: + case IPA_IOC_RESET_HDR: + case IPA_IOC_COMMIT_RT: + case IPA_IOC_RESET_RT: + case IPA_IOC_COMMIT_FLT: + case IPA_IOC_RESET_FLT: + case IPA_IOC_DUMP: + case IPA_IOC_PUT_RT_TBL: + case IPA_IOC_PUT_HDR: + case IPA_IOC_SET_FLT: + case IPA_IOC_QUERY_EP_MAPPING: + break; + default: + return -ENOIOCTLCMD; + } + return ipa_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + +static const struct file_operations ipa_drv_fops = { + .owner = THIS_MODULE, + .open = ipa_open, + .read = ipa_read, + .unlocked_ioctl = ipa_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_ipa_ioctl, +#endif +}; + +static int ipa_get_clks(struct device *dev) +{ + ipa_clk = clk_get(dev, "core_clk"); + if (IS_ERR(ipa_clk)) { + if (ipa_clk != ERR_PTR(-EPROBE_DEFER)) + IPAERR("fail to get ipa clk\n"); + return PTR_ERR(ipa_clk); + } + + if (smmu_info.present && smmu_info.arm_smmu) { + smmu_clk = clk_get(dev, "smmu_clk"); + if (IS_ERR(smmu_clk)) { + if (smmu_clk != ERR_PTR(-EPROBE_DEFER)) + IPAERR("fail to get smmu clk\n"); + return PTR_ERR(smmu_clk); + } + + if (clk_get_rate(smmu_clk) == 0) { + long rate = clk_round_rate(smmu_clk, 1000); + + clk_set_rate(smmu_clk, rate); + } + } + + if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) { + ipa_cnoc_clk = clk_get(dev, "iface_clk"); + if (IS_ERR(ipa_cnoc_clk)) { + ipa_cnoc_clk = NULL; + IPAERR("fail to get cnoc clk\n"); + return -ENODEV; + } + + ipa_clk_src = clk_get(dev, "core_src_clk"); + if (IS_ERR(ipa_clk_src)) { + ipa_clk_src = NULL; + IPAERR("fail to 
get ipa clk src\n"); + return -ENODEV; + } + + sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk"); + if (IS_ERR(sys_noc_ipa_axi_clk)) { + sys_noc_ipa_axi_clk = NULL; + IPAERR("fail to get sys_noc_ipa_axi clk\n"); + return -ENODEV; + } + + ipa_inactivity_clk = clk_get(dev, "inactivity_clk"); + if (IS_ERR(ipa_inactivity_clk)) { + ipa_inactivity_clk = NULL; + IPAERR("fail to get inactivity clk\n"); + return -ENODEV; + } + } + + return 0; +} + +void _ipa_enable_clks_v2_0(void) +{ + IPADBG_LOW("enabling gcc_ipa_clk\n"); + if (ipa_clk) { + clk_prepare(ipa_clk); + clk_enable(ipa_clk); + IPADBG_LOW("curr_ipa_clk_rate=%d", ipa_ctx->curr_ipa_clk_rate); + clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate); + ipa_uc_notify_clk_state(true); + } else { + WARN_ON(1); + } + + if (smmu_clk) + clk_prepare_enable(smmu_clk); + /* Enable the BAM IRQ. */ + ipa_sps_irq_control_all(true); + ipa_suspend_apps_pipes(false); +} + +void _ipa_enable_clks_v1_1(void) +{ + + if (ipa_cnoc_clk) { + clk_prepare(ipa_cnoc_clk); + clk_enable(ipa_cnoc_clk); + clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE); + } else { + WARN_ON(1); + } + + if (ipa_clk_src) + clk_set_rate(ipa_clk_src, + ipa_ctx->curr_ipa_clk_rate); + else + WARN_ON(1); + + if (ipa_clk) + clk_prepare(ipa_clk); + else + WARN_ON(1); + + if (sys_noc_ipa_axi_clk) + clk_prepare(sys_noc_ipa_axi_clk); + else + WARN_ON(1); + + if (ipa_inactivity_clk) + clk_prepare(ipa_inactivity_clk); + else + WARN_ON(1); + + if (ipa_clk) + clk_enable(ipa_clk); + else + WARN_ON(1); + + if (sys_noc_ipa_axi_clk) + clk_enable(sys_noc_ipa_axi_clk); + else + WARN_ON(1); + + if (ipa_inactivity_clk) + clk_enable(ipa_inactivity_clk); + else + WARN_ON(1); + +} + +static unsigned int ipa_get_bus_vote(void) +{ + unsigned int idx = 1; + + if (ipa_ctx->curr_ipa_clk_rate == ipa_ctx->ctrl->ipa_clk_rate_svs) { + idx = 1; + } else if (ipa_ctx->curr_ipa_clk_rate == + ipa_ctx->ctrl->ipa_clk_rate_nominal) { + if (ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2) + idx = 1; + else + idx = 
2; + } else if (ipa_ctx->curr_ipa_clk_rate == + ipa_ctx->ctrl->ipa_clk_rate_turbo) { + idx = ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1; + } else { + WARN_ON(1); + } + + IPADBG("curr %d idx %d\n", ipa_ctx->curr_ipa_clk_rate, idx); + + return idx; +} + +/** + * ipa_enable_clks() - Turn on IPA clocks + * + * Return codes: + * None + */ +void ipa_enable_clks(void) +{ + IPADBG("enabling IPA clocks and bus voting\n"); + + ipa_ctx->ctrl->ipa_enable_clks(); + + if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) + if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl, + ipa_get_bus_vote())) + WARN_ON(1); +} + +void _ipa_disable_clks_v1_1(void) +{ + + if (ipa_inactivity_clk) + clk_disable_unprepare(ipa_inactivity_clk); + else + WARN_ON(1); + + if (sys_noc_ipa_axi_clk) + clk_disable_unprepare(sys_noc_ipa_axi_clk); + else + WARN_ON(1); + + if (ipa_clk) + clk_disable_unprepare(ipa_clk); + else + WARN_ON(1); + + if (ipa_cnoc_clk) + clk_disable_unprepare(ipa_cnoc_clk); + else + WARN_ON(1); + +} + +void _ipa_disable_clks_v2_0(void) +{ + IPADBG_LOW("disabling gcc_ipa_clk\n"); + ipa_suspend_apps_pipes(true); + ipa_sps_irq_control_all(false); + ipa_uc_notify_clk_state(false); + if (ipa_clk) + clk_disable_unprepare(ipa_clk); + else + WARN_ON(1); + + if (smmu_clk) + clk_disable_unprepare(smmu_clk); +} + +/** + * ipa_disable_clks() - Turn off IPA clocks + * + * Return codes: + * None + */ +void ipa_disable_clks(void) +{ + IPADBG_LOW("disabling IPA clocks and bus voting\n"); + + ipa_ctx->ctrl->ipa_disable_clks(); + + if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) + if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl, + 0)) + WARN_ON(1); +} + +/** + * ipa_start_tag_process() - Send TAG packet and wait for it to come back + * + * This function is called prior to clock gating when active client counter + * is 1. TAG process ensures that there are no packets inside IPA HW that + * were not submitted to peer's BAM. 
During TAG process all aggregation frames + * are (force) closed. + * + * Return codes: + * None + */ +static void ipa_start_tag_process(struct work_struct *work) +{ + int res; + + IPADBG("starting TAG process\n"); + /* close aggregation frames on all pipes */ + res = ipa_tag_aggr_force_close(-1); + if (res) + IPAERR("ipa_tag_aggr_force_close failed %d\n", res); + + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS"); + + IPADBG("TAG process done\n"); +} + +/** + * ipa2_active_clients_log_mod() - Log a modification in the active clients + * reference count + * + * This method logs any modification in the active clients reference count: + * It logs the modification in the circular history buffer + * It logs the modification in the hash table - looking for an entry, + * creating one if needed and deleting one if needed. + * + * @id: ipa2_active client logging info struct to hold the log information + * @inc: a boolean variable to indicate whether the modification is an increase + * or decrease + * @int_ctx: a boolean variable to indicate whether this call is being made from + * an interrupt context and therefore should allocate GFP_ATOMIC memory + * + * Method process: + * - Hash the unique identifier string + * - Find the hash in the table + * 1)If found, increase or decrease the reference count + * 2)If not found, allocate a new hash table entry struct and initialize it + * - Remove and deallocate unneeded data structure + * - Log the call in the circular history buffer (unless it is a simple call) + */ +void ipa2_active_clients_log_mod(struct ipa_active_client_logging_info *id, + bool inc, bool int_ctx) +{ + char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]; + unsigned long long t; + unsigned long nanosec_rem; + struct ipa2_active_client_htable_entry *hentry; + struct ipa2_active_client_htable_entry *hfound; + u32 hkey; + char str_to_hash[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN]; + + hfound = NULL; + memset(str_to_hash, 0, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN); + 
strlcpy(str_to_hash, id->id_string, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN); + hkey = jhash(str_to_hash, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN, + 0); + hash_for_each_possible(ipa_ctx->ipa2_active_clients_logging.htable, + hentry, list, hkey) { + if (!strcmp(hentry->id_string, id->id_string)) { + hentry->count = hentry->count + (inc ? 1 : -1); + hfound = hentry; + } + } + if (hfound == NULL) { + hentry = NULL; + hentry = kzalloc(sizeof( + struct ipa2_active_client_htable_entry), + int_ctx ? GFP_ATOMIC : GFP_KERNEL); + if (hentry == NULL) { + IPAERR("failed allocating active clients hash entry"); + return; + } + hentry->type = id->type; + strlcpy(hentry->id_string, id->id_string, + IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN); + INIT_HLIST_NODE(&hentry->list); + hentry->count = inc ? 1 : -1; + hash_add(ipa_ctx->ipa2_active_clients_logging.htable, + &hentry->list, hkey); + } else if (hfound->count == 0) { + hash_del(&hfound->list); + kfree(hfound); + } + + if (id->type != SIMPLE) { + t = local_clock(); + nanosec_rem = do_div(t, 1000000000) / 1000; + snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN, + inc ? 
				"[%5lu.%06lu] ^ %s, %s: %d" :
				"[%5lu.%06lu] v %s, %s: %d",
				(unsigned long)t, nanosec_rem,
				id->id_string, id->file, id->line);
		/* record the formatted entry in the circular history buffer */
		ipa2_active_clients_log_insert(temp_str);
	}
}

/* Convenience wrapper: log a reference-count decrease. */
void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id,
		bool int_ctx)
{
	ipa2_active_clients_log_mod(id, false, int_ctx);
}

/* Convenience wrapper: log a reference-count increase. */
void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id,
		bool int_ctx)
{
	ipa2_active_clients_log_mod(id, true, int_ctx);
}

/**
 * ipa_inc_client_enable_clks() - Increase active clients counter, and
 * enable ipa clocks if necessary
 *
 * Please do not use this API, use the wrapper macros instead (ipa_i.h)
 * IPA2_ACTIVE_CLIENTS_INC_XXXX();
 *
 * Return codes:
 * None
 */
void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
{
	ipa_active_clients_lock();
	ipa2_active_clients_log_inc(id, false);
	/* the first active client powers the IPA clocks back on */
	ipa_ctx->ipa_active_clients.cnt++;
	if (ipa_ctx->ipa_active_clients.cnt == 1)
		ipa_enable_clks();
	IPADBG_LOW("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
	ipa_active_clients_unlock();
}

/**
 * ipa_inc_client_enable_clks_no_block() - Only increment the number of active
 * clients if no asynchronous actions should be done. Asynchronous actions are
 * locking a mutex and waking up IPA HW.
+ * + * Please do not use this API, use the wrapper macros instead (ipa_i.h) + * + * + * Return codes: 0 for success + * -EPERM if an asynchronous action should have been done + */ +int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info + *id) +{ + int res = 0; + unsigned long flags; + + if (ipa_active_clients_trylock(&flags) == 0) + return -EPERM; + + if (ipa_ctx->ipa_active_clients.cnt == 0) { + res = -EPERM; + goto bail; + } + + ipa2_active_clients_log_inc(id, true); + + ipa_ctx->ipa_active_clients.cnt++; + IPADBG_LOW("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt); +bail: + ipa_active_clients_trylock_unlock(&flags); + + return res; +} + +/** + * ipa_dec_client_disable_clks() - Decrease active clients counter + * + * In case that there are no active clients this function also starts + * TAG process. When TAG progress ends ipa clocks will be gated. + * start_tag_process_again flag is set during this function to signal TAG + * process to start again as there was another client that may send data to ipa + * + * Please do not use this API, use the wrapper macros instead (ipa_i.h) + * IPA2_ACTIVE_CLIENTS_DEC_XXXX(); + * + * Return codes: + * None + */ +void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id) +{ + struct ipa_active_client_logging_info log_info; + + ipa_active_clients_lock(); + ipa2_active_clients_log_dec(id, false); + ipa_ctx->ipa_active_clients.cnt--; + IPADBG_LOW("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt); + if (ipa_ctx->ipa_active_clients.cnt == 0) { + if (ipa_ctx->tag_process_before_gating) { + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, + "TAG_PROCESS"); + ipa2_active_clients_log_inc(&log_info, false); + ipa_ctx->tag_process_before_gating = false; + /* + * When TAG process ends, active clients will be + * decreased + */ + ipa_ctx->ipa_active_clients.cnt = 1; + queue_work(ipa_ctx->power_mgmt_wq, &ipa_tag_work); + } else { + ipa_disable_clks(); + } + } + ipa_active_clients_unlock(); 
}

/**
 * ipa_inc_acquire_wakelock() - Increase active clients counter, and
 * acquire wakelock if necessary
 *
 * Return codes:
 * None
 */
void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client)
{
	unsigned long flags;

	if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
		return;
	spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
	/* each client owns one bit in cnt; warn on double-acquire */
	if (ipa_ctx->wakelock_ref_cnt.cnt & (1 << ref_client))
		IPAERR("client enum %d mask already set. ref cnt = %d\n",
			ref_client, ipa_ctx->wakelock_ref_cnt.cnt);
	ipa_ctx->wakelock_ref_cnt.cnt |= (1 << ref_client);
	if (ipa_ctx->wakelock_ref_cnt.cnt)
		__pm_stay_awake(&ipa_ctx->w_lock);
	IPADBG_LOW("active wakelock ref cnt = %d client enum %d\n",
		ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
	spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
}

/**
 * ipa_dec_release_wakelock() - Decrease active clients counter
 *
 * In case if the ref count is 0, release the wakelock.
 *
 * Return codes:
 * None
 */
void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client)
{
	unsigned long flags;

	if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
		return;
	spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
	ipa_ctx->wakelock_ref_cnt.cnt &= ~(1 << ref_client);
	IPADBG_LOW("active wakelock ref cnt = %d client enum %d\n",
		ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
	/* last reference dropped: allow the system to suspend again */
	if (ipa_ctx->wakelock_ref_cnt.cnt == 0)
		__pm_relax(&ipa_ctx->w_lock);
	spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
}

/* Program the BAM config register for the detected IPA HW version. */
static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
{
	void *ipa_bam_mmio;
	int reg_val;
	int retval = 0;

	/* map the BAM register window only for the duration of this write */
	ipa_bam_mmio = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST,
		IPA_BAM_REMAP_SIZE);
	if (!ipa_bam_mmio)
		return -ENOMEM;
	switch (ipa_ctx->ipa_hw_type) {
	case IPA_HW_v1_1:
		reg_val = IPA_BAM_CNFG_BITS_VALv1_1;
		break;
	case IPA_HW_v2_0:
	case IPA_HW_v2_5:
	case IPA_HW_v2_6L:
		reg_val =
IPA_BAM_CNFG_BITS_VALv2_0; + break; + default: + retval = -EPERM; + goto fail; + } + if (ipa_ctx->ipa_hw_type < IPA_HW_v2_5) + ipa_write_reg(ipa_bam_mmio, IPA_BAM_CNFG_BITS_OFST, reg_val); +fail: + iounmap(ipa_bam_mmio); + + return retval; +} + +int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps) +{ + enum ipa_voltage_level needed_voltage; + u32 clk_rate; + + IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u", + floor_voltage, bandwidth_mbps); + + if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED || + floor_voltage >= IPA_VOLTAGE_MAX) { + IPAERR("bad voltage\n"); + return -EINVAL; + } + + if (ipa_ctx->enable_clock_scaling) { + IPADBG_LOW("Clock scaling is enabled\n"); + if (bandwidth_mbps >= + ipa_ctx->ctrl->clock_scaling_bw_threshold_turbo) + needed_voltage = IPA_VOLTAGE_TURBO; + else if (bandwidth_mbps >= + ipa_ctx->ctrl->clock_scaling_bw_threshold_nominal) + needed_voltage = IPA_VOLTAGE_NOMINAL; + else + needed_voltage = IPA_VOLTAGE_SVS; + } else { + IPADBG_LOW("Clock scaling is disabled\n"); + needed_voltage = IPA_VOLTAGE_NOMINAL; + } + + needed_voltage = max(needed_voltage, floor_voltage); + switch (needed_voltage) { + case IPA_VOLTAGE_SVS: + clk_rate = ipa_ctx->ctrl->ipa_clk_rate_svs; + break; + case IPA_VOLTAGE_NOMINAL: + clk_rate = ipa_ctx->ctrl->ipa_clk_rate_nominal; + break; + case IPA_VOLTAGE_TURBO: + clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo; + break; + default: + IPAERR("bad voltage\n"); + WARN_ON(1); + return -EFAULT; + } + + if (clk_rate == ipa_ctx->curr_ipa_clk_rate) { + IPADBG_LOW("Same voltage\n"); + return 0; + } + + ipa_active_clients_lock(); + ipa_ctx->curr_ipa_clk_rate = clk_rate; + IPADBG_LOW("setting clock rate to %u\n", ipa_ctx->curr_ipa_clk_rate); + if (ipa_ctx->ipa_active_clients.cnt > 0) { + struct ipa_active_client_logging_info log_info; + + /* + * clk_set_rate should be called with unlocked lock to allow + * clients to get a reference to IPA clock synchronously. 
+ * Hold a reference to IPA clock here to make sure clock + * state does not change during set_rate. + */ + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); + ipa_ctx->ipa_active_clients.cnt++; + ipa2_active_clients_log_inc(&log_info, false); + ipa_active_clients_unlock(); + + clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate); + if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) + if (msm_bus_scale_client_update_request( + ipa_ctx->ipa_bus_hdl, ipa_get_bus_vote())) + WARN_ON(1); + /* remove the vote added here */ + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + } else { + IPADBG_LOW("clocks are gated, not setting rate\n"); + ipa_active_clients_unlock(); + } + IPADBG_LOW("Done\n"); + return 0; +} + +static int ipa_init_flt_block(void) +{ + int result = 0; + + /* + * SW workaround for Improper Filter Behavior when neither Global nor + * Pipe Rules are present => configure dummy global filter rule + * always which results in a miss + */ + struct ipa_ioc_add_flt_rule *rules; + struct ipa_flt_rule_add *rule; + struct ipa_ioc_get_rt_tbl rt_lookup; + enum ipa_ip_type ip; + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v1_1) { + size_t sz = sizeof(struct ipa_ioc_add_flt_rule) + + sizeof(struct ipa_flt_rule_add); + + rules = kmalloc(sz, GFP_KERNEL); + if (rules == NULL) { + IPAERR("fail to alloc mem for dummy filter rule\n"); + return -ENOMEM; + } + + IPADBG("Adding global rules for IPv4 and IPv6"); + for (ip = IPA_IP_v4; ip < IPA_IP_MAX; ip++) { + memset(&rt_lookup, 0, + sizeof(struct ipa_ioc_get_rt_tbl)); + rt_lookup.ip = ip; + strlcpy(rt_lookup.name, IPA_DFLT_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + ipa2_get_rt_tbl(&rt_lookup); + ipa2_put_rt_tbl(rt_lookup.hdl); + + memset(rules, 0, sz); + rule = &rules->rules[0]; + rules->commit = 1; + rules->ip = ip; + rules->global = 1; + rules->num_rules = 1; + rule->at_rear = 1; + if (ip == IPA_IP_v4) { + rule->rule.attrib.attrib_mask = + IPA_FLT_PROTOCOL | IPA_FLT_DST_ADDR; + rule->rule.attrib.u.v4.protocol = + IPA_INVALID_L4_PROTOCOL; + 
rule->rule.attrib.u.v4.dst_addr_mask = ~0; + rule->rule.attrib.u.v4.dst_addr = ~0; + } else if (ip == IPA_IP_v6) { + rule->rule.attrib.attrib_mask = + IPA_FLT_NEXT_HDR | IPA_FLT_DST_ADDR; + rule->rule.attrib.u.v6.next_hdr = + IPA_INVALID_L4_PROTOCOL; + rule->rule.attrib.u.v6.dst_addr_mask[0] = ~0; + rule->rule.attrib.u.v6.dst_addr_mask[1] = ~0; + rule->rule.attrib.u.v6.dst_addr_mask[2] = ~0; + rule->rule.attrib.u.v6.dst_addr_mask[3] = ~0; + rule->rule.attrib.u.v6.dst_addr[0] = ~0; + rule->rule.attrib.u.v6.dst_addr[1] = ~0; + rule->rule.attrib.u.v6.dst_addr[2] = ~0; + rule->rule.attrib.u.v6.dst_addr[3] = ~0; + } else { + result = -EINVAL; + WARN_ON(1); + break; + } + rule->rule.action = IPA_PASS_TO_ROUTING; + rule->rule.rt_tbl_hdl = rt_lookup.hdl; + rule->rule.retain_hdr = true; + + if (ipa2_add_flt_rule(rules) || + rules->rules[0].status) { + + result = -EINVAL; + WARN_ON(1); + break; + } + } + kfree(rules); + } + return result; +} + +static void ipa_sps_process_irq_schedule_rel(void) +{ + queue_delayed_work(ipa_ctx->sps_power_mgmt_wq, + &ipa_sps_release_resource_work, + msecs_to_jiffies(IPA_SPS_PROD_TIMEOUT_MSEC)); +} + +/** + * ipa_suspend_handler() - Handles the suspend interrupt: + * wakes up the suspended peripheral by requesting its consumer + * @interrupt: Interrupt type + * @private_data: The client's private data + * @interrupt_data: Interrupt specific information data + */ +void ipa_suspend_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + enum ipa_rm_resource_name resource; + u32 suspend_data = + ((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints; + u32 bmsk = 1; + u32 i = 0; + int res; + struct ipa_ep_cfg_holb holb_cfg; + + IPADBG("interrupt=%d, interrupt_data=%u\n", interrupt, suspend_data); + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.tmr_val = 0; + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + if ((suspend_data & bmsk) && (ipa_ctx->ep[i].valid)) { + if 
(IPA_CLIENT_IS_APPS_CONS(ipa_ctx->ep[i].client)) { + /* + * pipe will be unsuspended as part of + * enabling IPA clocks + */ + mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock); + if (!atomic_read( + &ipa_ctx->sps_pm.dec_clients) + ) { + IPA_ACTIVE_CLIENTS_INC_EP( + ipa_ctx->ep[i].client); + IPADBG("Pipes un-suspended.\n"); + IPADBG("Enter poll mode.\n"); + atomic_set( + &ipa_ctx->sps_pm.dec_clients, + 1); + /* + * acquire wake lock as long as suspend + * vote is held + */ + ipa_inc_acquire_wakelock( + IPA_WAKELOCK_REF_CLIENT_SPS); + ipa_sps_process_irq_schedule_rel(); + } + mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock); + } else { + resource = ipa2_get_rm_resource_from_ep(i); + res = ipa_rm_request_resource_with_timer( + resource); + if ((res == -EPERM) && + IPA_CLIENT_IS_CONS( + ipa_ctx->ep[i].client)) { + holb_cfg.en = 1; + res = ipa2_cfg_ep_holb_by_client( + ipa_ctx->ep[i].client, &holb_cfg); + if (res) { + IPAERR("holb en fail\n"); + IPAERR("IPAHW stall\n"); + ipa_assert(); + } + } + } + } + bmsk = bmsk << 1; + } +} + +/** + * ipa2_restore_suspend_handler() - restores the original suspend IRQ handler + * as it was registered in the IPA init sequence. 
+ * Return codes: + * 0: success + * -EPERM: failed to remove current handler or failed to add original handler + */ +int ipa2_restore_suspend_handler(void) +{ + int result = 0; + + result = ipa2_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ); + if (result) { + IPAERR("remove handler for suspend interrupt failed\n"); + return -EPERM; + } + + result = ipa2_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, + ipa_suspend_handler, true, NULL); + if (result) { + IPAERR("register handler for suspend interrupt failed\n"); + result = -EPERM; + } + + return result; +} + +static int apps_cons_release_resource(void) +{ + return 0; +} + +static int apps_cons_request_resource(void) +{ + return 0; +} + +static void ipa_sps_release_resource(struct work_struct *work) +{ + mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock); + /* check whether still need to decrease client usage */ + if (atomic_read(&ipa_ctx->sps_pm.dec_clients)) { + if (atomic_read(&ipa_ctx->sps_pm.eot_activity)) { + IPADBG("EOT pending Re-scheduling\n"); + ipa_sps_process_irq_schedule_rel(); + } else { + atomic_set(&ipa_ctx->sps_pm.dec_clients, 0); + ipa_dec_release_wakelock(IPA_WAKELOCK_REF_CLIENT_SPS); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE"); + } + } + atomic_set(&ipa_ctx->sps_pm.eot_activity, 0); + mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock); +} + +int ipa_create_apps_resource(void) +{ + struct ipa_rm_create_params apps_cons_create_params; + struct ipa_rm_perf_profile profile; + int result = 0; + + memset(&apps_cons_create_params, 0, + sizeof(apps_cons_create_params)); + apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS; + apps_cons_create_params.request_resource = apps_cons_request_resource; + apps_cons_create_params.release_resource = apps_cons_release_resource; + result = ipa_rm_create_resource(&apps_cons_create_params); + if (result) { + IPAERR("ipa_rm_create_resource failed\n"); + return result; + } + + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + 
ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile); + + return result; +} + + +/** + * ipa_init() - Initialize the IPA Driver + * @resource_p: contain platform specific values from DST file + * @pdev: The platform device structure representing the IPA driver + * + * Function initialization process: + * - Allocate memory for the driver context data struct + * - Initializing the ipa_ctx with: + * 1)parsed values from the dts file + * 2)parameters passed to the module initialization + * 3)read HW values(such as core memory size) + * - Map IPA core registers to CPU memory + * - Restart IPA core(HW reset) + * - Register IPA BAM to SPS driver and get a BAM handler + * - Set configuration for IPA BAM via BAM_CNFG_BITS + * - Initialize the look-aside caches(kmem_cache/slab) for filter, + * routing and IPA-tree + * - Create memory pool with 4 objects for DMA operations(each object + * is 512Bytes long), this object will be use for tx(A5->IPA) + * - Initialize lists head(routing,filter,hdr,system pipes) + * - Initialize mutexes (for ipa_ctx and NAT memory mutexes) + * - Initialize spinlocks (for list related to A5<->IPA pipes) + * - Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq" + * - Initialize Red-Black-Tree(s) for handles of header,routing rule, + * routing table ,filtering rule + * - Setup all A5<->IPA pipes by calling to ipa_setup_a5_pipes + * - Preparing the descriptors for System pipes + * - Initialize the filter block by committing IPV4 and IPV6 default rules + * - Create empty routing table in system memory(no committing) + * - Initialize pipes memory pool with ipa_pipe_mem_init for supported platforms + * - Create a char-device for IPA + * - Initialize IPA RM (resource manager) + */ +static int ipa_init(const struct ipa_plat_drv_res *resource_p, + struct device *ipa_dev) +{ + int result = 0; + int i; + struct sps_bam_props bam_props = { 0 }; + struct ipa_flt_tbl *flt_tbl; + struct ipa_rt_tbl_set *rset; + struct 
ipa_active_client_logging_info log_info; + + IPADBG("IPA Driver initialization started\n"); + + /* + * since structure alignment is implementation dependent, add test to + * avoid different and incompatible data layouts + */ + BUILD_BUG_ON(sizeof(struct ipa_hw_pkt_status) != IPA_PKT_STATUS_SIZE); + + ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL); + if (!ipa_ctx) { + IPAERR(":kzalloc err.\n"); + result = -ENOMEM; + goto fail_mem_ctx; + } + + ipa_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0); + if (ipa_ctx->logbuf == NULL) + IPADBG("failed to create IPC log, continue...\n"); + + ipa_ctx->pdev = ipa_dev; + ipa_ctx->uc_pdev = ipa_dev; + ipa_ctx->smmu_present = smmu_info.present; + if (!ipa_ctx->smmu_present) + ipa_ctx->smmu_s1_bypass = true; + else + ipa_ctx->smmu_s1_bypass = smmu_info.s1_bypass; + ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base; + ipa_ctx->ipa_wrapper_size = resource_p->ipa_mem_size; + ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type; + ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode; + ipa_ctx->ipa_uc_monitor_holb = + resource_p->ipa_uc_monitor_holb; + ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge; + ipa_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode; + ipa_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt; + ipa_ctx->ipa_wdi2 = resource_p->ipa_wdi2; + ipa_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size; + ipa_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size; + ipa_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset; + ipa_ctx->use_dma_zone = resource_p->use_dma_zone; + ipa_ctx->tethered_flow_control = resource_p->tethered_flow_control; + + /* Setting up IPA RX Polling Timeout Seconds */ + ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec, + &ipa_ctx->ipa_rx_max_timeout_usec, + resource_p->ipa_rx_polling_sleep_msec); + + /* Setting up ipa polling iteration */ + if ((resource_p->ipa_polling_iteration >= MIN_POLLING_ITERATION) + && 
(resource_p->ipa_polling_iteration <= MAX_POLLING_ITERATION)) + ipa_ctx->ipa_polling_iteration = + resource_p->ipa_polling_iteration; + else + ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION; + + /* default aggregation parameters */ + ipa_ctx->aggregation_type = IPA_MBIM_16; + ipa_ctx->aggregation_byte_limit = 1; + ipa_ctx->aggregation_time_limit = 0; + ipa_ctx->ipa2_active_clients_logging.log_rdy = false; + + ipa_ctx->ctrl = kzalloc(sizeof(*ipa_ctx->ctrl), GFP_KERNEL); + if (!ipa_ctx->ctrl) { + IPAERR("memory allocation error for ctrl\n"); + result = -ENOMEM; + goto fail_mem_ctrl; + } + result = ipa_controller_static_bind(ipa_ctx->ctrl, + ipa_ctx->ipa_hw_type); + if (result) { + IPAERR("fail to static bind IPA ctrl.\n"); + result = -EFAULT; + goto fail_bind; + } + + IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n", + ipa_ctx->hdr_tbl_lcl, ipa_ctx->ip4_rt_tbl_lcl, + ipa_ctx->ip6_rt_tbl_lcl, ipa_ctx->ip4_flt_tbl_lcl, + ipa_ctx->ip6_flt_tbl_lcl); + + if (bus_scale_table) { + IPADBG("Use bus scaling info from device tree\n"); + ipa_ctx->ctrl->msm_bus_data_ptr = bus_scale_table; + } + + if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) { + /* get BUS handle */ + ipa_ctx->ipa_bus_hdl = + msm_bus_scale_register_client( + ipa_ctx->ctrl->msm_bus_data_ptr); + if (!ipa_ctx->ipa_bus_hdl) { + IPAERR("fail to register with bus mgr!\n"); + result = -EPROBE_DEFER; + bus_scale_table = NULL; + goto fail_bus_reg; + } + } else { + IPADBG("Skipping bus scaling registration on Virtual plat\n"); + } + + result = ipa2_active_clients_log_init(); + if (result) + goto fail_init_active_client; + + /* get IPA clocks */ + result = ipa_get_clks(master_dev); + if (result) + goto fail_clk; + + /* Enable ipa_ctx->enable_clock_scaling */ + ipa_ctx->enable_clock_scaling = 1; + ipa_ctx->curr_ipa_clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo; + + /* enable IPA clocks explicitly to allow the initialization */ + ipa_enable_clks(); + + /* setup IPA register access */ + ipa_ctx->mmio = 
ioremap(resource_p->ipa_mem_base + + ipa_ctx->ctrl->ipa_reg_base_ofst, + resource_p->ipa_mem_size); + if (!ipa_ctx->mmio) { + IPAERR(":ipa-base ioremap err.\n"); + result = -EFAULT; + goto fail_remap; + } + + result = ipa_init_hw(); + if (result) { + IPAERR(":error initializing HW.\n"); + result = -ENODEV; + goto fail_init_hw; + } + IPADBG("IPA HW initialization sequence completed"); + + ipa_ctx->ipa_num_pipes = ipa_get_num_pipes(); + ipa_ctx->ctrl->ipa_sram_read_settings(); + IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n", + ipa_ctx->smem_sz, ipa_ctx->smem_restricted_bytes); + + if (ipa_ctx->smem_reqd_sz > + ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes) { + IPAERR("SW expect more core memory, needed %d, avail %d\n", + ipa_ctx->smem_reqd_sz, ipa_ctx->smem_sz - + ipa_ctx->smem_restricted_bytes); + result = -ENOMEM; + goto fail_init_hw; + } + + mutex_init(&ipa_ctx->ipa_active_clients.mutex); + spin_lock_init(&ipa_ctx->ipa_active_clients.spinlock); + IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE"); + ipa2_active_clients_log_inc(&log_info, false); + ipa_ctx->ipa_active_clients.cnt = 1; + + /* Create workqueues for power management */ + ipa_ctx->power_mgmt_wq = + create_singlethread_workqueue("ipa_power_mgmt"); + if (!ipa_ctx->power_mgmt_wq) { + IPAERR("failed to create power mgmt wq\n"); + result = -ENOMEM; + goto fail_init_hw; + } + + ipa_ctx->sps_power_mgmt_wq = + create_singlethread_workqueue("sps_ipa_power_mgmt"); + if (!ipa_ctx->sps_power_mgmt_wq) { + IPAERR("failed to create sps power mgmt wq\n"); + result = -ENOMEM; + goto fail_create_sps_wq; + } + + /* register IPA with SPS driver */ + bam_props.phys_addr = resource_p->bam_mem_base; + bam_props.virt_size = resource_p->bam_mem_size; + bam_props.irq = resource_p->bam_irq; + bam_props.num_pipes = ipa_ctx->ipa_num_pipes; + bam_props.summing_threshold = IPA_SUMMING_THRESHOLD; + bam_props.event_threshold = IPA_EVENT_THRESHOLD; + bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING; + if 
(ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) + bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP; + if (ipa_ctx->ipa_bam_remote_mode) + bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE; + if (!ipa_ctx->smmu_s1_bypass) + bam_props.options |= SPS_BAM_SMMU_EN; + bam_props.options |= SPS_BAM_CACHED_WP; + bam_props.ee = resource_p->ee; + bam_props.ipc_loglevel = 3; + + result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle); + if (result) { + IPAERR(":bam register err.\n"); + result = -EPROBE_DEFER; + goto fail_register_bam_device; + } + IPADBG("IPA BAM is registered\n"); + + if (ipa_setup_bam_cfg(resource_p)) { + IPAERR(":bam cfg err.\n"); + result = -ENODEV; + goto fail_flt_rule_cache; + } + + /* init the lookaside cache */ + ipa_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT", + sizeof(struct ipa_flt_entry), 0, 0, NULL); + if (!ipa_ctx->flt_rule_cache) { + IPAERR(":ipa flt cache create failed\n"); + result = -ENOMEM; + goto fail_flt_rule_cache; + } + ipa_ctx->rt_rule_cache = kmem_cache_create("IPA_RT", + sizeof(struct ipa_rt_entry), 0, 0, NULL); + if (!ipa_ctx->rt_rule_cache) { + IPAERR(":ipa rt cache create failed\n"); + result = -ENOMEM; + goto fail_rt_rule_cache; + } + ipa_ctx->hdr_cache = kmem_cache_create("IPA_HDR", + sizeof(struct ipa_hdr_entry), 0, 0, NULL); + if (!ipa_ctx->hdr_cache) { + IPAERR(":ipa hdr cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_cache; + } + ipa_ctx->hdr_offset_cache = + kmem_cache_create("IPA_HDR_OFFSET", + sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL); + if (!ipa_ctx->hdr_offset_cache) { + IPAERR(":ipa hdr off cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_offset_cache; + } + ipa_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX", + sizeof(struct ipa_hdr_proc_ctx_entry), 0, 0, NULL); + if (!ipa_ctx->hdr_proc_ctx_cache) { + IPAERR(":ipa hdr proc ctx cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_proc_ctx_cache; + } + ipa_ctx->hdr_proc_ctx_offset_cache = + 
kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET", + sizeof(struct ipa_hdr_proc_ctx_offset_entry), 0, 0, NULL); + if (!ipa_ctx->hdr_proc_ctx_offset_cache) { + IPAERR(":ipa hdr proc ctx off cache create failed\n"); + result = -ENOMEM; + goto fail_hdr_proc_ctx_offset_cache; + } + ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL", + sizeof(struct ipa_rt_tbl), 0, 0, NULL); + if (!ipa_ctx->rt_tbl_cache) { + IPAERR(":ipa rt tbl cache create failed\n"); + result = -ENOMEM; + goto fail_rt_tbl_cache; + } + ipa_ctx->tx_pkt_wrapper_cache = + kmem_cache_create("IPA_TX_PKT_WRAPPER", + sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL); + if (!ipa_ctx->tx_pkt_wrapper_cache) { + IPAERR(":ipa tx pkt wrapper cache create failed\n"); + result = -ENOMEM; + goto fail_tx_pkt_wrapper_cache; + } + ipa_ctx->rx_pkt_wrapper_cache = + kmem_cache_create("IPA_RX_PKT_WRAPPER", + sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL); + if (!ipa_ctx->rx_pkt_wrapper_cache) { + IPAERR(":ipa rx pkt wrapper cache create failed\n"); + result = -ENOMEM; + goto fail_rx_pkt_wrapper_cache; + } + + /* Setup DMA pool */ + ipa_ctx->dma_pool = dma_pool_create("ipa_tx", ipa_ctx->pdev, + IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec), + 0, 0); + if (!ipa_ctx->dma_pool) { + IPAERR("cannot alloc DMA pool.\n"); + result = -ENOMEM; + goto fail_dma_pool; + } + + ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl; + ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl; + + /* init the various list heads */ + INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list); + INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list); + INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list); + for (i = 0; i < IPA_HDR_BIN_MAX; i++) { + INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]); + INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]); + } + INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list); + for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { + 
INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i]); + INIT_LIST_HEAD( + &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i]); + } + INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list); + INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list); + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4]; + INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list); + flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl; + + flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6]; + INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list); + flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl; + } + + rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4]; + INIT_LIST_HEAD(&rset->head_rt_tbl_list); + rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6]; + INIT_LIST_HEAD(&rset->head_rt_tbl_list); + + INIT_LIST_HEAD(&ipa_ctx->intf_list); + INIT_LIST_HEAD(&ipa_ctx->msg_list); + INIT_LIST_HEAD(&ipa_ctx->pull_msg_list); + init_waitqueue_head(&ipa_ctx->msg_waitq); + mutex_init(&ipa_ctx->msg_lock); + + /* store wlan client-connect-msg-list */ + INIT_LIST_HEAD(&ipa_ctx->msg_wlan_client_list); + mutex_init(&ipa_ctx->msg_wlan_client_lock); + + mutex_init(&ipa_ctx->lock); + mutex_init(&ipa_ctx->nat_mem.lock); + mutex_init(&ipa_ctx->ipa_cne_evt_lock); + + idr_init(&ipa_ctx->ipa_idr); + spin_lock_init(&ipa_ctx->idr_lock); + + /* wlan related member */ + memset(&ipa_ctx->wc_memb, 0, sizeof(ipa_ctx->wc_memb)); + spin_lock_init(&ipa_ctx->wc_memb.wlan_spinlock); + spin_lock_init(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock); + INIT_LIST_HEAD(&ipa_ctx->wc_memb.wlan_comm_desc_list); + /* + * setup an empty routing table in system memory, this will be used + * to delete a routing table cleanly and safely + */ + ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE; + + ipa_ctx->empty_rt_tbl_mem.base = + dma_zalloc_coherent(ipa_ctx->pdev, + ipa_ctx->empty_rt_tbl_mem.size, + &ipa_ctx->empty_rt_tbl_mem.phys_base, + GFP_KERNEL); + if (!ipa_ctx->empty_rt_tbl_mem.base) { + IPAERR("DMA buff alloc fail %d 
bytes for empty routing tbl\n", + ipa_ctx->empty_rt_tbl_mem.size); + result = -ENOMEM; + goto fail_apps_pipes; + } + IPADBG("empty routing table was allocated in system memory"); + + /* setup the A5-IPA pipes */ + if (ipa_setup_apps_pipes()) { + IPAERR(":failed to setup IPA-Apps pipes.\n"); + result = -ENODEV; + goto fail_empty_rt_tbl; + } + IPADBG("IPA System2Bam pipes were connected\n"); + + if (ipa_init_flt_block()) { + IPAERR("fail to setup dummy filter rules\n"); + result = -ENODEV; + goto fail_empty_rt_tbl; + } + IPADBG("filter block was set with dummy filter rules"); + + /* setup the IPA pipe mem pool */ + if (resource_p->ipa_pipe_mem_size) + ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst, + resource_p->ipa_pipe_mem_size); + + ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME); + + result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME); + if (result) { + IPAERR("alloc_chrdev_region err.\n"); + result = -ENODEV; + goto fail_alloc_chrdev_region; + } + + ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num, + ipa_ctx, DRV_NAME); + if (IS_ERR(ipa_ctx->dev)) { + IPAERR(":device_create err.\n"); + result = -ENODEV; + goto fail_device_create; + } + + cdev_init(&ipa_ctx->cdev, &ipa_drv_fops); + ipa_ctx->cdev.owner = THIS_MODULE; + ipa_ctx->cdev.ops = &ipa_drv_fops; /* from LDD3 */ + + result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1); + if (result) { + IPAERR(":cdev_add err=%d\n", -result); + result = -ENODEV; + goto fail_cdev_add; + } + IPADBG("ipa cdev added successful. major:%d minor:%d\n", + MAJOR(ipa_ctx->dev_num), + MINOR(ipa_ctx->dev_num)); + + if (create_nat_device()) { + IPAERR("unable to create nat device\n"); + result = -ENODEV; + goto fail_nat_dev_add; + } + + + + /* Create a wakeup source. */ + wakeup_source_init(&ipa_ctx->w_lock, "IPA_WS"); + spin_lock_init(&ipa_ctx->wakelock_ref_cnt.spinlock); + + /* Initialize the SPS PM lock. 
*/ + mutex_init(&ipa_ctx->sps_pm.sps_pm_lock); + + /* Initialize IPA RM (resource manager) */ + result = ipa_rm_initialize(); + if (result) { + IPAERR("RM initialization failed (%d)\n", -result); + result = -ENODEV; + goto fail_ipa_rm_init; + } + IPADBG("IPA resource manager initialized"); + + result = ipa_create_apps_resource(); + if (result) { + IPAERR("Failed to create APPS_CONS resource\n"); + result = -ENODEV; + goto fail_create_apps_resource; + } + + /*register IPA IRQ handler*/ + result = ipa_interrupts_init(resource_p->ipa_irq, 0, + master_dev); + if (result) { + IPAERR("ipa interrupts initialization failed\n"); + result = -ENODEV; + goto fail_ipa_interrupts_init; + } + + /*add handler for suspend interrupt*/ + result = ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, + ipa_suspend_handler, false, NULL); + if (result) { + IPAERR("register handler for suspend interrupt failed\n"); + result = -ENODEV; + goto fail_add_interrupt_handler; + } + + if (ipa_ctx->use_ipa_teth_bridge) { + /* Initialize the tethering bridge driver */ + result = teth_bridge_driver_init(); + if (result) { + IPAERR(":teth_bridge init failed (%d)\n", -result); + result = -ENODEV; + goto fail_add_interrupt_handler; + } + IPADBG("teth_bridge initialized"); + } + + ipa_debugfs_init(); + + result = ipa_uc_interface_init(); + if (result) + IPAERR(":ipa Uc interface init failed (%d)\n", -result); + else + IPADBG(":ipa Uc interface init ok\n"); + + result = ipa2_wdi_init(); + if (result) + IPAERR(":wdi init failed (%d)\n", -result); + else + IPADBG(":wdi init ok\n"); + + result = ipa_ntn_init(); + if (result) + IPAERR(":ntn init failed (%d)\n", -result); + else + IPADBG(":ntn init ok\n"); + + ipa_ctx->q6_proxy_clk_vote_valid = true; + + ipa_register_panic_hdlr(); + + pr_info("IPA driver initialization was successful.\n"); + + return 0; + +fail_add_interrupt_handler: + free_irq(resource_p->ipa_irq, master_dev); +fail_ipa_interrupts_init: + ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS); 
+fail_create_apps_resource: + ipa_rm_exit(); +fail_ipa_rm_init: +fail_nat_dev_add: + cdev_del(&ipa_ctx->cdev); +fail_cdev_add: + device_destroy(ipa_ctx->class, ipa_ctx->dev_num); +fail_device_create: + unregister_chrdev_region(ipa_ctx->dev_num, 1); +fail_alloc_chrdev_region: + if (ipa_ctx->pipe_mem_pool) + gen_pool_destroy(ipa_ctx->pipe_mem_pool); +fail_empty_rt_tbl: + ipa_teardown_apps_pipes(); + dma_free_coherent(ipa_ctx->pdev, + ipa_ctx->empty_rt_tbl_mem.size, + ipa_ctx->empty_rt_tbl_mem.base, + ipa_ctx->empty_rt_tbl_mem.phys_base); +fail_apps_pipes: + idr_destroy(&ipa_ctx->ipa_idr); +fail_dma_pool: + kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache); +fail_rx_pkt_wrapper_cache: + kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache); +fail_tx_pkt_wrapper_cache: + kmem_cache_destroy(ipa_ctx->rt_tbl_cache); +fail_rt_tbl_cache: + kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_offset_cache); +fail_hdr_proc_ctx_offset_cache: + kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_cache); +fail_hdr_proc_ctx_cache: + kmem_cache_destroy(ipa_ctx->hdr_offset_cache); +fail_hdr_offset_cache: + kmem_cache_destroy(ipa_ctx->hdr_cache); +fail_hdr_cache: + kmem_cache_destroy(ipa_ctx->rt_rule_cache); +fail_rt_rule_cache: + kmem_cache_destroy(ipa_ctx->flt_rule_cache); +fail_flt_rule_cache: + sps_deregister_bam_device(ipa_ctx->bam_handle); +fail_register_bam_device: + destroy_workqueue(ipa_ctx->sps_power_mgmt_wq); +fail_create_sps_wq: + destroy_workqueue(ipa_ctx->power_mgmt_wq); +fail_init_hw: + iounmap(ipa_ctx->mmio); +fail_remap: + ipa_disable_clks(); +fail_clk: + ipa2_active_clients_log_destroy(); +fail_init_active_client: + msm_bus_scale_unregister_client(ipa_ctx->ipa_bus_hdl); + if (bus_scale_table) { + msm_bus_cl_clear_pdata(bus_scale_table); + bus_scale_table = NULL; + } +fail_bus_reg: +fail_bind: + kfree(ipa_ctx->ctrl); +fail_mem_ctrl: + ipc_log_context_destroy(ipa_ctx->logbuf); + kfree(ipa_ctx); + ipa_ctx = NULL; +fail_mem_ctx: + return result; +} + +static int get_ipa_dts_configuration(struct 
platform_device *pdev, + struct ipa_plat_drv_res *ipa_drv_res) +{ + int result; + struct resource *resource; + + /* initialize ipa_res */ + ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST; + ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE; + ipa_drv_res->ipa_hw_type = 0; + ipa_drv_res->ipa_hw_mode = 0; + ipa_drv_res->ipa_uc_monitor_holb = false; + ipa_drv_res->ipa_bam_remote_mode = false; + ipa_drv_res->modem_cfg_emb_pipe_flt = false; + ipa_drv_res->ipa_wdi2 = false; + ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; + ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; + + /* Get IPA HW Version */ + result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver", + &ipa_drv_res->ipa_hw_type); + if ((result) || (ipa_drv_res->ipa_hw_type == 0)) { + IPAERR(":get resource failed for ipa-hw-ver!\n"); + return -ENODEV; + } + IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type); + + /* Get IPA HW mode */ + result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode", + &ipa_drv_res->ipa_hw_mode); + if (result) + IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n"); + else + IPADBG(": found ipa_drv_res->ipa_hw_mode = %d", + ipa_drv_res->ipa_hw_mode); + + /* Check ipa_uc_monitor_holb enabled or disabled */ + ipa_drv_res->ipa_uc_monitor_holb = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-uc-monitor-holb"); + IPADBG(": ipa uc monitor holb = %s\n", + ipa_drv_res->ipa_uc_monitor_holb + ? 
"Enabled" : "Disabled"); + + /* Get IPA WAN / LAN RX pool sizes */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,wan-rx-ring-size", + &ipa_drv_res->wan_rx_ring_size); + if (result) + IPADBG("using default for wan-rx-ring-size = %u\n", + ipa_drv_res->wan_rx_ring_size); + else + IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u", + ipa_drv_res->wan_rx_ring_size); + + result = of_property_read_u32(pdev->dev.of_node, + "qcom,lan-rx-ring-size", + &ipa_drv_res->lan_rx_ring_size); + if (result) + IPADBG("using default for lan-rx-ring-size = %u\n", + ipa_drv_res->lan_rx_ring_size); + else + IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u", + ipa_drv_res->lan_rx_ring_size); + + ipa_drv_res->use_ipa_teth_bridge = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-ipa-tethering-bridge"); + IPADBG(": using TBDr = %s", + ipa_drv_res->use_ipa_teth_bridge + ? "True" : "False"); + + ipa_drv_res->ipa_bam_remote_mode = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-bam-remote-mode"); + IPADBG(": ipa bam remote mode = %s\n", + ipa_drv_res->ipa_bam_remote_mode + ? "True" : "False"); + + ipa_drv_res->modem_cfg_emb_pipe_flt = + of_property_read_bool(pdev->dev.of_node, + "qcom,modem-cfg-emb-pipe-flt"); + IPADBG(": modem configure embedded pipe filtering = %s\n", + ipa_drv_res->modem_cfg_emb_pipe_flt + ? "True" : "False"); + + ipa_drv_res->ipa_wdi2 = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-wdi2"); + IPADBG(": WDI-2.0 = %s\n", + ipa_drv_res->ipa_wdi2 + ? "True" : "False"); + + ipa_drv_res->skip_uc_pipe_reset = + of_property_read_bool(pdev->dev.of_node, + "qcom,skip-uc-pipe-reset"); + IPADBG(": skip uC pipe reset = %s\n", + ipa_drv_res->skip_uc_pipe_reset + ? "True" : "False"); + + ipa_drv_res->use_dma_zone = + of_property_read_bool(pdev->dev.of_node, + "qcom,use-dma-zone"); + IPADBG(": use dma zone = %s\n", + ipa_drv_res->use_dma_zone + ? 
"True" : "False"); + + ipa_drv_res->tethered_flow_control = + of_property_read_bool(pdev->dev.of_node, + "qcom,tethered-flow-control"); + IPADBG(": Use apps based flow control = %s\n", + ipa_drv_res->tethered_flow_control + ? "True" : "False"); + + /* Get IPA wrapper address */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ipa-base"); + if (!resource) { + IPAERR(":get resource failed for ipa-base!\n"); + return -ENODEV; + } + ipa_drv_res->ipa_mem_base = resource->start; + ipa_drv_res->ipa_mem_size = resource_size(resource); + IPADBG(": ipa-base = 0x%x, size = 0x%x\n", + ipa_drv_res->ipa_mem_base, + ipa_drv_res->ipa_mem_size); + + smmu_info.ipa_base = ipa_drv_res->ipa_mem_base; + smmu_info.ipa_size = ipa_drv_res->ipa_mem_size; + + /* Get IPA BAM address */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "bam-base"); + if (!resource) { + IPAERR(":get resource failed for bam-base!\n"); + return -ENODEV; + } + ipa_drv_res->bam_mem_base = resource->start; + ipa_drv_res->bam_mem_size = resource_size(resource); + IPADBG(": bam-base = 0x%x, size = 0x%x\n", + ipa_drv_res->bam_mem_base, + ipa_drv_res->bam_mem_size); + + /* Get IPA pipe mem start ofst */ + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ipa-pipe-mem"); + if (!resource) { + IPADBG(":not using pipe memory - resource nonexisting\n"); + } else { + ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start; + ipa_drv_res->ipa_pipe_mem_size = resource_size(resource); + IPADBG(":using pipe memory - at 0x%x of size 0x%x\n", + ipa_drv_res->ipa_pipe_mem_start_ofst, + ipa_drv_res->ipa_pipe_mem_size); + } + + /* Get IPA IRQ number */ + resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, + "ipa-irq"); + if (!resource) { + IPAERR(":get resource failed for ipa-irq!\n"); + return -ENODEV; + } + ipa_drv_res->ipa_irq = resource->start; + IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq); + + /* Get IPA BAM IRQ number */ + resource = platform_get_resource_byname(pdev, 
IORESOURCE_IRQ, + "bam-irq"); + if (!resource) { + IPAERR(":get resource failed for bam-irq!\n"); + return -ENODEV; + } + ipa_drv_res->bam_irq = resource->start; + IPADBG(":ibam-irq = %d\n", ipa_drv_res->bam_irq); + + result = of_property_read_u32(pdev->dev.of_node, "qcom,ee", + &ipa_drv_res->ee); + if (result) + ipa_drv_res->ee = 0; + + /* Get IPA RX Polling Timeout Seconds */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,rx-polling-sleep-ms", + &ipa_drv_res->ipa_rx_polling_sleep_msec); + + if (result) { + ipa_drv_res->ipa_rx_polling_sleep_msec = ONE_MSEC; + IPADBG("using default polling timeout of 1MSec\n"); + } else { + IPADBG(": found ipa_drv_res->ipa_rx_polling_sleep_sec = %d", + ipa_drv_res->ipa_rx_polling_sleep_msec); + } + + /* Get IPA Polling Iteration */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,ipa-polling-iteration", + &ipa_drv_res->ipa_polling_iteration); + if (result) { + ipa_drv_res->ipa_polling_iteration = MAX_POLLING_ITERATION; + IPADBG("using default polling iteration\n"); + } else { + IPADBG(": found ipa_drv_res->ipa_polling_iteration = %d", + ipa_drv_res->ipa_polling_iteration); + } + + return 0; +} + +static int ipa_smmu_wlan_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx(); + int atomic_ctx = 1; + int fast = 1; + int bypass = 1; + int ret; + + IPADBG("sub pdev=%p\n", dev); + + cb->dev = dev; + cb->iommu = iommu_domain_alloc(&platform_bus_type); + if (!cb->iommu) { + IPAERR("could not alloc iommu domain\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + cb->valid = true; + + if (smmu_info.s1_bypass) { + if (iommu_domain_set_attr(cb->iommu, + DOMAIN_ATTR_S1_BYPASS, + &bypass)) { + IPAERR("couldn't set bypass\n"); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU S1 BYPASS\n"); + } else { + if (iommu_domain_set_attr(cb->iommu, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx)) { + IPAERR("couldn't set domain as atomic\n"); + cb->valid = 
false; + return -EIO; + } + IPADBG("SMMU atomic set\n"); + if (smmu_info.fast_map) { + if (iommu_domain_set_attr(cb->iommu, + DOMAIN_ATTR_FAST, + &fast)) { + IPAERR("couldn't set fast map\n"); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); + } + } + + ret = iommu_attach_device(cb->iommu, dev); + if (ret) { + IPAERR("could not attach device ret=%d\n", ret); + cb->valid = false; + return ret; + } + + if (!smmu_info.s1_bypass) { + IPAERR("map IPA region to WLAN_CB IOMMU\n"); + ret = ipa_iommu_map(cb->iommu, + rounddown(smmu_info.ipa_base, PAGE_SIZE), + rounddown(smmu_info.ipa_base, PAGE_SIZE), + roundup(smmu_info.ipa_size, PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + if (ret) { + IPAERR("map IPA to WLAN_CB IOMMU failed ret=%d\n", + ret); + arm_iommu_detach_device(cb->dev); + cb->valid = false; + return ret; + } + } + + return 0; +} + +static int ipa_smmu_uc_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx(); + int atomic_ctx = 1; + int ret; + int fast = 1; + int bypass = 1; + u32 iova_ap_mapping[2]; + + IPADBG("UC CB PROBE sub pdev=%p\n", dev); + + ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping", + iova_ap_mapping, 2); + if (ret) { + IPAERR("Fail to read UC start/size iova addresses\n"); + return ret; + } + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; + IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size); + + if (dma_set_mask(dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + IPAERR("DMA set mask failed\n"); + return -EOPNOTSUPP; + } + + IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev); + + cb->dev = dev; + cb->mapping = arm_iommu_create_mapping(&platform_bus_type, + cb->va_start, cb->va_size); + if (IS_ERR_OR_NULL(cb->mapping)) { + IPADBG("Fail to create mapping\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + 
IPADBG("SMMU mapping created\n"); + cb->valid = true; + + IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev); + if (smmu_info.s1_bypass) { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_S1_BYPASS, + &bypass)) { + IPAERR("couldn't set bypass\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU S1 BYPASS\n"); + } else { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx)) { + IPAERR("couldn't set domain as atomic\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU atomic set\n"); + if (smmu_info.fast_map) { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_FAST, + &fast)) { + IPAERR("couldn't set fast map\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); + } + } + + IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev); + ret = arm_iommu_attach_device(cb->dev, cb->mapping); + if (ret) { + IPAERR("could not attach device ret=%d\n", ret); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return ret; + } + + cb->next_addr = cb->va_end; + ipa_ctx->uc_pdev = dev; + + IPADBG("UC CB PROBE pdev=%p attached\n", dev); + return 0; +} + +static int ipa_smmu_ap_cb_probe(struct device *dev) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(); + int result; + int atomic_ctx = 1; + int fast = 1; + int bypass = 1; + u32 iova_ap_mapping[2]; + + IPADBG("AP CB probe: sub pdev=%p\n", dev); + + result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping", + iova_ap_mapping, 2); + if (result) { + IPAERR("Fail to read AP start/size iova addresses\n"); + return result; + } + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; + IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size); + + if (dma_set_mask(dev, DMA_BIT_MASK(32)) || + 
dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { + IPAERR("DMA set mask failed\n"); + return -EOPNOTSUPP; + } + + cb->dev = dev; + cb->mapping = arm_iommu_create_mapping(&platform_bus_type, + cb->va_start, + cb->va_size); + if (IS_ERR_OR_NULL(cb->mapping)) { + IPADBG("Fail to create mapping\n"); + /* assume this failure is because iommu driver is not ready */ + return -EPROBE_DEFER; + } + IPADBG("SMMU mapping created\n"); + cb->valid = true; + + if (smmu_info.s1_bypass) { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_S1_BYPASS, + &bypass)) { + IPAERR("couldn't set bypass\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU S1 BYPASS\n"); + } else { + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx)) { + IPAERR("couldn't set domain as atomic\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU atomic set\n"); + + if (iommu_domain_set_attr(cb->mapping->domain, + DOMAIN_ATTR_FAST, + &fast)) { + IPAERR("couldn't set fast map\n"); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return -EIO; + } + IPADBG("SMMU fast map set\n"); + } + + result = arm_iommu_attach_device(cb->dev, cb->mapping); + if (result) { + IPAERR("couldn't attach to IOMMU ret=%d\n", result); + cb->valid = false; + return result; + } + + if (!smmu_info.s1_bypass) { + IPAERR("map IPA region to AP_CB IOMMU\n"); + result = ipa_iommu_map(cb->mapping->domain, + rounddown(smmu_info.ipa_base, PAGE_SIZE), + rounddown(smmu_info.ipa_base, PAGE_SIZE), + roundup(smmu_info.ipa_size, PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); + if (result) { + IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n", + result); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return result; + } + } + + smmu_info.present = true; + + if (!bus_scale_table) + bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev); + + /* Proceed to real initialization */ + 
result = ipa_init(&ipa_res, dev); + if (result) { + IPAERR("ipa_init failed\n"); + arm_iommu_detach_device(cb->dev); + arm_iommu_release_mapping(cb->mapping); + cb->valid = false; + return result; + } + + return result; +} + +int ipa_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + int result; + struct device *dev = &pdev_p->dev; + + IPADBG("IPA driver probing started\n"); + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) + return ipa_smmu_ap_cb_probe(dev); + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb")) + return ipa_smmu_wlan_cb_probe(dev); + + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb")) + return ipa_smmu_uc_cb_probe(dev); + + master_dev = dev; + if (!ipa_pdev) + ipa_pdev = pdev_p; + + result = get_ipa_dts_configuration(pdev_p, &ipa_res); + if (result) { + IPAERR("IPA dts parsing failed\n"); + return result; + } + + result = ipa2_bind_api_controller(ipa_res.ipa_hw_type, api_ctrl); + if (result) { + IPAERR("IPA API binding failed\n"); + return result; + } + + if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) { + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,smmu-s1-bypass")) + smmu_info.s1_bypass = true; + if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,smmu-fast-map")) + smmu_info.fast_map = true; + smmu_info.arm_smmu = true; + pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n", + smmu_info.s1_bypass, smmu_info.fast_map); + result = of_platform_populate(pdev_p->dev.of_node, + pdrv_match, NULL, &pdev_p->dev); + } else if (of_property_read_bool(pdev_p->dev.of_node, + "qcom,msm-smmu")) { + IPAERR("Legacy IOMMU not supported\n"); + result = -EOPNOTSUPP; + } else { + if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(&pdev_p->dev, + DMA_BIT_MASK(32))) { + IPAERR("DMA set mask failed\n"); + return -EOPNOTSUPP; + } + + if (!bus_scale_table) + bus_scale_table = 
msm_bus_cl_get_pdata(pdev_p); + + /* Proceed to real initialization */ + result = ipa_init(&ipa_res, dev); + if (result) { + IPAERR("ipa_init failed\n"); + return result; + } + } + + return result; +} + +/** + * ipa2_ap_suspend() - suspend callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP suspend + * operation is invoked, usually by pressing a suspend button. + * + * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP. + * This will postpone the suspend operation until IPA is no longer used by AP. + */ +int ipa2_ap_suspend(struct device *dev) +{ + int i; + + IPADBG("Enter...\n"); + + /* In case there is a tx/rx handler in polling mode fail to suspend */ + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + if (ipa_ctx->ep[i].sys && + atomic_read(&ipa_ctx->ep[i].sys->curr_polling_state)) { + IPAERR("EP %d is in polling state, do not suspend\n", + i); + return -EAGAIN; + } + } + + /* release SPS IPA resource without waiting for inactivity timer */ + atomic_set(&ipa_ctx->sps_pm.eot_activity, 0); + ipa_sps_release_resource(NULL); + IPADBG("Exit\n"); + + return 0; +} + +/** + * ipa2_ap_resume() - resume callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP resume + * operation is invoked. + * + * Always returns 0 since resume should always succeed. 
+ */ +int ipa2_ap_resume(struct device *dev) +{ + return 0; +} + +struct ipa_context *ipa_get_ctx(void) +{ + return ipa_ctx; +} + +int ipa_iommu_map(struct iommu_domain *domain, + unsigned long iova, phys_addr_t paddr, size_t size, int prot) +{ + struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx(); + struct ipa_smmu_cb_ctx *uc_cb = ipa2_get_uc_smmu_ctx(); + + IPADBG("domain =0x%p iova 0x%lx\n", domain, iova); + IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size); + + /* Checking the address overlapping */ + if (domain == ipa2_get_smmu_domain()) { + if (iova >= ap_cb->va_start && iova < ap_cb->va_end) + IPAERR("iommu AP overlap addr 0x%lx\n", iova); + } else if (domain == ipa2_get_wlan_smmu_domain()) { + /* wlan is one time map */ + } else if (domain == ipa2_get_uc_smmu_domain()) { + if (iova >= uc_cb->va_start && iova < uc_cb->va_end) + IPAERR("iommu uC overlap addr 0x%lx\n", iova); + } else { + IPAERR("Unexpected domain 0x%p\n", domain); + ipa_assert(); + return -EFAULT; + } + + return iommu_map(domain, iova, paddr, size, prot); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA HW device driver"); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c new file mode 100644 index 000000000000..df54de2184a0 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c @@ -0,0 +1,930 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2017, 2020, The Linux Foundation. All rights reserved. 
+ */ +#include +#include +#include +#include "ipa_i.h" + +/* + * These values were determined empirically and shows good E2E bi- + * directional throughputs + */ +#define IPA_HOLB_TMR_EN 0x1 +#define IPA_HOLB_TMR_DIS 0x0 +#define IPA_HOLB_TMR_DEFAULT_VAL 0x1ff + +#define IPA_PKT_FLUSH_TO_US 100 + +int ipa_enable_data_path(u32 clnt_hdl) +{ + struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl]; + struct ipa_ep_cfg_holb holb_cfg; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + int res = 0; + + IPADBG("Enabling data path\n"); + /* From IPA 2.0, disable HOLB */ + if ((ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) && + IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + res = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + /* Enable the pipe */ + if (IPA_CLIENT_IS_CONS(ep->client) && + (ep->keep_ipa_awake || + ipa_ctx->resume_on_connect[ep->client] || + !ipa_should_pipe_be_suspended(ep->client))) { + memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = false; + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + + return res; +} + +int ipa_disable_data_path(u32 clnt_hdl) +{ + struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl]; + struct ipa_ep_cfg_holb holb_cfg; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 aggr_init; + int res = 0; + + IPADBG("Disabling data path\n"); + /* On IPA 2.0, enable HOLB in order to prevent IPA from stalling */ + if ((ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) && + IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_EN; + holb_cfg.tmr_val = 0; + res = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + /* Suspend the pipe */ + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_suspend = true; + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + + udelay(IPA_PKT_FLUSH_TO_US); + aggr_init = ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v2_0(clnt_hdl)); 
+ if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >> + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) == IPA_ENABLE_AGGR) { + res = ipa_tag_aggr_force_close(clnt_hdl); + if (res) { + IPAERR("tag process timeout, client:%d err:%d\n", + clnt_hdl, res); + ipa_assert(); + } + } + + return res; +} + +int ipa2_enable_force_clear(u32 request_id, bool throttle_source, + u32 source_pipe_bitmask) +{ + struct ipa_enable_force_clear_datapath_req_msg_v01 req; + int result; + + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + req.source_pipe_bitmask = source_pipe_bitmask; + if (throttle_source) { + req.throttle_source_valid = 1; + req.throttle_source = 1; + } + result = qmi_enable_force_clear_datapath_send(&req); + if (result) { + IPAERR("qmi_enable_force_clear_datapath_send failed %d\n", + result); + return result; + } + + return 0; +} + +int ipa2_disable_force_clear(u32 request_id) +{ + struct ipa_disable_force_clear_datapath_req_msg_v01 req; + int result; + + memset(&req, 0, sizeof(req)); + req.request_id = request_id; + result = qmi_disable_force_clear_datapath_send(&req); + if (result) { + IPAERR("qmi_disable_force_clear_datapath_send failed %d\n", + result); + return result; + } + + return 0; +} + +static int ipa2_smmu_map_peer_bam(unsigned long dev) +{ + phys_addr_t base; + u32 size; + struct iommu_domain *smmu_domain; + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(); + + if (!ipa_ctx->smmu_s1_bypass) { + if (ipa_ctx->peer_bam_map_cnt == 0) { + if (sps_get_bam_addr(dev, &base, &size)) { + IPAERR("Fail to get addr\n"); + return -EINVAL; + } + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + if (ipa_iommu_map(smmu_domain, + cb->va_end, + rounddown(base, PAGE_SIZE), + roundup(size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE | + IOMMU_MMIO)) { + IPAERR("Fail to ipa_iommu_map\n"); + return -EINVAL; + } + } + + ipa_ctx->peer_bam_iova = cb->va_end; + ipa_ctx->peer_bam_pa = base; + ipa_ctx->peer_bam_map_size = size; + 
ipa_ctx->peer_bam_dev = dev; + + IPADBG("Peer bam %lu mapped\n", dev); + } else { + WARN_ON(dev != ipa_ctx->peer_bam_dev); + } + + ipa_ctx->peer_bam_map_cnt++; + } + + return 0; +} + +static int ipa_connect_configure_sps(const struct ipa_connect_params *in, + struct ipa_ep_context *ep, int ipa_ep_idx) +{ + int result = -EFAULT; + + /* Default Config */ + ep->ep_hdl = sps_alloc_endpoint(); + + if (ipa2_smmu_map_peer_bam(in->client_bam_hdl)) { + IPAERR("fail to iommu map peer BAM.\n"); + return -EFAULT; + } + + if (ep->ep_hdl == NULL) { + IPAERR("SPS EP alloc failed EP.\n"); + return -EFAULT; + } + + result = sps_get_config(ep->ep_hdl, + &ep->connect); + if (result) { + IPAERR("fail to get config.\n"); + return -EFAULT; + } + + /* Specific Config */ + if (IPA_CLIENT_IS_CONS(in->client)) { + ep->connect.mode = SPS_MODE_SRC; + ep->connect.destination = + in->client_bam_hdl; + ep->connect.dest_iova = ipa_ctx->peer_bam_iova; + ep->connect.source = ipa_ctx->bam_handle; + ep->connect.dest_pipe_index = + in->client_ep_idx; + ep->connect.src_pipe_index = ipa_ep_idx; + } else { + ep->connect.mode = SPS_MODE_DEST; + ep->connect.source = in->client_bam_hdl; + ep->connect.source_iova = ipa_ctx->peer_bam_iova; + ep->connect.destination = ipa_ctx->bam_handle; + ep->connect.src_pipe_index = in->client_ep_idx; + ep->connect.dest_pipe_index = ipa_ep_idx; + } + + return 0; +} + +static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in, + struct sps_mem_buffer *mem_buff_ptr, + bool *fifo_in_pipe_mem_ptr, + u32 *fifo_pipe_mem_ofst_ptr, + u32 fifo_size, int ipa_ep_idx) +{ + dma_addr_t dma_addr; + u32 ofst; + int result = -EFAULT; + struct iommu_domain *smmu_domain; + + mem_buff_ptr->size = fifo_size; + if (in->pipe_mem_preferred) { + if (ipa_pipe_mem_alloc(&ofst, fifo_size)) { + IPAERR("FIFO pipe mem alloc fail ep %u\n", + ipa_ep_idx); + mem_buff_ptr->base = + dma_alloc_coherent(ipa_ctx->pdev, + mem_buff_ptr->size, + &dma_addr, GFP_KERNEL); + } else { + 
memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer)); + result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst, + fifo_size, 1); + WARN_ON(result); + *fifo_in_pipe_mem_ptr = true; + dma_addr = mem_buff_ptr->phys_base; + *fifo_pipe_mem_ofst_ptr = ofst; + } + } else { + mem_buff_ptr->base = + dma_alloc_coherent(ipa_ctx->pdev, mem_buff_ptr->size, + &dma_addr, GFP_KERNEL); + } + if (ipa_ctx->smmu_s1_bypass) { + mem_buff_ptr->phys_base = dma_addr; + } else { + mem_buff_ptr->iova = dma_addr; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + mem_buff_ptr->phys_base = + iommu_iova_to_phys(smmu_domain, dma_addr); + } + } + if (mem_buff_ptr->base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -EFAULT; + } + + return 0; +} + +/** + * ipa2_connect() - low-level IPA client connect + * @in: [in] input parameters from client + * @sps: [out] sps output from IPA needed by client for sps_connect + * @clnt_hdl: [out] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to connect to + * IPA in BAM-BAM mode. these peripherals are USB and HSIC. this api + * expects caller to take responsibility to add any needed headers, routing + * and filtering tables and rules as needed. 
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_connect(const struct ipa_connect_params *in, + struct ipa_sps_params *sps, u32 *clnt_hdl) +{ + int ipa_ep_idx; + int result = -EFAULT; + struct ipa_ep_context *ep; + struct ipa_ep_cfg_status ep_status; + unsigned long base; + struct iommu_domain *smmu_domain; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + IPADBG("connecting client\n"); + + if (in == NULL || sps == NULL || clnt_hdl == NULL || + in->client >= IPA_CLIENT_MAX || + in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ipa_ep_idx = ipa2_get_ep_mapping(in->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + goto fail; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + if (ep->valid) { + IPAERR("EP already allocated.\n"); + goto fail; + } + + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); + IPA_ACTIVE_CLIENTS_INC_EP(in->client); + + + ep->skip_ep_cfg = in->skip_ep_cfg; + ep->valid = 1; + ep->client = in->client; + ep->client_notify = in->notify; + ep->priv = in->priv; + ep->keep_ipa_awake = in->keep_ipa_awake; + + /* Notify uc to start monitoring holb on USB BAM Producer pipe. 
*/ + if (IPA_CLIENT_IS_USB_CONS(in->client)) { + ipa_uc_monitor_holb(in->client, true); + IPADBG("Enabling holb monitor for client:%d", in->client); + } + + result = ipa_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx); + goto ipa_cfg_ep_fail; + } + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto ipa_cfg_ep_fail; + } + /* Setting EP status 0 */ + memset(&ep_status, 0, sizeof(ep_status)); + if (ipa2_cfg_ep_status(ipa_ep_idx, &ep_status)) { + IPAERR("fail to configure status of EP.\n"); + goto ipa_cfg_ep_fail; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("Skipping endpoint configuration.\n"); + } + + result = ipa_connect_configure_sps(in, ep, ipa_ep_idx); + if (result) { + IPAERR("fail to configure SPS.\n"); + goto ipa_cfg_ep_fail; + } + + if (!ipa_ctx->smmu_s1_bypass && + (in->desc.base == NULL || + in->data.base == NULL)) { + IPAERR(" allocate FIFOs data_fifo=0x%p desc_fifo=0x%p.\n", + in->data.base, in->desc.base); + goto desc_mem_alloc_fail; + } + + if (in->desc.base == NULL) { + result = ipa_connect_allocate_fifo(in, &ep->connect.desc, + &ep->desc_fifo_in_pipe_mem, + &ep->desc_fifo_pipe_mem_ofst, + in->desc_fifo_sz, ipa_ep_idx); + if (result) { + IPAERR("fail to allocate DESC FIFO.\n"); + goto desc_mem_alloc_fail; + } + } else { + IPADBG("client allocated DESC FIFO\n"); + ep->connect.desc = in->desc; + ep->desc_fifo_client_allocated = true; + } + IPADBG("Descriptor FIFO pa=%pa, size=%d\n", &ep->connect.desc.phys_base, + ep->connect.desc.size); + + if (in->data.base == NULL) { + result = ipa_connect_allocate_fifo(in, &ep->connect.data, + &ep->data_fifo_in_pipe_mem, + &ep->data_fifo_pipe_mem_ofst, + in->data_fifo_sz, ipa_ep_idx); + if (result) { + IPAERR("fail to allocate DATA FIFO.\n"); + goto data_mem_alloc_fail; + } + } else { + IPADBG("client allocated DATA FIFO\n"); + ep->connect.data = 
in->data; + ep->data_fifo_client_allocated = true; + } + IPADBG("Data FIFO pa=%pa, size=%d\n", &ep->connect.data.phys_base, + ep->connect.data.size); + + if (!ipa_ctx->smmu_s1_bypass) { + ep->connect.data.iova = ep->connect.data.phys_base; + base = ep->connect.data.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + if (ipa_iommu_map(smmu_domain, + rounddown(base, PAGE_SIZE), + rounddown(base, PAGE_SIZE), + roundup(ep->connect.data.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE)) { + IPAERR("Fail to ipa_iommu_map data FIFO\n"); + goto iommu_map_data_fail; + } + } + ep->connect.desc.iova = ep->connect.desc.phys_base; + base = ep->connect.desc.iova; + if (smmu_domain != NULL) { + if (ipa_iommu_map(smmu_domain, + rounddown(base, PAGE_SIZE), + rounddown(base, PAGE_SIZE), + roundup(ep->connect.desc.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE), + IOMMU_READ | IOMMU_WRITE)) { + IPAERR("Fail to ipa_iommu_map desc FIFO\n"); + goto iommu_map_desc_fail; + } + } + } + + if ((ipa_ctx->ipa_hw_type == IPA_HW_v2_0 || + ipa_ctx->ipa_hw_type == IPA_HW_v2_5 || + ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) && + IPA_CLIENT_IS_USB_CONS(in->client)) + ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD; + else + ep->connect.event_thresh = IPA_EVENT_THRESHOLD; + ep->connect.options = SPS_O_AUTO_ENABLE; /* BAM-to-BAM */ + + result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, in->client); + if (result) { + IPAERR("sps_connect fails.\n"); + goto sps_connect_fail; + } + + sps->ipa_bam_hdl = ipa_ctx->bam_handle; + sps->ipa_ep_idx = ipa_ep_idx; + *clnt_hdl = ipa_ep_idx; + memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer)); + memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer)); + + ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->client)) + ipa_install_dflt_flt_rules(ipa_ep_idx); + + if (!ep->keep_ipa_awake) + 
IPA_ACTIVE_CLIENTS_DEC_EP(in->client); + + IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx); + + return 0; + +sps_connect_fail: + if (!ipa_ctx->smmu_s1_bypass) { + base = ep->connect.desc.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + iommu_unmap(smmu_domain, + rounddown(base, PAGE_SIZE), + roundup(ep->connect.desc.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE)); + } + } +iommu_map_desc_fail: + if (!ipa_ctx->smmu_s1_bypass) { + base = ep->connect.data.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + iommu_unmap(smmu_domain, + rounddown(base, PAGE_SIZE), + roundup(ep->connect.data.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE)); + } + } +iommu_map_data_fail: + if (!ep->data_fifo_client_allocated) { + if (!ep->data_fifo_in_pipe_mem) + dma_free_coherent(ipa_ctx->pdev, + ep->connect.data.size, + ep->connect.data.base, + ep->connect.data.phys_base); + else + ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst, + ep->connect.data.size); + } +data_mem_alloc_fail: + if (!ep->desc_fifo_client_allocated) { + if (!ep->desc_fifo_in_pipe_mem) + dma_free_coherent(ipa_ctx->pdev, + ep->connect.desc.size, + ep->connect.desc.base, + ep->connect.desc.phys_base); + else + ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst, + ep->connect.desc.size); + } +desc_mem_alloc_fail: + sps_free_endpoint(ep->ep_hdl); +ipa_cfg_ep_fail: + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); + IPA_ACTIVE_CLIENTS_DEC_EP(in->client); +fail: + return result; +} + +static int ipa2_smmu_unmap_peer_bam(unsigned long dev) +{ + size_t len; + struct iommu_domain *smmu_domain; + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(); + + if (!ipa_ctx->smmu_s1_bypass) { + WARN_ON(dev != ipa_ctx->peer_bam_dev); + ipa_ctx->peer_bam_map_cnt--; + if (ipa_ctx->peer_bam_map_cnt == 0) { + len = roundup(ipa_ctx->peer_bam_map_size + + ipa_ctx->peer_bam_pa - + rounddown(ipa_ctx->peer_bam_pa, + PAGE_SIZE), PAGE_SIZE); + smmu_domain = 
ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + if (iommu_unmap(smmu_domain, + cb->va_end, len) != len) { + IPAERR("Fail to iommu_unmap\n"); + return -EINVAL; + } + IPADBG("Peer bam %lu unmapped\n", dev); + } + } + } + + return 0; +} + +/** + * ipa2_disconnect() - low-level IPA client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to disconnect + * from IPA in BAM-BAM mode. this api expects caller to take responsibility to + * free any needed headers, routing and filtering tables and rules as needed. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_disconnect(u32 clnt_hdl) +{ + int result; + struct ipa_ep_context *ep; + unsigned long peer_bam; + unsigned long base; + struct iommu_domain *smmu_domain; + struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0}; + int res; + enum ipa_client_type client_type; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + client_type = ipa2_get_client_mapping(clnt_hdl); + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(client_type); + + /* For USB 2.0 controller, first the ep will be disabled. + * so this sequence is not needed again when disconnecting the pipe. + */ + if (!ep->ep_disabled) { + /* Set Disconnect in Progress flag. */ + spin_lock(&ipa_ctx->disconnect_lock); + ep->disconnect_in_progress = true; + spin_unlock(&ipa_ctx->disconnect_lock); + + /* Notify uc to stop monitoring holb on USB BAM + * Producer pipe. 
+ */ + if (IPA_CLIENT_IS_USB_CONS(ep->client)) { + ipa_uc_monitor_holb(ep->client, false); + IPADBG("Disabling holb monitor for client: %d\n", + ep->client); + } + + result = ipa_disable_data_path(clnt_hdl); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", + result, clnt_hdl); + return -EPERM; + } + } + + result = sps_disconnect(ep->ep_hdl); + if (result) { + IPAERR("SPS disconnect failed.\n"); + return -EPERM; + } + + if (IPA_CLIENT_IS_CONS(ep->client)) + peer_bam = ep->connect.destination; + else + peer_bam = ep->connect.source; + + if (ipa2_smmu_unmap_peer_bam(peer_bam)) { + IPAERR("fail to iommu unmap peer BAM.\n"); + return -EPERM; + } + + if (!ep->desc_fifo_client_allocated && + ep->connect.desc.base) { + if (!ep->desc_fifo_in_pipe_mem) + dma_free_coherent(ipa_ctx->pdev, + ep->connect.desc.size, + ep->connect.desc.base, + ep->connect.desc.phys_base); + else + ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst, + ep->connect.desc.size); + } + + if (!ep->data_fifo_client_allocated && + ep->connect.data.base) { + if (!ep->data_fifo_in_pipe_mem) + dma_free_coherent(ipa_ctx->pdev, + ep->connect.data.size, + ep->connect.data.base, + ep->connect.data.phys_base); + else + ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst, + ep->connect.data.size); + } + + if (!ipa_ctx->smmu_s1_bypass) { + base = ep->connect.desc.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + iommu_unmap(smmu_domain, + rounddown(base, PAGE_SIZE), + roundup(ep->connect.desc.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE)); + } + } + + if (!ipa_ctx->smmu_s1_bypass) { + base = ep->connect.data.iova; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + iommu_unmap(smmu_domain, + rounddown(base, PAGE_SIZE), + roundup(ep->connect.data.size + base - + rounddown(base, PAGE_SIZE), PAGE_SIZE)); + } + } + + result = sps_free_endpoint(ep->ep_hdl); + if (result) { + IPAERR("SPS de-alloc EP failed.\n"); + return -EPERM; + } + + 
ipa_delete_dflt_flt_rules(clnt_hdl); + + /* If APPS flow control is not enabled, send a message to modem to + * enable flow control honoring. + */ + if (!ipa_ctx->tethered_flow_control && ep->qmi_request_sent) { + /* Send a message to modem to disable flow control honoring. */ + req.request_id = clnt_hdl; + res = qmi_disable_force_clear_datapath_send(&req); + if (res) { + IPADBG("disable_force_clear_datapath failed %d\n", + res); + } + } + + spin_lock(&ipa_ctx->disconnect_lock); + memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context)); + spin_unlock(&ipa_ctx->disconnect_lock); + + IPA_ACTIVE_CLIENTS_DEC_EP(client_type); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + return 0; +} + +/** + * ipa2_reset_endpoint() - reset an endpoint from BAM perspective + * @clnt_hdl: [in] IPA client handle + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_reset_endpoint(u32 clnt_hdl) +{ + int res; + struct ipa_ep_context *ep; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes) { + IPAERR("Bad parameters.\n"); + return -EFAULT; + } + ep = &ipa_ctx->ep[clnt_hdl]; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + res = sps_disconnect(ep->ep_hdl); + if (res) { + IPAERR("sps_disconnect() failed, res=%d.\n", res); + goto bail; + } else { + res = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, + ep->client); + if (res) { + IPAERR("sps_connect() failed, res=%d.\n", res); + goto bail; + } + } + +bail: + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return res; +} + +/** + * ipa2_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before + * client disconnect. 
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to remove + * ep delay on IPA consumer ipe before disconnect in BAM-BAM mode. this api + * expects caller to take responsibility to free any needed headers, routing + * and filtering tables and rules as needed. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_clear_endpoint_delay(u32 clnt_hdl) +{ + struct ipa_ep_context *ep; + struct ipa_ep_cfg_ctrl ep_ctrl = {0}; + struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0}; + int res; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (!ipa_ctx->tethered_flow_control) { + IPADBG("APPS flow control is not enabled\n"); + /* Send a message to modem to disable flow control honoring. */ + req.request_id = clnt_hdl; + req.source_pipe_bitmask = 1 << clnt_hdl; + res = qmi_enable_force_clear_datapath_send(&req); + if (res) { + IPADBG("enable_force_clear_datapath failed %d\n", + res); + } + ep->qmi_request_sent = true; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + /* Set disconnect in progress flag so further flow control events are + * not honored. 
+ */ + spin_lock(&ipa_ctx->disconnect_lock); + ep->disconnect_in_progress = true; + spin_unlock(&ipa_ctx->disconnect_lock); + + /* If flow is disabled at this point, restore the ep state.*/ + ep_ctrl.ipa_ep_delay = false; + ep_ctrl.ipa_ep_suspend = false; + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_ctrl); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl); + + return 0; +} + +/** + * ipa2_disable_endpoint() - low-level IPA client disable endpoint + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to + * disable the pipe from IPA in BAM-BAM mode. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_disable_endpoint(u32 clnt_hdl) +{ + int result; + struct ipa_ep_context *ep; + enum ipa_client_type client_type; + unsigned long bam; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + client_type = ipa2_get_client_mapping(clnt_hdl); + IPA_ACTIVE_CLIENTS_INC_EP(client_type); + + /* Set Disconnect in Progress flag. */ + spin_lock(&ipa_ctx->disconnect_lock); + ep->disconnect_in_progress = true; + spin_unlock(&ipa_ctx->disconnect_lock); + + /* Notify uc to stop monitoring holb on USB BAM Producer pipe. 
*/ + if (IPA_CLIENT_IS_USB_CONS(ep->client)) { + ipa_uc_monitor_holb(ep->client, false); + IPADBG("Disabling holb monitor for client: %d\n", ep->client); + } + + result = ipa_disable_data_path(clnt_hdl); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + clnt_hdl); + goto fail; + } + + if (IPA_CLIENT_IS_CONS(ep->client)) + bam = ep->connect.source; + else + bam = ep->connect.destination; + + result = sps_pipe_reset(bam, clnt_hdl); + if (result) { + IPAERR("SPS pipe reset failed.\n"); + goto fail; + } + + ep->ep_disabled = true; + + IPA_ACTIVE_CLIENTS_DEC_EP(client_type); + + IPADBG("client (ep: %d) disabled\n", clnt_hdl); + + return 0; + +fail: + IPA_ACTIVE_CLIENTS_DEC_EP(client_type); + return -EPERM; +} + + +/** + * ipa_sps_connect_safe() - connect endpoint from BAM prespective + * @h: [in] sps pipe handle + * @connect: [in] sps connect parameters + * @ipa_client: [in] ipa client handle representing the pipe + * + * This function connects a BAM pipe using SPS driver sps_connect() API + * and by requesting uC interface to reset the pipe, avoids an IPA HW + * limitation that does not allow resetting a BAM pipe during traffic in + * IPA TX command queue. 
+ * + * Returns: 0 on success, negative on failure + */ +int ipa_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect, + enum ipa_client_type ipa_client) +{ + int res; + + if (ipa_ctx->ipa_hw_type > IPA_HW_v2_5 || ipa_ctx->skip_uc_pipe_reset) { + IPADBG("uC pipe reset is not required\n"); + } else { + res = ipa_uc_reset_pipe(ipa_client); + if (res) + return res; + } + return sps_connect(h, connect); +} +EXPORT_SYMBOL(ipa_sps_connect_safe); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c new file mode 100644 index 000000000000..c40cbeff140f --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c @@ -0,0 +1,2327 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#ifdef CONFIG_DEBUG_FS + +#include +#include +#include +#include "ipa_i.h" +#include "../ipa_rm_i.h" + +#define IPA_MAX_MSG_LEN 4096 +#define IPA_DBG_CNTR_ON 127265 +#define IPA_DBG_CNTR_OFF 127264 +#define IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE ((IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN \ + * IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) \ + + IPA_MAX_MSG_LEN) + +#define RX_MIN_POLL_CNT "Rx Min Poll Count" +#define RX_MAX_POLL_CNT "Rx Max Poll Count" +#define MAX_COUNT_LENGTH 6 +#define MAX_POLLING_ITERATION 40 +#define MIN_POLLING_ITERATION 1 + +#define IPA_DUMP_STATUS_FIELD(f) \ + pr_err(#f "=0x%x\n", status->f) + +const char *ipa_excp_name[] = { + __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0), + __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT), + __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP), +}; + +const char *ipa_status_excp_name[] = { + __stringify_1(IPA_EXCP_DEAGGR), + __stringify_1(IPA_EXCP_REPLICATION), + __stringify_1(IPA_EXCP_IP), + 
__stringify_1(IPA_EXCP_IHL), + __stringify_1(IPA_EXCP_FRAG_MISS), + __stringify_1(IPA_EXCP_SW), + __stringify_1(IPA_EXCP_NAT), + __stringify_1(IPA_EXCP_NONE), +}; + +const char *ipa_event_name[] = { + __stringify(WLAN_CLIENT_CONNECT), + __stringify(WLAN_CLIENT_DISCONNECT), + __stringify(WLAN_CLIENT_POWER_SAVE_MODE), + __stringify(WLAN_CLIENT_NORMAL_MODE), + __stringify(SW_ROUTING_ENABLE), + __stringify(SW_ROUTING_DISABLE), + __stringify(WLAN_AP_CONNECT), + __stringify(WLAN_AP_DISCONNECT), + __stringify(WLAN_STA_CONNECT), + __stringify(WLAN_STA_DISCONNECT), + __stringify(WLAN_CLIENT_CONNECT_EX), + __stringify(WLAN_SWITCH_TO_SCC), + __stringify(WLAN_SWITCH_TO_MCC), + __stringify(WLAN_WDI_ENABLE), + __stringify(WLAN_WDI_DISABLE), + __stringify(WAN_UPSTREAM_ROUTE_ADD), + __stringify(WAN_UPSTREAM_ROUTE_DEL), + __stringify(WAN_EMBMS_CONNECT), + __stringify(WAN_XLAT_CONNECT), + __stringify(ECM_CONNECT), + __stringify(ECM_DISCONNECT), + __stringify(IPA_TETHERING_STATS_UPDATE_STATS), + __stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS), + __stringify(IPA_QUOTA_REACH), + __stringify(IPA_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(ADD_VLAN_IFACE), + __stringify(DEL_VLAN_IFACE), + __stringify(ADD_L2TP_VLAN_MAPPING), + __stringify(DEL_L2TP_VLAN_MAPPING), + __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT), + __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT), + __stringify(ADD_BRIDGE_VLAN_MAPPING), + __stringify(DEL_BRIDGE_VLAN_MAPPING), + __stringify(WLAN_FWR_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_GSB_CONNECT), + __stringify(IPA_GSB_DISCONNECT), +}; + +const char *ipa_hdr_l2_type_name[] = { + __stringify(IPA_HDR_L2_NONE), + __stringify(IPA_HDR_L2_ETHERNET_II), + __stringify(IPA_HDR_L2_802_3), +}; + +const char *ipa_hdr_proc_type_name[] = { + __stringify(IPA_HDR_PROC_NONE), + __stringify(IPA_HDR_PROC_ETHII_TO_ETHII), + __stringify(IPA_HDR_PROC_ETHII_TO_802_3), + __stringify(IPA_HDR_PROC_802_3_TO_ETHII), + __stringify(IPA_HDR_PROC_802_3_TO_802_3), 
+};
+
+/* debugfs dentry handles for the files created under the IPA root dir. */
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_keep_awake;
+static struct dentry *dfile_ep_holb;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_proc_ctx;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip6_flt;
+static struct dentry *dfile_stats;
+static struct dentry *dfile_wstats;
+static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
+static struct dentry *dfile_dbg_cnt;
+static struct dentry *dfile_msg;
+static struct dentry *dfile_ip4_nat;
+static struct dentry *dfile_rm_stats;
+static struct dentry *dfile_status_stats;
+static struct dentry *dfile_active_clients;
+static struct dentry *dfile_ipa_rx_poll_timeout;
+static struct dentry *dfile_ipa_poll_iteration;
+
+/* Shared scratch buffer for all debugfs read/write handlers; accesses are
+ * serialized per-handler, not globally.
+ */
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static char *active_clients_buf;
+static s8 ep_reg_idx;	/* pipe selected via the ep_reg write handler; <0 = all */
+static void *ipa_ipc_low_buff;
+
+/* Dump the v1.1 general registers into dbg_buff.
+ * NOTE(review): buff/max_len are ignored — the function writes the global
+ * dbg_buff with IPA_MAX_MSG_LEN. Harmless while callers always pass
+ * (dbg_buff, IPA_MAX_MSG_LEN), but the signature is misleading; confirm.
+ */
+int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
+{
+	return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+		"IPA_VERSION=0x%x\n"
+		"IPA_COMP_HW_VERSION=0x%x\n"
+		"IPA_ROUTE=0x%x\n"
+		"IPA_FILTER=0x%x\n"
+		"IPA_SHARED_MEM_SIZE=0x%x\n",
+		ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+		ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+		ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1),
+		ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v1_1),
+		ipa_read_reg(ipa_ctx->mmio,
+			IPA_SHARED_MEM_SIZE_OFST_v1_1));
+}
+
+/* Dump the v2.0 general registers into dbg_buff (same buff/max_len caveat
+ * as the v1.1 variant above).
+ */
+int _ipa_read_gen_reg_v2_0(char *buff, int max_len)
+{
+	return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+		"IPA_VERSION=0x%x\n"
+		"IPA_COMP_HW_VERSION=0x%x\n"
+		"IPA_ROUTE=0x%x\n"
+		"IPA_FILTER=0x%x\n"
+		"IPA_SHARED_MEM_RESTRICTED=0x%x\n"
+		"IPA_SHARED_MEM_SIZE=0x%x\n",
+		ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+		ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+		ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1),
+
ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v1_1), + ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0), + ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0)); +} + +static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + nbytes = ipa_ctx->ctrl->ipa_read_gen_reg(dbg_buff, IPA_MAX_MSG_LEN); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_write_ep_holb(struct file *file, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct ipa_ep_cfg_holb holb; + u32 en; + u32 tmr_val; + u32 ep_idx; + unsigned long missing; + char *sptr, *token; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + + sptr = dbg_buff; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &ep_idx)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &en)) + return -EINVAL; + + token = strsep(&sptr, " "); + if (!token) + return -EINVAL; + if (kstrtou32(token, 0, &tmr_val)) + return -EINVAL; + + holb.en = en; + holb.tmr_val = tmr_val; + + ipa2_cfg_ep_holb(ep_idx, &holb); + + return count; +} + +static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + if (option >= 
ipa_ctx->ipa_num_pipes) { + IPAERR("bad pipe specified %u\n", option); + return count; + } + + ep_reg_idx = option; + + return count; +} + +int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe) +{ + return scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "IPA_ENDP_INIT_NAT_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_%u=0x%x\n" + "IPA_ENDP_INIT_MODE_%u=0x%x\n" + "IPA_ENDP_INIT_AGGR_%u=0x%x\n" + "IPA_ENDP_INIT_ROUTE_%u=0x%x\n" + "IPA_ENDP_INIT_CTRL_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n", + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_NAT_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CTRL_N_OFST(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe)) + ); +} + +int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe) +{ + return scnprintf( + dbg_buff, IPA_MAX_MSG_LEN, + "IPA_ENDP_INIT_NAT_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_%u=0x%x\n" + "IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n" + "IPA_ENDP_INIT_MODE_%u=0x%x\n" + "IPA_ENDP_INIT_AGGR_%u=0x%x\n" + "IPA_ENDP_INIT_ROUTE_%u=0x%x\n" + "IPA_ENDP_INIT_CTRL_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_EN_%u=0x%x\n" + "IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n" + "IPA_ENDP_INIT_DEAGGR_%u=0x%x\n" + "IPA_ENDP_INIT_CFG_%u=0x%x\n", + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_NAT_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + 
IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CTRL_N_OFST(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(pipe)), + pipe, ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CFG_n_OFST(pipe))); +} + +static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int i; + int start_idx; + int end_idx; + int size = 0; + int ret; + loff_t pos; + + /* negative ep_reg_idx means all registers */ + if (ep_reg_idx < 0) { + start_idx = 0; + end_idx = ipa_ctx->ipa_num_pipes; + } else { + start_idx = ep_reg_idx; + end_idx = start_idx + 1; + } + pos = *ppos; + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + for (i = start_idx; i < end_idx; i++) { + + nbytes = ipa_ctx->ctrl->ipa_read_ep_reg(dbg_buff, + IPA_MAX_MSG_LEN, i); + + *ppos = pos; + ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff, + nbytes); + if (ret < 0) { + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return ret; + } + + size += ret; + ubuf += nbytes; + count -= nbytes; + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + *ppos = pos + size; + return size; +} + +static ssize_t ipa_write_keep_awake(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + if (option == 1) + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + else if (option == 0) + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + else + return -EFAULT; + + return count; +} + +static ssize_t ipa_read_keep_awake(struct file *file, char __user *ubuf, + 
size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	/* Report whether the APPS-side active-clients vote is holding IPA on. */
+	ipa_active_clients_lock();
+	if (ipa_ctx->ipa_active_clients.cnt)
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA APPS power state is ON\n");
+	else
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA APPS power state is OFF\n");
+	ipa_active_clients_unlock();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+/* Dump every valid header-table entry.
+ * NOTE(review): output goes to the kernel log via pr_err and the function
+ * returns 0, so the debugfs file itself reads as empty — confirm this is
+ * the intended (log-only) behavior.
+ */
+static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
+	loff_t *ppos)
+{
+	int nbytes = 0;
+	int i = 0;
+	struct ipa_hdr_entry *entry;
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (ipa_ctx->hdr_tbl_lcl)
+		pr_err("Table resides on local memory\n");
+	else
+		pr_err("Table resides on system (ddr) memory\n");
+
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+		link) {
+		/* Skip entries whose cookie no longer matches (stale/freed). */
+		if (entry->cookie != IPA_HDR_COOKIE)
+			continue;
+		nbytes = scnprintf(
+			dbg_buff,
+			IPA_MAX_MSG_LEN,
+			"name:%s len=%d ref=%d partial=%d type=%s ",
+			entry->name,
+			entry->hdr_len,
+			entry->ref_cnt,
+			entry->is_partial,
+			ipa_hdr_l2_type_name[entry->type]);
+
+		if (entry->is_hdr_proc_ctx) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"phys_base=0x%pa ",
+				&entry->phys_base);
+		} else {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ofst=%u ",
+				entry->offset_entry->offset >> 2);
+		}
+		/* NOTE(review): if nbytes + i * 2 exceeds IPA_MAX_MSG_LEN the
+		 * size argument goes negative (then converts to a huge size_t)
+		 * — bounded in practice by hdr_len, but worth confirming.
+		 */
+		for (i = 0; i < entry->hdr_len; i++) {
+			scnprintf(dbg_buff + nbytes + i * 2,
+				IPA_MAX_MSG_LEN - nbytes - i * 2,
+				"%02x", entry->hdr[i]);
+		}
+		scnprintf(dbg_buff + nbytes + entry->hdr_len * 2,
+			IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2,
+			"\n");
+		pr_err("%s\n", dbg_buff);
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+
+/* Pretty-print an ipa_rule_attrib into dbg_buff and dump it via pr_err. */
+static int ipa_attrib_dump(struct ipa_rule_attrib *attrib,
+	enum ipa_ip_type ip)
+{
+	uint32_t addr[4];
+	uint32_t mask[4];
+	int nbytes = 0;
+	int i;
+
+	/* NOTE(review): IPA_FLT_TOS_MASKED is tested here (printing
+	 * tos_value) and again just below (printing tos_mask); this first
+	 * check presumably should be IPA_FLT_TOS — confirm upstream.
+	 */
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+		nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes,
+			"tos_value:%d ",
attrib->tos_value); + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "tos_mask:%d ", attrib->tos_mask); + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "protocol:%d ", attrib->u.v4.protocol); + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ip == IPA_IP_v4) { + addr[0] = htonl(attrib->u.v4.src_addr); + mask[0] = htonl(attrib->u.v4.src_addr_mask); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "src_addr:%pI4 src_addr_mask:%pI4 ", + addr + 0, mask + 0); + } else if (ip == IPA_IP_v6) { + for (i = 0; i < 4; i++) { + addr[i] = htonl(attrib->u.v6.src_addr[i]); + mask[i] = htonl(attrib->u.v6.src_addr_mask[i]); + } + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "src_addr:%pI6 src_addr_mask:%pI6 ", + addr + 0, mask + 0); + } else { + WARN_ON(1); + } + } + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ip == IPA_IP_v4) { + addr[0] = htonl(attrib->u.v4.dst_addr); + mask[0] = htonl(attrib->u.v4.dst_addr_mask); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "dst_addr:%pI4 dst_addr_mask:%pI4 ", + addr + 0, mask + 0); + } else if (ip == IPA_IP_v6) { + for (i = 0; i < 4; i++) { + addr[i] = htonl(attrib->u.v6.dst_addr[i]); + mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]); + } + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "dst_addr:%pI6 dst_addr_mask:%pI6 ", + addr + 0, mask + 0); + } else { + WARN_ON(1); + } + } + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "src_port_range:%u %u ", + attrib->src_port_lo, + attrib->src_port_hi); + } + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "dst_port_range:%u %u ", + attrib->dst_port_lo, + attrib->dst_port_hi); + } + if (attrib->attrib_mask & IPA_FLT_TYPE) + 
nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "type:%d ", attrib->type); + + if (attrib->attrib_mask & IPA_FLT_CODE) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "code:%d ", attrib->code); + + if (attrib->attrib_mask & IPA_FLT_SPI) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "spi:%x ", attrib->spi); + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "src_port:%u ", attrib->src_port); + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "dst_port:%u ", attrib->dst_port); + + if (attrib->attrib_mask & IPA_FLT_TC) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "tc:%d ", attrib->u.v6.tc); + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "flow_label:%x ", attrib->u.v6.flow_label); + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "next_hdr:%d ", attrib->u.v6.next_hdr); + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "metadata:%x metadata_mask:%x", + attrib->meta_data, attrib->meta_data_mask); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "frg "); + + if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) || + (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "src_mac_addr:%pM ", attrib->src_mac_addr); + } + + if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) || + (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "dst_mac_addr:%pM ", attrib->dst_mac_addr); + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) + nbytes = 
scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "ether_type:%x ", attrib->ether_type); + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "l2tp inner ip type: %d ", attrib->type); + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) { + addr[0] = htonl(attrib->u.v4.dst_addr); + mask[0] = htonl(attrib->u.v4.dst_addr_mask); + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "dst_addr:%pI4 dst_addr_mask:%pI4 ", + addr, mask); + } + + pr_err("%s\n", dbg_buff); + return 0; +} + +static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib) +{ + uint8_t addr[16]; + uint8_t mask[16]; + int nbytes = 0; + int i; + int j; + + if (attrib->tos_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "tos_value:%d ", attrib->tos_eq); + + if (attrib->protocol_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "protocol:%d ", attrib->protocol_eq); + + if (attrib->num_ihl_offset_range_16 > + IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS) { + IPAERR_RL("num_ihl_offset_range_16 Max %d passed value %d\n", + IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS, + attrib->num_ihl_offset_range_16); + return -EPERM; + } + + for (i = 0; i < attrib->num_ihl_offset_range_16; i++) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ", + attrib->ihl_offset_range_16[i].offset, + attrib->ihl_offset_range_16[i].range_low, + attrib->ihl_offset_range_16[i].range_high); + } + + if (attrib->num_offset_meq_32 > IPA_IPFLTR_NUM_MEQ_32_EQNS) { + IPAERR_RL("num_offset_meq_32 Max %d passed value %d\n", + IPA_IPFLTR_NUM_MEQ_32_EQNS, attrib->num_offset_meq_32); + return -EPERM; + } + + for (i = 0; i < attrib->num_offset_meq_32; i++) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ", + attrib->offset_meq_32[i].offset, + attrib->offset_meq_32[i].mask, + 
attrib->offset_meq_32[i].value); + } + + if (attrib->tc_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "tc:%d ", attrib->tc_eq); + + if (attrib->fl_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "flow_label:%d ", attrib->fl_eq); + + if (attrib->ihl_offset_eq_16_present) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ihl_ofst_eq16:%d val:0x%x) ", + attrib->ihl_offset_eq_16.offset, + attrib->ihl_offset_eq_16.value); + } + + if (attrib->num_ihl_offset_meq_32 > IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS) { + IPAERR_RL("num_ihl_offset_meq_32 Max %d passed value %d\n", + IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS, attrib->num_ihl_offset_meq_32); + return -EPERM; + } + + for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ", + attrib->ihl_offset_meq_32[i].offset, + attrib->ihl_offset_meq_32[i].mask, + attrib->ihl_offset_meq_32[i].value); + } + + if (attrib->num_offset_meq_128 > IPA_IPFLTR_NUM_MEQ_128_EQNS) { + IPAERR_RL("num_offset_meq_128 Max %d passed value %d\n", + IPA_IPFLTR_NUM_MEQ_128_EQNS, attrib->num_offset_meq_128); + return -EPERM; + } + + for (i = 0; i < attrib->num_offset_meq_128; i++) { + for (j = 0; j < 16; j++) { + addr[j] = attrib->offset_meq_128[i].value[j]; + mask[j] = attrib->offset_meq_128[i].mask[j]; + } + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ", + attrib->offset_meq_128[i].offset, + mask + 0, + addr + 0); + } + + if (attrib->metadata_meq32_present) { + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "(metadata: ofst:%u mask:0x%x val:0x%x) ", + attrib->metadata_meq32.offset, + attrib->metadata_meq32.mask, + attrib->metadata_meq32.value); + } + + if (attrib->ipv4_frag_eq_present) + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "frg "); + + pr_err("%s\n", dbg_buff); 
+ return 0; +} + +static ssize_t ipa_read_rt(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int i = 0; + int nbytes = 0; + struct ipa_rt_tbl *tbl; + struct ipa_rt_entry *entry; + struct ipa_rt_tbl_set *set; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + u32 ofst; + u32 ofst_words; + + set = &ipa_ctx->rt_tbl_set[ip]; + + mutex_lock(&ipa_ctx->lock); + + if (ip == IPA_IP_v6) { + if (ipa_ctx->ip6_rt_tbl_lcl) + pr_err("Table resides on local memory\n"); + else + pr_err("Table resides on system (ddr) memory\n"); + } else if (ip == IPA_IP_v4) { + if (ipa_ctx->ip4_rt_tbl_lcl) + pr_err("Table resides on local memory\n"); + else + pr_err("Table resides on system (ddr) memory\n"); + } + + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + i = 0; + list_for_each_entry(entry, &tbl->head_rt_rule_list, link) { + if (entry->proc_ctx) { + ofst = entry->proc_ctx->offset_entry->offset; + ofst_words = + (ofst + + ipa_ctx->hdr_proc_ctx_tbl.start_offset) + >> 5; + + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "tbl_idx:%d tbl_name:%s tbl_ref:%u ", + entry->tbl->idx, entry->tbl->name, + entry->tbl->ref_cnt); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "rule_idx:%d dst:%d ep:%d S:%u ", + i, entry->rule.dst, + ipa2_get_ep_mapping(entry->rule.dst), + !ipa_ctx->hdr_tbl_lcl); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "proc_ctx[32B]:%u attrib_mask:%08x ", + ofst_words, + entry->rule.attrib.attrib_mask); + } else { + if (entry->hdr) + ofst = entry->hdr->offset_entry->offset; + else + ofst = 0; + + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "tbl_idx:%d tbl_name:%s tbl_ref:%u ", + entry->tbl->idx, entry->tbl->name, + entry->tbl->ref_cnt); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "rule_idx:%d dst:%d ep:%d S:%u ", + i, entry->rule.dst, + ipa2_get_ep_mapping(entry->rule.dst), + !ipa_ctx->hdr_tbl_lcl); + nbytes = 
scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr_ofst[words]:%u attrib_mask:%08x ", + ofst >> 2, + entry->rule.attrib.attrib_mask); + } + + pr_err("%s\n", dbg_buff); + ipa_attrib_dump(&entry->rule.attrib, ip); + i++; + } + } + mutex_unlock(&ipa_ctx->lock); + + return 0; +} + +static ssize_t ipa_read_proc_ctx(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + struct ipa_hdr_proc_ctx_tbl *tbl; + struct ipa_hdr_proc_ctx_entry *entry; + u32 ofst_words; + + tbl = &ipa_ctx->hdr_proc_ctx_tbl; + + mutex_lock(&ipa_ctx->lock); + + if (ipa_ctx->hdr_proc_ctx_tbl_lcl) + pr_info("Table resides on local memory\n"); + else + pr_info("Table resides on system(ddr) memory\n"); + + list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) { + ofst_words = (entry->offset_entry->offset + + ipa_ctx->hdr_proc_ctx_tbl.start_offset) + >> 5; + if (entry->hdr->is_hdr_proc_ctx) { + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ", + entry->id, + ipa_hdr_proc_type_name[entry->type], + ofst_words); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr_phys_base:0x%pa\n", + &entry->hdr->phys_base); + } else { + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "id:%u hdr_proc_type:%s proc_ctx[32B]:%u ", + entry->id, + ipa_hdr_proc_type_name[entry->type], + ofst_words); + nbytes += scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "hdr[words]:%u\n", + entry->hdr->offset_entry->offset >> 2); + } + } + mutex_unlock(&ipa_ctx->lock); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count, + loff_t *ppos) +{ + int i; + int j; + int nbytes = 0; + struct ipa_flt_tbl *tbl; + struct ipa_flt_entry *entry; + enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data; + struct ipa_rt_tbl *rt_tbl; + u32 rt_tbl_idx; + u32 bitmap; + bool 
eq; + int res = 0; + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + mutex_lock(&ipa_ctx->lock); + i = 0; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (entry->cookie != IPA_FLT_COOKIE) + continue; + if (entry->rule.eq_attrib_type) { + rt_tbl_idx = entry->rule.rt_tbl_idx; + bitmap = entry->rule.eq_attrib.rule_eq_bitmap; + eq = true; + } else { + rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl); + if (rt_tbl == NULL || + rt_tbl->cookie != IPA_RT_TBL_COOKIE) + rt_tbl_idx = ~0; + else + rt_tbl_idx = rt_tbl->idx; + bitmap = entry->rule.attrib.attrib_mask; + eq = false; + } + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "ep_idx:global rule_idx:%d act:%d rt_tbl_idx:%d ", + i, entry->rule.action, rt_tbl_idx); + nbytes = scnprintf(dbg_buff + nbytes, IPA_MAX_MSG_LEN - nbytes, + "attrib_mask:%08x retain_hdr:%d eq:%d ", + bitmap, entry->rule.retain_hdr, eq); + if (eq) { + res = ipa_attrib_dump_eq( + &entry->rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } else + ipa_attrib_dump( + &entry->rule.attrib, ip); + i++; + } + + for (j = 0; j < ipa_ctx->ipa_num_pipes; j++) { + tbl = &ipa_ctx->flt_tbl[j][ip]; + i = 0; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (entry->cookie != IPA_FLT_COOKIE) + continue; + if (entry->rule.eq_attrib_type) { + rt_tbl_idx = entry->rule.rt_tbl_idx; + bitmap = entry->rule.eq_attrib.rule_eq_bitmap; + eq = true; + } else { + rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl); + if (rt_tbl == NULL || + rt_tbl->cookie != IPA_RT_TBL_COOKIE) + rt_tbl_idx = ~0; + else + rt_tbl_idx = rt_tbl->idx; + bitmap = entry->rule.attrib.attrib_mask; + eq = false; + } + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ", + j, i, entry->rule.action, rt_tbl_idx); + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "attrib_mask:%08x retain_hdr:%d ", + bitmap, entry->rule.retain_hdr); + nbytes = 
scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "eq:%d ", eq); + pr_err("%s\n", dbg_buff); + if (eq) { + res = ipa_attrib_dump_eq( + &entry->rule.eq_attrib); + if (res) { + IPAERR_RL("failed read attrib eq\n"); + goto bail; + } + } else + ipa_attrib_dump( + &entry->rule.attrib, ip); + i++; + } + } +bail: + mutex_unlock(&ipa_ctx->lock); + + return res; +} + +static ssize_t ipa_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int i; + int cnt = 0; + uint connect = 0; + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) + connect |= (ipa_ctx->ep[i].valid << i); + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "sw_tx=%u\n" + "hw_tx=%u\n" + "tx_non_linear=%u\n" + "tx_compl=%u\n" + "wan_rx=%u\n" + "stat_compl=%u\n" + "lan_aggr_close=%u\n" + "wan_aggr_close=%u\n" + "act_clnt=%u\n" + "con_clnt_bmap=0x%x\n" + "wan_rx_empty=%u\n" + "wan_repl_rx_empty=%u\n" + "lan_rx_empty=%u\n" + "lan_repl_rx_empty=%u\n" + "flow_enable=%u\n" + "flow_disable=%u\n", + ipa_ctx->stats.tx_sw_pkts, + ipa_ctx->stats.tx_hw_pkts, + ipa_ctx->stats.tx_non_linear, + ipa_ctx->stats.tx_pkts_compl, + ipa_ctx->stats.rx_pkts, + ipa_ctx->stats.stat_compl, + ipa_ctx->stats.aggr_close, + ipa_ctx->stats.wan_aggr_close, + ipa_ctx->ipa_active_clients.cnt, + connect, + ipa_ctx->stats.wan_rx_empty, + ipa_ctx->stats.wan_repl_rx_empty, + ipa_ctx->stats.lan_rx_empty, + ipa_ctx->stats.lan_repl_rx_empty, + ipa_ctx->stats.flow_enable, + ipa_ctx->stats.flow_disable); + cnt += nbytes; + + for (i = 0; i < MAX_NUM_EXCP; i++) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, + "lan_rx_excp[%u:%20s]=%u\n", i, + ipa_status_excp_name[i], + ipa_ctx->stats.rx_excp_pkts[i]); + cnt += nbytes; + } + } else { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "sw_tx=%u\n" + "hw_tx=%u\n" + "rx=%u\n" + "rx_repl_repost=%u\n" + "rx_q_len=%u\n" + "act_clnt=%u\n" + "con_clnt_bmap=0x%x\n", + ipa_ctx->stats.tx_sw_pkts, + 
ipa_ctx->stats.tx_hw_pkts, + ipa_ctx->stats.rx_pkts, + ipa_ctx->stats.rx_repl_repost, + ipa_ctx->stats.rx_q_len, + ipa_ctx->ipa_active_clients.cnt, + connect); + cnt += nbytes; + + for (i = 0; i < MAX_NUM_EXCP; i++) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, + "rx_excp[%u:%35s]=%u\n", i, ipa_excp_name[i], + ipa_ctx->stats.rx_excp_pkts[i]); + cnt += nbytes; + } + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa_read_wstats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + +#define HEAD_FRMT_STR "%25s\n" +#define FRMT_STR "%25s %10u\n" +#define FRMT_STR1 "%25s %10u\n\n" + + int cnt = 0; + int nbytes; + int ipa_ep_idx; + enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD; + struct ipa_ep_context *ep; + + do { + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:"); + cnt += nbytes; + + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + break; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (ep->valid != 1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + break; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Avail Fifo Desc:", + atomic_read(&ep->avail_fifo_desc)); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkts Status Rcvd:", + ep->wstats.rx_pkts_status_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Processed:", + ep->wstats.rx_hd_processed); + 
cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail); + cnt += nbytes; + + } while (0); + + client = IPA_CLIENT_WLAN1_CONS; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN1_CONS Stats:"); + cnt += nbytes; + while (1) { + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + goto nxt_clnt_cons; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (ep->valid != 1) { + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up"); + cnt += nbytes; + goto nxt_clnt_cons; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + FRMT_STR1, "Tx Pkts Dropped:", + ep->wstats.tx_pkts_dropped); + cnt += nbytes; + +nxt_clnt_cons: + switch (client) { + case IPA_CLIENT_WLAN1_CONS: + client = IPA_CLIENT_WLAN2_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN2_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN2_CONS: + client = IPA_CLIENT_WLAN3_CONS; + nbytes = scnprintf(dbg_buff + cnt, + IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN3_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN3_CONS: + client = IPA_CLIENT_WLAN4_CONS; + nbytes = scnprintf(dbg_buff + cnt, + 
IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, + "Client IPA_CLIENT_WLAN4_CONS Stats:"); + cnt += nbytes; + continue; + case IPA_CLIENT_WLAN4_CONS: + default: + break; + } + break; + } + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:"); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR, + "Tx Comm Buff Allocated:", + ipa_ctx->wc_memb.wlan_comm_total_cnt); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR, + "Tx Comm Buff Avail:", ipa_ctx->wc_memb.wlan_comm_free_cnt); + cnt += nbytes; + + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1, + "Total Tx Pkts Freed:", ipa_ctx->wc_memb.total_tx_pkts_freed); + cnt += nbytes; + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa_read_ntn(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ +#define TX_STATS(y) \ + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y +#define RX_STATS(y) \ + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y + + struct IpaHwStatsNTNInfoData_t stats; + int nbytes; + int cnt = 0; + + if (!ipa2_get_ntn_stats(&stats)) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "TX num_pkts_processed=%u\n" + "TX tail_ptr_val=%u\n" + "TX num_db_fired=%u\n" + "TX ringFull=%u\n" + "TX ringEmpty=%u\n" + "TX ringUsageHigh=%u\n" + "TX ringUsageLow=%u\n" + "TX RingUtilCount=%u\n" + "TX bamFifoFull=%u\n" + "TX bamFifoEmpty=%u\n" + "TX bamFifoUsageHigh=%u\n" + "TX bamFifoUsageLow=%u\n" + "TX bamUtilCount=%u\n" + "TX num_db=%u\n" + "TX num_unexpected_db=%u\n" + "TX num_bam_int_handled=%u\n" + "TX num_bam_int_in_non_running_state=%u\n" + "TX num_qmb_int_handled=%u\n" + "TX num_bam_int_handled_while_wait_for_bam=%u\n" + "TX num_bam_int_handled_while_not_in_bam=%u\n", + TX_STATS(num_pkts_processed), + TX_STATS(tail_ptr_val), + TX_STATS(num_db_fired), + TX_STATS(tx_comp_ring_stats.ringFull), + 
TX_STATS(tx_comp_ring_stats.ringEmpty), + TX_STATS(tx_comp_ring_stats.ringUsageHigh), + TX_STATS(tx_comp_ring_stats.ringUsageLow), + TX_STATS(tx_comp_ring_stats.RingUtilCount), + TX_STATS(bam_stats.bamFifoFull), + TX_STATS(bam_stats.bamFifoEmpty), + TX_STATS(bam_stats.bamFifoUsageHigh), + TX_STATS(bam_stats.bamFifoUsageLow), + TX_STATS(bam_stats.bamUtilCount), + TX_STATS(num_db), + TX_STATS(num_unexpected_db), + TX_STATS(num_bam_int_handled), + TX_STATS(num_bam_int_in_non_running_state), + TX_STATS(num_qmb_int_handled), + TX_STATS(num_bam_int_handled_while_wait_for_bam), + TX_STATS(num_bam_int_handled_while_not_in_bam)); + cnt += nbytes; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "RX max_outstanding_pkts=%u\n" + "RX num_pkts_processed=%u\n" + "RX rx_ring_rp_value=%u\n" + "RX ringFull=%u\n" + "RX ringEmpty=%u\n" + "RX ringUsageHigh=%u\n" + "RX ringUsageLow=%u\n" + "RX RingUtilCount=%u\n" + "RX bamFifoFull=%u\n" + "RX bamFifoEmpty=%u\n" + "RX bamFifoUsageHigh=%u\n" + "RX bamFifoUsageLow=%u\n" + "RX bamUtilCount=%u\n" + "RX num_bam_int_handled=%u\n" + "RX num_db=%u\n" + "RX num_unexpected_db=%u\n" + "RX num_pkts_in_dis_uninit_state=%u\n" + "num_ic_inj_vdev_change=%u\n" + "num_ic_inj_fw_desc_change=%u\n", + RX_STATS(max_outstanding_pkts), + RX_STATS(num_pkts_processed), + RX_STATS(rx_ring_rp_value), + RX_STATS(rx_ind_ring_stats.ringFull), + RX_STATS(rx_ind_ring_stats.ringEmpty), + RX_STATS(rx_ind_ring_stats.ringUsageHigh), + RX_STATS(rx_ind_ring_stats.ringUsageLow), + RX_STATS(rx_ind_ring_stats.RingUtilCount), + RX_STATS(bam_stats.bamFifoFull), + RX_STATS(bam_stats.bamFifoEmpty), + RX_STATS(bam_stats.bamFifoUsageHigh), + RX_STATS(bam_stats.bamFifoUsageLow), + RX_STATS(bam_stats.bamUtilCount), + RX_STATS(num_bam_int_handled), + RX_STATS(num_db), + RX_STATS(num_unexpected_db), + RX_STATS(num_pkts_in_dis_uninit_state), + RX_STATS(num_bam_int_handled_while_not_in_bam), + RX_STATS(num_bam_int_handled_while_in_bam_state)); + cnt += nbytes; + } else { + 
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Fail to read NTN stats\n"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t ipa_read_wdi(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct IpaHwStatsWDIInfoData_t stats; + int nbytes; + int cnt = 0; + + if (!ipa2_get_wdi_stats(&stats)) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "TX num_pkts_processed=%u\n" + "TX copy_engine_doorbell_value=%u\n" + "TX num_db_fired=%u\n" + "TX ringFull=%u\n" + "TX ringEmpty=%u\n" + "TX ringUsageHigh=%u\n" + "TX ringUsageLow=%u\n" + "TX RingUtilCount=%u\n" + "TX bamFifoFull=%u\n" + "TX bamFifoEmpty=%u\n" + "TX bamFifoUsageHigh=%u\n" + "TX bamFifoUsageLow=%u\n" + "TX bamUtilCount=%u\n" + "TX num_db=%u\n" + "TX num_unexpected_db=%u\n" + "TX num_bam_int_handled=%u\n" + "TX num_bam_int_in_non_running_state=%u\n" + "TX num_qmb_int_handled=%u\n" + "TX num_bam_int_handled_while_wait_for_bam=%u\n", + stats.tx_ch_stats.num_pkts_processed, + stats.tx_ch_stats.copy_engine_doorbell_value, + stats.tx_ch_stats.num_db_fired, + stats.tx_ch_stats.tx_comp_ring_stats.ringFull, + stats.tx_ch_stats.tx_comp_ring_stats.ringEmpty, + stats.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh, + stats.tx_ch_stats.tx_comp_ring_stats.ringUsageLow, + stats.tx_ch_stats.tx_comp_ring_stats.RingUtilCount, + stats.tx_ch_stats.bam_stats.bamFifoFull, + stats.tx_ch_stats.bam_stats.bamFifoEmpty, + stats.tx_ch_stats.bam_stats.bamFifoUsageHigh, + stats.tx_ch_stats.bam_stats.bamFifoUsageLow, + stats.tx_ch_stats.bam_stats.bamUtilCount, + stats.tx_ch_stats.num_db, + stats.tx_ch_stats.num_unexpected_db, + stats.tx_ch_stats.num_bam_int_handled, + stats.tx_ch_stats.num_bam_int_in_non_running_state, + stats.tx_ch_stats.num_qmb_int_handled, + stats.tx_ch_stats.num_bam_int_handled_while_wait_for_bam); + cnt += nbytes; + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "RX max_outstanding_pkts=%u\n" + "RX num_pkts_processed=%u\n" + 
"RX rx_ring_rp_value=%u\n" + "RX ringFull=%u\n" + "RX ringEmpty=%u\n" + "RX ringUsageHigh=%u\n" + "RX ringUsageLow=%u\n" + "RX RingUtilCount=%u\n" + "RX bamFifoFull=%u\n" + "RX bamFifoEmpty=%u\n" + "RX bamFifoUsageHigh=%u\n" + "RX bamFifoUsageLow=%u\n" + "RX bamUtilCount=%u\n" + "RX num_bam_int_handled=%u\n" + "RX num_db=%u\n" + "RX num_unexpected_db=%u\n" + "RX num_pkts_in_dis_uninit_state=%u\n" + "RX num_ic_inj_vdev_change=%u\n" + "RX num_ic_inj_fw_desc_change=%u\n" + "RX num_qmb_int_handled=%u\n" + "RX reserved1=%u\n" + "RX reserved2=%u\n", + stats.rx_ch_stats.max_outstanding_pkts, + stats.rx_ch_stats.num_pkts_processed, + stats.rx_ch_stats.rx_ring_rp_value, + stats.rx_ch_stats.rx_ind_ring_stats.ringFull, + stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty, + stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh, + stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow, + stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount, + stats.rx_ch_stats.bam_stats.bamFifoFull, + stats.rx_ch_stats.bam_stats.bamFifoEmpty, + stats.rx_ch_stats.bam_stats.bamFifoUsageHigh, + stats.rx_ch_stats.bam_stats.bamFifoUsageLow, + stats.rx_ch_stats.bam_stats.bamUtilCount, + stats.rx_ch_stats.num_bam_int_handled, + stats.rx_ch_stats.num_db, + stats.rx_ch_stats.num_unexpected_db, + stats.rx_ch_stats.num_pkts_in_dis_uninit_state, + stats.rx_ch_stats.num_ic_inj_vdev_change, + stats.rx_ch_stats.num_ic_inj_fw_desc_change, + stats.rx_ch_stats.num_qmb_int_handled, + stats.rx_ch_stats.reserved1, + stats.rx_ch_stats.reserved2); + cnt += nbytes; + } else { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Fail to read WDI stats\n"); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +void _ipa_write_dbg_cnt_v1_1(int option) +{ + if (option == 1) + ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0), + IPA_DBG_CNTR_ON); + else + ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0), + IPA_DBG_CNTR_OFF); +} + +void _ipa_write_dbg_cnt_v2_0(int option) 
+{ + if (option == 1) + ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(0), + IPA_DBG_CNTR_ON); + else + ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(0), + IPA_DBG_CNTR_OFF); +} + +static ssize_t ipa_write_dbg_cnt(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + unsigned long missing; + u32 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, buf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtou32(dbg_buff, 0, &option)) + return -EFAULT; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa_ctx->ctrl->ipa_write_dbg_cnt(option); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return count; +} + +int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len) +{ + int regval; + + regval = ipa_read_reg(ipa_ctx->mmio, + IPA_DEBUG_CNT_REG_N_OFST_v1_1(0)); + + return scnprintf(buf, max_len, + "IPA_DEBUG_CNT_REG_0=0x%x\n", regval); +} + +int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len) +{ + int regval; + + regval = ipa_read_reg(ipa_ctx->mmio, + IPA_DEBUG_CNT_REG_N_OFST_v2_0(0)); + + return scnprintf(buf, max_len, + "IPA_DEBUG_CNT_REG_0=0x%x\n", regval); +} + +static ssize_t ipa_read_dbg_cnt(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + nbytes = ipa_ctx->ctrl->ipa_read_dbg_cnt(dbg_buff, IPA_MAX_MSG_LEN); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_read_msg(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes; + int cnt = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(ipa_event_name); i++) { + nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, + "msg[%u:%27s] W:%u R:%u\n", i, + ipa_event_name[i], + ipa_ctx->stats.msg_w[i], + ipa_ctx->stats.msg_r[i]); + cnt += nbytes; + } + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static ssize_t 
ipa_read_nat4(struct file *file, + char __user *ubuf, size_t count, + loff_t *ppos) +{ + +#define ENTRY_U32_FIELDS 8 +#define NAT_ENTRY_ENABLE 0x8000 +#define NAT_ENTRY_RST_FIN_BIT 0x4000 +#define BASE_TABLE 0 +#define EXPANSION_TABLE 1 + + u32 *base_tbl, *indx_tbl; + u32 tbl_size, *tmp; + u32 value, i, j, rule_id; + u16 enable, tbl_entry, flag; + u32 no_entries = 0; + int nbytes = 0; + + mutex_lock(&ipa_ctx->nat_mem.lock); + value = ipa_ctx->nat_mem.public_ip_addr; + pr_err( + "Table IP Address:%d.%d.%d.%d\n", + ((value & 0xFF000000) >> 24), + ((value & 0x00FF0000) >> 16), + ((value & 0x0000FF00) >> 8), + ((value & 0x000000FF))); + + pr_err("Table Size:%d\n", + ipa_ctx->nat_mem.size_base_tables); + + if (!ipa_ctx->nat_mem.size_expansion_tables) + pr_err("Expansion Table Size:%d\n", + ipa_ctx->nat_mem.size_expansion_tables); + else + pr_err("Expansion Table Size:%d\n", + ipa_ctx->nat_mem.size_expansion_tables-1); + + if (!ipa_ctx->nat_mem.is_sys_mem) + pr_err("Not supported for local(shared) memory\n"); + + /* Print Base tables */ + rule_id = 0; + for (j = 0; j < 2; j++) { + if (j == BASE_TABLE) { + tbl_size = ipa_ctx->nat_mem.size_base_tables; + base_tbl = (u32 *)ipa_ctx->nat_mem.ipv4_rules_addr; + + pr_err("\nBase Table:\n"); + } else { + if (!ipa_ctx->nat_mem.size_expansion_tables) + continue; + tbl_size = ipa_ctx->nat_mem.size_expansion_tables-1; + base_tbl = + (u32 *)ipa_ctx->nat_mem.ipv4_expansion_rules_addr; + + pr_err("\nExpansion Base Table:\n"); + } + + if (base_tbl != NULL) { + for (i = 0; i <= tbl_size; i++, rule_id++) { + tmp = base_tbl; + value = tmp[4]; + enable = ((value & 0xFFFF0000) >> 16); + + if (enable & NAT_ENTRY_ENABLE) { + no_entries++; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Rule:%d ", rule_id); + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Private_IP:%d.%d.%d.%d ", + ((value & 0xFF000000) >> 24), + ((value & 0x00FF0000) >> 16), + ((value & 0x0000FF00) >> 8), + 
((value & 0x000000FF))); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Target_IP:%d.%d.%d.%d ", + ((value & 0xFF000000) >> 24), + ((value & 0x00FF0000) >> 16), + ((value & 0x0000FF00) >> 8), + ((value & 0x000000FF))); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Next_Index:%d Public_Port:%d ", + (value & 0x0000FFFF), + ((value & 0xFFFF0000) >> 16)); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Private_Port:%d Target_Port:%d ", + (value & 0x0000FFFF), + ((value & 0xFFFF0000) >> 16)); + tmp++; + + value = *tmp; + flag = ((value & 0xFFFF0000) >> 16); + if (flag & NAT_ENTRY_RST_FIN_BIT) { + nbytes = scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "IP_CKSM_delta:0x%x Flags:%s ", + (value & 0x0000FFFF), + "Direct_To_A5"); + } else { + nbytes = scnprintf( + dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "IP_CKSM_delta:0x%x Flags:%s ", + (value & 0x0000FFFF), + "Fwd_to_route"); + } + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Time_stamp:0x%x Proto:%d ", + (value & 0x00FFFFFF), + ((value & 0xFF000000) >> 24)); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "Prev_Index:%d Indx_tbl_entry:%d ", + (value & 0x0000FFFF), + ((value & 0xFFFF0000) >> 16)); + tmp++; + + value = *tmp; + nbytes = scnprintf(dbg_buff + nbytes, + IPA_MAX_MSG_LEN - nbytes, + "TCP_UDP_cksum_delta:0x%x ", + ((value & 0xFFFF0000) >> 16)); + pr_err("%s\n", dbg_buff); + } + + base_tbl += ENTRY_U32_FIELDS; + + } + } + } + + /* Print Index tables */ + rule_id = 0; + for (j = 0; j < 2; j++) { + if (j == BASE_TABLE) { + tbl_size = ipa_ctx->nat_mem.size_base_tables; + indx_tbl = (u32 *)ipa_ctx->nat_mem.index_table_addr; + + pr_err("\nIndex Table:\n"); + } else { + if (!ipa_ctx->nat_mem.size_expansion_tables) + continue; + tbl_size = 
ipa_ctx->nat_mem.size_expansion_tables-1; + indx_tbl = + (u32 *)ipa_ctx->nat_mem.index_table_expansion_addr; + + pr_err("\nExpansion Index Table:\n"); + } + + if (indx_tbl != NULL) { + for (i = 0; i <= tbl_size; i++, rule_id++) { + tmp = indx_tbl; + value = *tmp; + tbl_entry = (value & 0x0000FFFF); + + if (tbl_entry) { + value = *tmp; + pr_err( + "Rule:%d Table_Ent:%d Next_Index:%d\n", + rule_id, + tbl_entry, + ((value & 0xFFFF0000) >> 16)); + } + + indx_tbl++; + } + } + } + pr_err("Current No. Nat Entries: %d\n", no_entries); + mutex_unlock(&ipa_ctx->nat_mem.lock); + + return 0; +} + +static ssize_t ipa_rm_read_stats(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int result, nbytes, cnt = 0; + + result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN); + if (result < 0) { + nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, + "Error in printing RM stat %d\n", result); + cnt += nbytes; + } else + cnt += result; + + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); +} + +static void ipa_dump_status(struct ipa_hw_pkt_status *status) +{ + IPA_DUMP_STATUS_FIELD(status_opcode); + IPA_DUMP_STATUS_FIELD(exception); + IPA_DUMP_STATUS_FIELD(status_mask); + IPA_DUMP_STATUS_FIELD(pkt_len); + IPA_DUMP_STATUS_FIELD(endp_src_idx); + IPA_DUMP_STATUS_FIELD(endp_dest_idx); + IPA_DUMP_STATUS_FIELD(metadata); + + if (ipa_ctx->ipa_hw_type < IPA_HW_v2_5) { + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_local); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_global); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_pipe_idx); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_match); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_rule_idx); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.ret_hdr); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.tag_f_1); + } else { + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_local); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_global); + 
IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_pipe_idx); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.ret_hdr); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_rule_idx); + IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.tag_f_1); + } + + IPA_DUMP_STATUS_FIELD(tag_f_2); + IPA_DUMP_STATUS_FIELD(time_day_ctr); + IPA_DUMP_STATUS_FIELD(nat_hit); + IPA_DUMP_STATUS_FIELD(nat_tbl_idx); + IPA_DUMP_STATUS_FIELD(nat_type); + IPA_DUMP_STATUS_FIELD(route_local); + IPA_DUMP_STATUS_FIELD(route_tbl_idx); + IPA_DUMP_STATUS_FIELD(route_match); + IPA_DUMP_STATUS_FIELD(ucp); + IPA_DUMP_STATUS_FIELD(route_rule_idx); + IPA_DUMP_STATUS_FIELD(hdr_local); + IPA_DUMP_STATUS_FIELD(hdr_offset); + IPA_DUMP_STATUS_FIELD(frag_hit); + IPA_DUMP_STATUS_FIELD(frag_rule); +} + +static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ipa_status_stats *stats; + int i, j; + + stats = kzalloc(sizeof(*stats), GFP_KERNEL); + if (!stats) + return -EFAULT; + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + if (!ipa_ctx->ep[i].sys || !ipa_ctx->ep[i].sys->status_stat) + continue; + + memcpy(stats, ipa_ctx->ep[i].sys->status_stat, sizeof(*stats)); + stats->curr = (stats->curr + IPA_MAX_STATUS_STAT_NUM - 1) + % IPA_MAX_STATUS_STAT_NUM; + pr_err("Statuses for pipe %d\n", i); + for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) { + pr_err("curr=%d\n", stats->curr); + ipa_dump_status(&stats->status[stats->curr]); + pr_err("\n\n\n"); + stats->curr = (stats->curr + 1) % + IPA_MAX_STATUS_STAT_NUM; + } + } + + kfree(stats); + return 0; +} + +static ssize_t ipa2_print_active_clients_log(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int cnt; + int table_size; + + if (active_clients_buf == NULL) { + IPAERR("Active Clients buffer is not allocated"); + return 0; + } + memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE); + ipa_active_clients_lock(); + cnt = ipa2_active_clients_log_print_buffer(active_clients_buf, + 
IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE - IPA_MAX_MSG_LEN); + table_size = ipa2_active_clients_log_print_table(active_clients_buf + + cnt, IPA_MAX_MSG_LEN); + ipa_active_clients_unlock(); + + return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf, + cnt + table_size); +} + +static ssize_t ipa2_clear_active_clients_log(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + ipa2_active_clients_log_clear(); + + return count; +} + +static ssize_t ipa_read_rx_polling_timeout(struct file *file, + char __user *ubuf, size_t count, loff_t *ppos) +{ + int min_cnt; + int max_cnt; + + if (active_clients_buf == NULL) { + IPAERR("Active Clients buffer is not allocated"); + return 0; + } + memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE); + min_cnt = scnprintf(active_clients_buf, + IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE, + "Rx Min Poll count = %u\n", + ipa_ctx->ipa_rx_min_timeout_usec); + + max_cnt = scnprintf(active_clients_buf + min_cnt, + IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE, + "Rx Max Poll count = %u\n", + ipa_ctx->ipa_rx_max_timeout_usec); + + return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf, + min_cnt + max_cnt); +} + +static ssize_t ipa_write_rx_polling_timeout(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + s8 polltime = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + if (copy_from_user(dbg_buff, ubuf, count)) + return -EFAULT; + + dbg_buff[count] = '\0'; + + if (kstrtos8(dbg_buff, 0, &polltime)) + return -EFAULT; + + ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec, + &ipa_ctx->ipa_rx_max_timeout_usec, polltime); + return count; +} + +static ssize_t ipa_read_polling_iteration(struct file *file, + 
char __user *ubuf, size_t count, loff_t *ppos) +{ + int cnt; + + if (active_clients_buf == NULL) { + IPAERR("Active Clients buffer is not allocated"); + return 0; + } + + memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE); + + cnt = scnprintf(active_clients_buf, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE, + "Polling Iteration count = %u\n", + ipa_ctx->ipa_polling_iteration); + + return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf, + cnt); +} + +static ssize_t ipa_write_polling_iteration(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + s8 iteration_cnt = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + if (copy_from_user(dbg_buff, ubuf, count)) + return -EFAULT; + + dbg_buff[count] = '\0'; + + if (kstrtos8(dbg_buff, 0, &iteration_cnt)) + return -EFAULT; + + if ((iteration_cnt >= MIN_POLLING_ITERATION) && + (iteration_cnt <= MAX_POLLING_ITERATION)) + ipa_ctx->ipa_polling_iteration = iteration_cnt; + else + ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION; + + return count; +} + +static ssize_t ipa_enable_ipc_low(struct file *file, + const char __user *ubuf, size_t count, loff_t *ppos) +{ + unsigned long missing; + s8 option = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &option)) + return -EFAULT; + + mutex_lock(&ipa_ctx->lock); + if (option) { + if (!ipa_ipc_low_buff) { + ipa_ipc_low_buff = + ipc_log_context_create(IPA_IPC_LOG_PAGES, + "ipa_low", 0); + if (ipa_ipc_low_buff == NULL) + IPADBG("failed to get logbuf_low\n"); + } + ipa_ctx->logbuf_low = ipa_ipc_low_buff; + } else { + ipa_ctx->logbuf_low = NULL; + } + mutex_unlock(&ipa_ctx->lock); + + return count; +} + +const struct file_operations ipa_gen_reg_ops = { + .read = ipa_read_gen_reg, +}; + +const struct file_operations ipa_ep_reg_ops = { + .read = ipa_read_ep_reg, + .write = 
ipa_write_ep_reg, +}; + +const struct file_operations ipa_keep_awake_ops = { + .read = ipa_read_keep_awake, + .write = ipa_write_keep_awake, +}; + +const struct file_operations ipa_ep_holb_ops = { + .write = ipa_write_ep_holb, +}; + +const struct file_operations ipa_hdr_ops = { + .read = ipa_read_hdr, +}; + +const struct file_operations ipa_rt_ops = { + .read = ipa_read_rt, + .open = simple_open, +}; + +const struct file_operations ipa_proc_ctx_ops = { + .read = ipa_read_proc_ctx, +}; + +const struct file_operations ipa_flt_ops = { + .read = ipa_read_flt, + .open = simple_open, +}; + +const struct file_operations ipa_stats_ops = { + .read = ipa_read_stats, +}; + +const struct file_operations ipa_wstats_ops = { + .read = ipa_read_wstats, +}; + +const struct file_operations ipa_wdi_ops = { + .read = ipa_read_wdi, +}; + +const struct file_operations ipa_ntn_ops = { + .read = ipa_read_ntn, +}; + +const struct file_operations ipa_msg_ops = { + .read = ipa_read_msg, +}; + +const struct file_operations ipa_dbg_cnt_ops = { + .read = ipa_read_dbg_cnt, + .write = ipa_write_dbg_cnt, +}; + +const struct file_operations ipa_nat4_ops = { + .read = ipa_read_nat4, +}; + +const struct file_operations ipa_rm_stats = { + .read = ipa_rm_read_stats, +}; + +const struct file_operations ipa_status_stats_ops = { + .read = ipa_status_stats_read, +}; + +const struct file_operations ipa2_active_clients = { + .read = ipa2_print_active_clients_log, + .write = ipa2_clear_active_clients_log, +}; + +const struct file_operations ipa_ipc_low_ops = { + .write = ipa_enable_ipc_low, +}; + +const struct file_operations ipa_rx_poll_time_ops = { + .read = ipa_read_rx_polling_timeout, + .write = ipa_write_rx_polling_timeout, +}; + +const struct file_operations ipa_poll_iteration_ops = { + .read = ipa_read_polling_iteration, + .write = ipa_write_polling_iteration, +}; + +void ipa_debugfs_init(void) +{ + const mode_t read_only_mode = 0444; + const mode_t read_write_mode = 0664; + const mode_t 
write_only_mode = 0220; + struct dentry *file; + + dent = debugfs_create_dir("ipa", 0); + if (IS_ERR(dent)) { + IPAERR("fail to create folder in debug_fs.\n"); + return; + } + + file = debugfs_create_u32("hw_type", read_only_mode, + dent, &ipa_ctx->ipa_hw_type); + if (!file) { + IPAERR("could not create hw_type file\n"); + goto fail; + } + + + dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0, + &ipa_gen_reg_ops); + if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) { + IPAERR("fail to create file for debug_fs gen_reg\n"); + goto fail; + } + + dfile_active_clients = debugfs_create_file("active_clients", + read_write_mode, dent, 0, &ipa2_active_clients); + if (!dfile_active_clients || IS_ERR(dfile_active_clients)) { + IPAERR("fail to create file for debug_fs active_clients\n"); + goto fail; + } + + active_clients_buf = NULL; + active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE, + GFP_KERNEL); + if (active_clients_buf == NULL) + IPAERR("fail to allocate active clients memory buffer"); + + dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0, + &ipa_ep_reg_ops); + if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) { + IPAERR("fail to create file for debug_fs ep_reg\n"); + goto fail; + } + + dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode, + dent, 0, &ipa_keep_awake_ops); + if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) { + IPAERR("fail to create file for debug_fs dfile_keep_awake\n"); + goto fail; + } + + dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent, + 0, &ipa_ep_holb_ops); + if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) { + IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n"); + goto fail; + } + + dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0, + &ipa_hdr_ops); + if (!dfile_hdr || IS_ERR(dfile_hdr)) { + IPAERR("fail to create file for debug_fs hdr\n"); + goto fail; + } + + dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent, + 0, 
&ipa_proc_ctx_ops); + if (!dfile_hdr || IS_ERR(dfile_hdr)) { + IPAERR("fail to create file for debug_fs proc_ctx\n"); + goto fail; + } + + dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent, + (void *)IPA_IP_v4, &ipa_rt_ops); + if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) { + IPAERR("fail to create file for debug_fs ip4 rt\n"); + goto fail; + } + + dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent, + (void *)IPA_IP_v6, &ipa_rt_ops); + if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) { + IPAERR("fail to create file for debug_fs ip6:w rt\n"); + goto fail; + } + + dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent, + (void *)IPA_IP_v4, &ipa_flt_ops); + if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) { + IPAERR("fail to create file for debug_fs ip4 flt\n"); + goto fail; + } + + dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent, + (void *)IPA_IP_v6, &ipa_flt_ops); + if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) { + IPAERR("fail to create file for debug_fs ip6 flt\n"); + goto fail; + } + + dfile_stats = debugfs_create_file("stats", read_only_mode, dent, 0, + &ipa_stats_ops); + if (!dfile_stats || IS_ERR(dfile_stats)) { + IPAERR("fail to create file for debug_fs stats\n"); + goto fail; + } + + dfile_wstats = debugfs_create_file("wstats", read_only_mode, + dent, 0, &ipa_wstats_ops); + if (!dfile_wstats || IS_ERR(dfile_wstats)) { + IPAERR("fail to create file for debug_fs wstats\n"); + goto fail; + } + + dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, 0, + &ipa_wdi_ops); + if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) { + IPAERR("fail to create file for debug_fs wdi stats\n"); + goto fail; + } + + dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0, + &ipa_ntn_ops); + if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) { + IPAERR("fail to create file for debug_fs ntn stats\n"); + goto fail; + } + + dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0, + 
&ipa_dbg_cnt_ops); + if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) { + IPAERR("fail to create file for debug_fs dbg_cnt\n"); + goto fail; + } + + dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0, + &ipa_msg_ops); + if (!dfile_msg || IS_ERR(dfile_msg)) { + IPAERR("fail to create file for debug_fs msg\n"); + goto fail; + } + + dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent, + 0, &ipa_nat4_ops); + if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) { + IPAERR("fail to create file for debug_fs ip4 nat\n"); + goto fail; + } + + dfile_rm_stats = debugfs_create_file("rm_stats", + read_only_mode, dent, 0, &ipa_rm_stats); + if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) { + IPAERR("fail to create file for debug_fs rm_stats\n"); + goto fail; + } + + dfile_status_stats = debugfs_create_file("status_stats", + read_only_mode, dent, 0, &ipa_status_stats_ops); + if (!dfile_status_stats || IS_ERR(dfile_status_stats)) { + IPAERR("fail to create file for debug_fs status_stats\n"); + goto fail; + } + + dfile_ipa_rx_poll_timeout = debugfs_create_file("ipa_rx_poll_time", + read_write_mode, dent, 0, &ipa_rx_poll_time_ops); + if (!dfile_ipa_rx_poll_timeout || IS_ERR(dfile_ipa_rx_poll_timeout)) { + IPAERR("fail to create file for debug_fs rx poll timeout\n"); + goto fail; + } + + dfile_ipa_poll_iteration = debugfs_create_file("ipa_poll_iteration", + read_write_mode, dent, 0, &ipa_poll_iteration_ops); + if (!dfile_ipa_poll_iteration || IS_ERR(dfile_ipa_poll_iteration)) { + IPAERR("fail to create file for debug_fs poll iteration\n"); + goto fail; + } + + file = debugfs_create_u32("enable_clock_scaling", read_write_mode, + dent, &ipa_ctx->enable_clock_scaling); + if (!file) { + IPAERR("could not create enable_clock_scaling file\n"); + goto fail; + } + + file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps", + read_write_mode, dent, + &ipa_ctx->ctrl->clock_scaling_bw_threshold_nominal); + if (!file) { + IPAERR("could not create 
bw_threshold_nominal_mbps\n"); + goto fail; + } + + file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps", + read_write_mode, dent, + &ipa_ctx->ctrl->clock_scaling_bw_threshold_turbo); + if (!file) { + IPAERR("could not create bw_threshold_turbo_mbps\n"); + goto fail; + } + + file = debugfs_create_file("enable_low_prio_print", write_only_mode, + dent, 0, &ipa_ipc_low_ops); + if (!file) { + IPAERR("could not create enable_low_prio_print file\n"); + goto fail; + } + + return; + +fail: + debugfs_remove_recursive(dent); +} + +void ipa_debugfs_remove(void) +{ + if (IS_ERR(dent)) { + IPAERR("Debugfs: folder was not created.\n"); + return; + } + if (active_clients_buf != NULL) { + kfree(active_clients_buf); + active_clients_buf = NULL; + } + debugfs_remove_recursive(dent); +} + +#else /* !CONFIG_DEBUG_FS */ +void ipa_debugfs_init(void) {} +void ipa_debugfs_remove(void) {} +int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len) +{ + return 0; +} +int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe) +{ + return 0; +} +int _ipa_read_gen_reg_v1_1(char *buff, int max_len) +{ + return 0; +} +void _ipa_write_dbg_cnt_v1_1(int option) {} +int _ipa_read_gen_reg_v2_0(char *buff, int max_len) +{ + return 0; +} +int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe) +{ + return 0; +} +void _ipa_write_dbg_cnt_v2_0(int option) {} +int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len) +{ + return 0; +} +#endif diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c new file mode 100644 index 000000000000..232d9216d1bf --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c @@ -0,0 +1,902 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2016, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" + +#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010 +#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050 +#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8 +#define IPA_DMA_MAX_PKT_SZ 0xFFFF +#define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \ + sizeof(struct sps_iovec) - 1) +#define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \ + sizeof(struct sps_iovec) - 1) + +#define IPADMA_DRV_NAME "ipa_dma" + +#define IPADMA_DBG(fmt, args...) \ + do { \ + pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_ERR(fmt, args...) 
\ + do { \ + pr_err(IPADMA_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPADMA_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPADMA_FUNC_ENTRY() \ + IPADMA_DBG_LOW("ENTRY\n") + +#define IPADMA_FUNC_EXIT() \ + IPADMA_DBG_LOW("EXIT\n") + + +#ifdef CONFIG_DEBUG_FS +#define IPADMA_MAX_MSG_LEN 1024 +static char dbg_buff[IPADMA_MAX_MSG_LEN]; +static void ipa_dma_debugfs_init(void); +static void ipa_dma_debugfs_destroy(void); +#else +static void ipa_dma_debugfs_init(void) {} +static void ipa_dma_debugfs_destroy(void) {} +#endif + +/** + * struct ipa_dma_xfer_wrapper - IPADMA transfer descr wrapper + * @phys_addr_src: physical address of the source data to copy + * @phys_addr_dest: physical address to store the copied data + * @len: len in bytes to copy + * @link: linked to the wrappers list on the proper(sync/async) cons pipe + * @xfer_done: completion object for sync_memcpy completion + * @callback: IPADMA client provided completion callback + * @user1: cookie1 for above callback + * + * This struct can wrap both sync and async memcpy transfers descriptors. + */ +struct ipa_dma_xfer_wrapper { + u64 phys_addr_src; + u64 phys_addr_dest; + u16 len; + struct list_head link; + struct completion xfer_done; + void (*callback)(void *user1); + void *user1; +}; + +/** + * struct ipa_dma_ctx -IPADMA driver context information + * @is_enabled:is ipa_dma enabled? 
+ * @destroy_pending: destroy ipa_dma after handling all pending memcpy + * @ipa_dma_xfer_wrapper_cache: cache of ipa_dma_xfer_wrapper structs + * @sync_lock: lock for synchronisation in sync_memcpy + * @async_lock: lock for synchronisation in async_memcpy + * @enable_lock: lock for is_enabled + * @pending_lock: lock for synchronize is_enable and pending_cnt + * @done: no pending works-ipadma can be destroyed + * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer + * @ipa_dma_async_prod_hdl:handle of async memcpy producer + * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer + * @sync_memcpy_pending_cnt: number of pending sync memcopy operations + * @async_memcpy_pending_cnt: number of pending async memcopy operations + * @uc_memcpy_pending_cnt: number of pending uc memcopy operations + * @total_sync_memcpy: total number of sync memcpy (statistics) + * @total_async_memcpy: total number of async memcpy (statistics) + * @total_uc_memcpy: total number of uc memcpy (statistics) + */ +struct ipa_dma_ctx { + bool is_enabled; + bool destroy_pending; + struct kmem_cache *ipa_dma_xfer_wrapper_cache; + struct mutex sync_lock; + spinlock_t async_lock; + struct mutex enable_lock; + spinlock_t pending_lock; + struct completion done; + u32 ipa_dma_sync_prod_hdl; + u32 ipa_dma_async_prod_hdl; + u32 ipa_dma_sync_cons_hdl; + u32 ipa_dma_async_cons_hdl; + atomic_t sync_memcpy_pending_cnt; + atomic_t async_memcpy_pending_cnt; + atomic_t uc_memcpy_pending_cnt; + atomic_t total_sync_memcpy; + atomic_t total_async_memcpy; + atomic_t total_uc_memcpy; +}; +static struct ipa_dma_ctx *ipa_dma_ctx; + +/** + * ipa2_dma_init() -Initialize IPADMA. 
+ * + * This function initialize all IPADMA internal data and connect in dma: + * MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS + * MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_SYNC_CONS + * + * Return codes: 0: success + * -EFAULT: IPADMA is already initialized + * -ENOMEM: allocating memory error + * -EPERM: pipe connection failed + */ +int ipa2_dma_init(void) +{ + struct ipa_dma_ctx *ipa_dma_ctx_t; + struct ipa_sys_connect_params sys_in; + int res = 0; + + IPADMA_FUNC_ENTRY(); + + if (ipa_dma_ctx) { + IPADMA_ERR("Already initialized.\n"); + return -EFAULT; + } + ipa_dma_ctx_t = kzalloc(sizeof(*(ipa_dma_ctx)), GFP_KERNEL); + + if (!ipa_dma_ctx_t) { + IPADMA_ERR("kzalloc error.\n"); + return -ENOMEM; + } + + ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache = + kmem_cache_create("IPA_DMA_XFER_WRAPPER", + sizeof(struct ipa_dma_xfer_wrapper), 0, 0, NULL); + if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) { + IPAERR(":failed to create ipa dma xfer wrapper cache.\n"); + res = -ENOMEM; + goto fail_mem_ctrl; + } + + mutex_init(&ipa_dma_ctx_t->enable_lock); + spin_lock_init(&ipa_dma_ctx_t->async_lock); + mutex_init(&ipa_dma_ctx_t->sync_lock); + spin_lock_init(&ipa_dma_ctx_t->pending_lock); + init_completion(&ipa_dma_ctx_t->done); + ipa_dma_ctx_t->is_enabled = false; + ipa_dma_ctx_t->destroy_pending = false; + atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0); + atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0); + atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0); + atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0); + + /* IPADMA SYNC PROD-source for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; + sys_in.skip_ep_cfg = false; + if 
(ipa2_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) { + IPADMA_ERR(":setup sync prod pipe failed\n"); + res = -EPERM; + goto fail_sync_prod; + } + + /* IPADMA SYNC CONS-destination for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.skip_ep_cfg = false; + sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + sys_in.notify = NULL; + sys_in.priv = NULL; + if (ipa2_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) { + IPADMA_ERR(":setup sync cons pipe failed.\n"); + res = -EPERM; + goto fail_sync_cons; + } + + IPADMA_DBG("SYNC MEMCPY pipes are connected\n"); + + /* IPADMA ASYNC PROD-source for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD; + sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; + sys_in.skip_ep_cfg = false; + sys_in.notify = NULL; + if (ipa2_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) { + IPADMA_ERR(":setup async prod pipe failed.\n"); + res = -EPERM; + goto fail_async_prod; + } + + /* IPADMA ASYNC CONS-destination for sync memcpy */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS; + sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ; + sys_in.skip_ep_cfg = false; + sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC; + sys_in.notify = ipa_dma_async_memcpy_notify_cb; + sys_in.priv = NULL; + if (ipa2_setup_sys_pipe(&sys_in, + &ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) { + IPADMA_ERR(":setup async cons pipe failed.\n"); + res = -EPERM; + goto fail_async_cons; + } + ipa_dma_debugfs_init(); + ipa_dma_ctx = ipa_dma_ctx_t; + IPADMA_DBG("ASYNC MEMCPY pipes are connected\n"); + + IPADMA_FUNC_EXIT(); + return res; +fail_async_cons: + 
ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl); +fail_async_prod: + ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl); +fail_sync_cons: + ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl); +fail_sync_prod: + kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache); +fail_mem_ctrl: + kfree(ipa_dma_ctx_t); + ipa_dma_ctx = NULL; + return res; + +} + + +/** + * ipa2_dma_enable() -Vote for IPA clocks. + * + *Return codes: 0: success + * -EINVAL: IPADMA is not initialized + * -EPERM: Operation not permitted as ipa_dma is already + * enabled + */ +int ipa2_dma_enable(void) +{ + IPADMA_FUNC_ENTRY(); + if (ipa_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't enable\n"); + return -EPERM; + } + mutex_lock(&ipa_dma_ctx->enable_lock); + if (ipa_dma_ctx->is_enabled) { + IPADMA_ERR("Already enabled.\n"); + mutex_unlock(&ipa_dma_ctx->enable_lock); + return -EPERM; + } + IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA"); + ipa_dma_ctx->is_enabled = true; + mutex_unlock(&ipa_dma_ctx->enable_lock); + + IPADMA_FUNC_EXIT(); + return 0; +} + +static bool ipa_dma_work_pending(void) +{ + if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)) { + IPADMA_DBG("pending sync\n"); + return true; + } + if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) { + IPADMA_DBG("pending async\n"); + return true; + } + if (atomic_read(&ipa_dma_ctx->uc_memcpy_pending_cnt)) { + IPADMA_DBG("pending uc\n"); + return true; + } + IPADMA_DBG_LOW("no pending work\n"); + return false; +} + +/** + * ipa2_dma_disable()- Unvote for IPA clocks. + * + * enter to power save mode. 
+ * + * Return codes: 0: success + * -EINVAL: IPADMA is not initialized + * -EPERM: Operation not permitted as ipa_dma is already + * diabled + * -EFAULT: can not disable ipa_dma as there are pending + * memcopy works + */ +int ipa2_dma_disable(void) +{ + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + if (ipa_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't disable\n"); + return -EPERM; + } + mutex_lock(&ipa_dma_ctx->enable_lock); + spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); + if (!ipa_dma_ctx->is_enabled) { + IPADMA_ERR("Already disabled.\n"); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + mutex_unlock(&ipa_dma_ctx->enable_lock); + return -EPERM; + } + if (ipa_dma_work_pending()) { + IPADMA_ERR("There is pending work, can't disable.\n"); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + mutex_unlock(&ipa_dma_ctx->enable_lock); + return -EFAULT; + } + ipa_dma_ctx->is_enabled = false; + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA"); + mutex_unlock(&ipa_dma_ctx->enable_lock); + IPADMA_FUNC_EXIT(); + return 0; +} + +/** + * ipa2_dma_sync_memcpy()- Perform synchronous memcpy using IPA. + * + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. 
+ * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -SPS_ERROR: on sps faliures + * -EFAULT: other + */ +int ipa2_dma_sync_memcpy(u64 dest, u64 src, int len) +{ + int ep_idx; + int res; + int i = 0; + struct ipa_sys_context *cons_sys; + struct ipa_sys_context *prod_sys; + struct sps_iovec iov; + struct ipa_dma_xfer_wrapper *xfer_descr = NULL; + struct ipa_dma_xfer_wrapper *head_descr = NULL; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + + IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n", + dest, src, len); + if (ipa_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + if (((u32)src != src) || ((u32)dest != dest)) { + IPADMA_ERR("Bad addr - only 32b addr supported for BAM"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); + if (!ipa_dma_ctx->is_enabled) { + IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa_dma_ctx->sync_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) >= + IPA_DMA_MAX_PENDING_SYNC) { + atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt); + IPADMA_ERR("Reached pending requests limit\n"); + return -EFAULT; + } + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_CONS); + return -EFAULT; + } + cons_sys = ipa_ctx->ep[ep_idx].sys; + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not 
mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + return -EFAULT; + } + prod_sys = ipa_ctx->ep[ep_idx].sys; + + xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, + GFP_KERNEL); + if (!xfer_descr) { + IPADMA_ERR("failed to alloc xfer descr wrapper\n"); + res = -ENOMEM; + goto fail_mem_alloc; + } + xfer_descr->phys_addr_dest = dest; + xfer_descr->phys_addr_src = src; + xfer_descr->len = len; + init_completion(&xfer_descr->xfer_done); + + mutex_lock(&ipa_dma_ctx->sync_lock); + list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); + cons_sys->len++; + res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, NULL, 0); + if (res) { + IPADMA_ERR("Failed: sps_transfer_one on dest descr\n"); + goto fail_sps_send; + } + res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len, + NULL, SPS_IOVEC_FLAG_EOT); + if (res) { + IPADMA_ERR("Failed: sps_transfer_one on src descr\n"); + ipa_assert(); + } + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa_dma_xfer_wrapper, link); + + /* in case we are not the head of the list, wait for head to wake us */ + if (xfer_descr != head_descr) { + mutex_unlock(&ipa_dma_ctx->sync_lock); + wait_for_completion(&xfer_descr->xfer_done); + mutex_lock(&ipa_dma_ctx->sync_lock); + head_descr = list_first_entry(&cons_sys->head_desc_list, + struct ipa_dma_xfer_wrapper, link); + ipa_assert_on(xfer_descr != head_descr); + } + mutex_unlock(&ipa_dma_ctx->sync_lock); + + do { + /* wait for transfer to complete */ + res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov); + if (res) + IPADMA_ERR("Failed: get_iovec, returned %d loop#:%d\n" + , res, i); + + usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX, + IPA_DMA_POLLING_MAX_SLEEP_RX); + i++; + } while (iov.addr == 0); + + mutex_lock(&ipa_dma_ctx->sync_lock); + list_del(&head_descr->link); + cons_sys->len--; + kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); + /* wake the head of the list */ + if (!list_empty(&cons_sys->head_desc_list)) { + head_descr = 
list_first_entry(&cons_sys->head_desc_list, + struct ipa_dma_xfer_wrapper, link); + complete(&head_descr->xfer_done); + } + mutex_unlock(&ipa_dma_ctx->sync_lock); + + ipa_assert_on(dest != iov.addr); + ipa_assert_on(len != iov.size); + atomic_inc(&ipa_dma_ctx->total_sync_memcpy); + atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt); + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + + IPADMA_FUNC_EXIT(); + return res; + +fail_sps_send: + list_del(&xfer_descr->link); + cons_sys->len--; + mutex_unlock(&ipa_dma_ctx->sync_lock); + kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); +fail_mem_alloc: + atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt); + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + return res; +} + +/** + * ipa2_dma_async_memcpy()- Perform asynchronous memcpy using IPA. + * + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. + * @user_cb: callback function to notify the client when the copy was done. + * @user_param: cookie for user_cb. + * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -SPS_ERROR: on sps faliures + * -EFAULT: descr fifo is full. 
+ */ +int ipa2_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param) +{ + int ep_idx; + int res = 0; + struct ipa_dma_xfer_wrapper *xfer_descr = NULL; + struct ipa_sys_context *prod_sys; + struct ipa_sys_context *cons_sys; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + IPADMA_DBG_LOW("dest = 0x%llx, src = 0x%llx, len = %d\n", + dest, src, len); + if (ipa_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + if (((u32)src != src) || ((u32)dest != dest)) { + IPADMA_ERR("Bad addr - only 32b addr supported for BAM"); + return -EINVAL; + } + if (!user_cb) { + IPADMA_ERR("null pointer: user_cb\n"); + return -EINVAL; + } + spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); + if (!ipa_dma_ctx->is_enabled) { + IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa_dma_ctx->async_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt) >= + IPA_DMA_MAX_PENDING_ASYNC) { + atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); + IPADMA_ERR("Reached pending requests limit\n"); + return -EFAULT; + } + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + return -EFAULT; + } + cons_sys = ipa_ctx->ep[ep_idx].sys; + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD); + if (-1 == ep_idx) { + IPADMA_ERR("Client %u is not mapped\n", + IPA_CLIENT_MEMCPY_DMA_SYNC_PROD); + return -EFAULT; + } + prod_sys = ipa_ctx->ep[ep_idx].sys; + + xfer_descr = 
kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, + GFP_KERNEL); + if (!xfer_descr) { + IPADMA_ERR("failed to alloc xfrer descr wrapper\n"); + res = -ENOMEM; + goto fail_mem_alloc; + } + xfer_descr->phys_addr_dest = dest; + xfer_descr->phys_addr_src = src; + xfer_descr->len = len; + xfer_descr->callback = user_cb; + xfer_descr->user1 = user_param; + + spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags); + list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list); + cons_sys->len++; + res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, xfer_descr, 0); + if (res) { + IPADMA_ERR("Failed: sps_transfer_one on dest descr\n"); + goto fail_sps_send; + } + res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len, + NULL, SPS_IOVEC_FLAG_EOT); + if (res) { + IPADMA_ERR("Failed: sps_transfer_one on src descr\n"); + ipa_assert(); + goto fail_sps_send; + } + spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); + IPADMA_FUNC_EXIT(); + return res; + +fail_sps_send: + list_del(&xfer_descr->link); + spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); + kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr); +fail_mem_alloc: + atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + return res; +} + +/** + * ipa2_dma_uc_memcpy() - Perform a memcpy action using IPA uC + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. 
+ * + * Return codes: 0: success + * -EINVAL: invalid params + * -EPERM: operation not permitted as ipa_dma isn't enable or + * initialized + * -EBADF: IPA uC is not loaded + */ +int ipa2_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + int res; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + if (ipa_dma_ctx == NULL) { + IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n"); + return -EPERM; + } + if ((max(src, dest) - min(src, dest)) < len) { + IPADMA_ERR("invalid addresses - overlapping buffers\n"); + return -EINVAL; + } + if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) { + IPADMA_ERR("invalid len, %d\n", len); + return -EINVAL; + } + + spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags); + if (!ipa_dma_ctx->is_enabled) { + IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n"); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + return -EPERM; + } + atomic_inc(&ipa_dma_ctx->uc_memcpy_pending_cnt); + spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags); + + res = ipa_uc_memcpy(dest, src, len); + if (res) { + IPADMA_ERR("ipa_uc_memcpy failed %d\n", res); + goto dec_and_exit; + } + + atomic_inc(&ipa_dma_ctx->total_uc_memcpy); + res = 0; +dec_and_exit: + atomic_dec(&ipa_dma_ctx->uc_memcpy_pending_cnt); + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + IPADMA_FUNC_EXIT(); + return res; +} + +/** + * ipa2_dma_destroy() -teardown IPADMA pipes and release ipadma. + * + * this is a blocking function, returns just after destroying IPADMA. 
+ */ +void ipa2_dma_destroy(void) +{ + int res = 0; + + IPADMA_FUNC_ENTRY(); + if (!ipa_dma_ctx) { + IPADMA_ERR("IPADMA isn't initialized\n"); + return; + } + + if (ipa_dma_work_pending()) { + ipa_dma_ctx->destroy_pending = true; + IPADMA_DBG("There are pending memcpy, wait for completion\n"); + wait_for_completion(&ipa_dma_ctx->done); + } + + res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_cons_hdl); + if (res) + IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n"); + ipa_dma_ctx->ipa_dma_async_cons_hdl = 0; + res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_cons_hdl); + if (res) + IPADMA_ERR("teardown IPADMA SYNC CONS failed\n"); + ipa_dma_ctx->ipa_dma_sync_cons_hdl = 0; + res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_prod_hdl); + if (res) + IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n"); + ipa_dma_ctx->ipa_dma_async_prod_hdl = 0; + res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_prod_hdl); + if (res) + IPADMA_ERR("teardown IPADMA SYNC PROD failed\n"); + ipa_dma_ctx->ipa_dma_sync_prod_hdl = 0; + + ipa_dma_debugfs_destroy(); + kmem_cache_destroy(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache); + kfree(ipa_dma_ctx); + ipa_dma_ctx = NULL; + + IPADMA_FUNC_EXIT(); +} + +/** + * ipa_dma_async_memcpy_notify_cb() -Callback function which will be called by + * IPA driver after getting notify from SPS driver or poll mode on Rx operation + * is completed (data was written to dest descriptor on async_cons ep). + * + * @priv -not in use. + * @evt - event name - IPA_RECIVE. + * @data -the iovec. 
+ */ +void ipa_dma_async_memcpy_notify_cb(void *priv + , enum ipa_dp_evt_type evt, unsigned long data) +{ + int ep_idx = 0; + struct sps_iovec *iov = (struct sps_iovec *) data; + struct ipa_dma_xfer_wrapper *xfer_descr_expected; + struct ipa_sys_context *sys; + unsigned long flags; + + IPADMA_FUNC_ENTRY(); + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS); + sys = ipa_ctx->ep[ep_idx].sys; + + spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags); + xfer_descr_expected = list_first_entry(&sys->head_desc_list, + struct ipa_dma_xfer_wrapper, link); + list_del(&xfer_descr_expected->link); + sys->len--; + spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags); + + ipa_assert_on(xfer_descr_expected->phys_addr_dest != iov->addr); + ipa_assert_on(xfer_descr_expected->len != iov->size); + + atomic_inc(&ipa_dma_ctx->total_async_memcpy); + atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt); + xfer_descr_expected->callback(xfer_descr_expected->user1); + + kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, + xfer_descr_expected); + + if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending()) + complete(&ipa_dma_ctx->done); + + IPADMA_FUNC_EXIT(); +} + +#ifdef CONFIG_DEBUG_FS +static struct dentry *dent; +static struct dentry *dfile_info; + +static ssize_t ipa_dma_debugfs_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + int nbytes = 0; + + if (!ipa_dma_ctx) { + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Not initialized\n"); + } else { + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Status:\n IPADMA is %s\n", + (ipa_dma_ctx->is_enabled) ? 
"Enabled" : "Disabled"); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "Statistics:\n total sync memcpy: %d\n ", + atomic_read(&ipa_dma_ctx->total_sync_memcpy)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "total async memcpy: %d\n ", + atomic_read(&ipa_dma_ctx->total_async_memcpy)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending sync memcpy jobs: %d\n ", + atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending async memcpy jobs: %d\n", + atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)); + nbytes += scnprintf(&dbg_buff[nbytes], + IPADMA_MAX_MSG_LEN - nbytes, + "pending uc memcpy jobs: %d\n", + atomic_read(&ipa_dma_ctx->uc_memcpy_pending_cnt)); + } + return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes); +} + +static ssize_t ipa_dma_debugfs_reset_statistics(struct file *file, + const char __user *ubuf, + size_t count, + loff_t *ppos) +{ + unsigned long missing; + s8 in_num = 0; + + if (sizeof(dbg_buff) < count + 1) + return -EFAULT; + + missing = copy_from_user(dbg_buff, ubuf, count); + if (missing) + return -EFAULT; + + dbg_buff[count] = '\0'; + if (kstrtos8(dbg_buff, 0, &in_num)) + return -EFAULT; + switch (in_num) { + case 0: + if (ipa_dma_work_pending()) + IPADMA_ERR("Note, there are pending memcpy\n"); + + atomic_set(&ipa_dma_ctx->total_async_memcpy, 0); + atomic_set(&ipa_dma_ctx->total_sync_memcpy, 0); + break; + default: + IPADMA_ERR("invalid argument: To reset statistics echo 0\n"); + break; + } + return count; +} + +const struct file_operations ipadma_stats_ops = { + .read = ipa_dma_debugfs_read, + .write = ipa_dma_debugfs_reset_statistics, +}; + +static void ipa_dma_debugfs_init(void) +{ + const mode_t read_write_mode = 0666; + + dent = debugfs_create_dir("ipa_dma", 0); + if (IS_ERR(dent)) { + IPADMA_ERR("fail to create folder ipa_dma\n"); + return; + } + + 
dfile_info = + debugfs_create_file("info", read_write_mode, dent, + 0, &ipadma_stats_ops); + if (!dfile_info || IS_ERR(dfile_info)) { + IPADMA_ERR("fail to create file stats\n"); + goto fail; + } + return; +fail: + debugfs_remove_recursive(dent); +} + +static void ipa_dma_debugfs_destroy(void) +{ + debugfs_remove_recursive(dent); +} + +#endif /* !CONFIG_DEBUG_FS */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c new file mode 100644 index 000000000000..b6bbc37f178a --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c @@ -0,0 +1,3736 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipa_trace.h" + +#define IPA_WAN_AGGR_PKT_CNT 5 +#define IPA_LAST_DESC_CNT 0xFFFF +#define POLLING_INACTIVITY_RX 40 +#define POLLING_INACTIVITY_TX 40 +#define POLLING_MIN_SLEEP_TX 400 +#define POLLING_MAX_SLEEP_TX 500 +/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */ +#define IPA_MTU 1500 +#define IPA_GENERIC_AGGR_BYTE_LIMIT 6 +#define IPA_GENERIC_AGGR_TIME_LIMIT 1 +#define IPA_GENERIC_AGGR_PKT_LIMIT 0 + +#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192 +#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\ + (X) + NET_SKB_PAD) +\ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\ + (IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X))) +#define IPA_GENERIC_RX_BUFF_LIMIT (\ + IPA_REAL_GENERIC_RX_BUFF_SZ(\ + IPA_GENERIC_RX_BUFF_BASE_SZ) -\ + IPA_GENERIC_RX_BUFF_BASE_SZ) + +#define IPA_RX_BUFF_CLIENT_HEADROOM 256 + +/* less 1 nominal MTU (1500 bytes) rounded to units of KB */ +#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000) + +#define IPA_WLAN_RX_POOL_SZ 100 +#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5 +#define IPA_WLAN_RX_BUFF_SZ 2048 +#define IPA_WLAN_COMM_RX_POOL_LOW 100 +#define IPA_WLAN_COMM_RX_POOL_HIGH 900 + +#define 
IPA_ODU_RX_BUFF_SZ 2048 +#define IPA_ODU_RX_POOL_SZ 32 +#define IPA_SIZE_DL_CSUM_META_TRAILER 8 + +#define IPA_HEADROOM 128 + +static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags); +static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys); +static void ipa_replenish_rx_cache(struct ipa_sys_context *sys); +static void replenish_rx_work_func(struct work_struct *work); +static void ipa_wq_handle_rx(struct work_struct *work); +static void ipa_wq_handle_tx(struct work_struct *work); +static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size); +static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, + u32 size); +static int ipa_assign_policy(struct ipa_sys_connect_params *in, + struct ipa_sys_context *sys); +static void ipa_cleanup_rx(struct ipa_sys_context *sys); +static void ipa_wq_rx_avail(struct work_struct *work); +static void ipa_alloc_wlan_rx_common_cache(u32 size); +static void ipa_cleanup_wlan_rx_common_cache(void); +static void ipa_wq_repl_rx(struct work_struct *work); +static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys, + struct sps_iovec *iovec); + +static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit); +static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys); + +static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt) +{ + struct ipa_tx_pkt_wrapper *tx_pkt_expected; + int i; + + for (i = 0; i < cnt; i++) { + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + spin_unlock_bh(&sys->spinlock); + return; + } + tx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa_tx_pkt_wrapper, + link); + list_del(&tx_pkt_expected->link); + sys->len--; + spin_unlock_bh(&sys->spinlock); + if (!tx_pkt_expected->no_unmap_dma) { + if (tx_pkt_expected->type != IPA_DATA_DESC_SKB_PAGED) { + dma_unmap_single(ipa_ctx->pdev, + tx_pkt_expected->mem.phys_base, + tx_pkt_expected->mem.size, + DMA_TO_DEVICE); + } else { + dma_unmap_page(ipa_ctx->pdev, 
+ tx_pkt_expected->mem.phys_base, + tx_pkt_expected->mem.size, + DMA_TO_DEVICE); + } + } + if (tx_pkt_expected->callback) + tx_pkt_expected->callback(tx_pkt_expected->user1, + tx_pkt_expected->user2); + if (tx_pkt_expected->cnt > 1 && + tx_pkt_expected->cnt != IPA_LAST_DESC_CNT) { + if (tx_pkt_expected->cnt == IPA_NUM_DESC_PER_SW_TX) { + dma_pool_free(ipa_ctx->dma_pool, + tx_pkt_expected->mult.base, + tx_pkt_expected->mult.phys_base); + } else { + dma_unmap_single(ipa_ctx->pdev, + tx_pkt_expected->mult.phys_base, + tx_pkt_expected->mult.size, + DMA_TO_DEVICE); + kfree(tx_pkt_expected->mult.base); + } + } + kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt_expected); + } +} + +static void ipa_wq_write_done_status(int src_pipe) +{ + struct ipa_tx_pkt_wrapper *tx_pkt_expected; + struct ipa_sys_context *sys; + u32 cnt; + + WARN_ON(src_pipe >= ipa_ctx->ipa_num_pipes); + + if (!ipa_ctx->ep[src_pipe].status.status_en) + return; + + sys = ipa_ctx->ep[src_pipe].sys; + if (!sys) + return; + + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + spin_unlock_bh(&sys->spinlock); + return; + } + tx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa_tx_pkt_wrapper, + link); + cnt = tx_pkt_expected->cnt; + spin_unlock_bh(&sys->spinlock); + ipa_wq_write_done_common(sys, cnt); +} + +/** + * ipa_write_done() - this function will be (eventually) called when a Tx + * operation is complete + * * @work: work_struct used by the work queue + * + * Will be called in deferred context. 
+ * - invoke the callback supplied by the client who sent this command + * - iterate over all packets and validate that + * the order for sent packet is the same as expected + * - delete all the tx packet descriptors from the system + * pipe context (not needed anymore) + * - return the tx buffer back to dma_pool + */ +static void ipa_wq_write_done(struct work_struct *work) +{ + struct ipa_tx_pkt_wrapper *tx_pkt; + u32 cnt; + struct ipa_sys_context *sys; + + tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work); + cnt = tx_pkt->cnt; + sys = tx_pkt->sys; + + ipa_wq_write_done_common(sys, cnt); +} + +static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all, + bool in_poll_state) +{ + struct sps_iovec iov; + int ret; + int cnt = 0; + + while ((in_poll_state ? atomic_read(&sys->curr_polling_state) : + !atomic_read(&sys->curr_polling_state))) { + if (cnt && !process_all) + break; + ret = sps_get_iovec(sys->ep->ep_hdl, &iov); + if (ret) { + IPAERR("sps_get_iovec failed %d\n", ret); + break; + } + + if (iov.addr == 0) + break; + + ipa_wq_write_done_common(sys, 1); + cnt++; + } + + return cnt; +} + +/** + * ipa_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode + */ +static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys) +{ + int ret; + + if (!atomic_read(&sys->curr_polling_state)) { + IPAERR("already in intr mode\n"); + goto fail; + } + + ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + goto fail; + } + sys->event.options = SPS_O_EOT; + ret = sps_register_event(sys->ep->ep_hdl, &sys->event); + if (ret) { + IPAERR("sps_register_event() failed %d\n", ret); + goto fail; + } + sys->ep->connect.options = + SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT; + ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + goto fail; + } + atomic_set(&sys->curr_polling_state, 0); + 
ipa_handle_tx_core(sys, true, false); + return; + +fail: + queue_delayed_work(sys->wq, &sys->switch_to_intr_work, + msecs_to_jiffies(1)); +} + +static void ipa_handle_tx(struct ipa_sys_context *sys) +{ + int inactive_cycles = 0; + int cnt; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + do { + cnt = ipa_handle_tx_core(sys, true, true); + if (cnt == 0) { + inactive_cycles++; + usleep_range(POLLING_MIN_SLEEP_TX, + POLLING_MAX_SLEEP_TX); + } else { + inactive_cycles = 0; + } + } while (inactive_cycles <= POLLING_INACTIVITY_TX); + + ipa_tx_switch_to_intr_mode(sys); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +static void ipa_wq_handle_tx(struct work_struct *work) +{ + struct ipa_sys_context *sys; + + sys = container_of(work, struct ipa_sys_context, work); + + ipa_handle_tx(sys); +} + +/** + * ipa_send_one() - Send a single descriptor + * @sys: system pipe context + * @desc: descriptor to send + * @in_atomic: whether caller is in atomic context + * + * - Allocate tx_packet wrapper + * - transfer data to the IPA + * - after the transfer was done the SPS will + * notify the sending user via ipa_sps_irq_comp_tx() + * + * Return codes: 0: success, -EFAULT: failure + */ +int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc, + bool in_atomic) +{ + struct ipa_tx_pkt_wrapper *tx_pkt; + int result; + u16 sps_flags = SPS_IOVEC_FLAG_EOT; + dma_addr_t dma_address; + u16 len; + u32 mem_flag = GFP_ATOMIC; + struct sps_iovec iov; + int ret; + + if (unlikely(!in_atomic)) + mem_flag = GFP_KERNEL; + + tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag); + if (!tx_pkt) { + IPAERR("failed to alloc tx wrapper\n"); + goto fail_mem_alloc; + } + + if (!desc->dma_address_valid) { + dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld, + desc->len, DMA_TO_DEVICE); + } else { + dma_address = desc->dma_address; + tx_pkt->no_unmap_dma = true; + } + if (dma_mapping_error(ipa_ctx->pdev, dma_address)) { + IPAERR("dma_map_single failed\n"); + goto fail_dma_map; + } + + 
INIT_LIST_HEAD(&tx_pkt->link); + tx_pkt->type = desc->type; + tx_pkt->cnt = 1; /* only 1 desc in this "set" */ + + tx_pkt->mem.phys_base = dma_address; + tx_pkt->mem.base = desc->pyld; + tx_pkt->mem.size = desc->len; + tx_pkt->sys = sys; + tx_pkt->callback = desc->callback; + tx_pkt->user1 = desc->user1; + tx_pkt->user2 = desc->user2; + + /* + * Special treatment for immediate commands, where the structure of the + * descriptor is different + */ + if (desc->type == IPA_IMM_CMD_DESC) { + sps_flags |= SPS_IOVEC_FLAG_IMME; + len = desc->opcode; + IPADBG_LOW("sending cmd=%d pyld_len=%d sps_flags=%x\n", + desc->opcode, desc->len, sps_flags); + IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len); + } else { + len = desc->len; + } + + INIT_WORK(&tx_pkt->work, ipa_wq_write_done); + + spin_lock_bh(&sys->spinlock); + list_add_tail(&tx_pkt->link, &sys->head_desc_list); + if (sys->policy == IPA_POLICY_NOINTR_MODE) { + do { + ret = sps_get_iovec(sys->ep->ep_hdl, &iov); + if (ret) { + IPADBG("sps_get_iovec failed %d\n", ret); + break; + } + if ((iov.addr == 0x0) && (iov.size == 0x0)) + break; + } while (1); + } + result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt, + sps_flags); + if (result) { + IPAERR("sps_transfer_one failed rc=%d\n", result); + goto fail_sps_send; + } + + spin_unlock_bh(&sys->spinlock); + + return 0; + +fail_sps_send: + list_del(&tx_pkt->link); + spin_unlock_bh(&sys->spinlock); + dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE); +fail_dma_map: + kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt); +fail_mem_alloc: + return -EFAULT; +} + +/** + * ipa_send() - Send multiple descriptors in one HW transaction + * @sys: system pipe context + * @num_desc: number of packets + * @desc: packets to send (may be immediate command or data) + * @in_atomic: whether caller is in atomic context + * + * This function is used for system-to-bam connection. 
+ * - SPS driver expect struct sps_transfer which will contain all the data + * for a transaction + * - ipa_tx_pkt_wrapper will be used for each ipa + * descriptor (allocated from wrappers cache) + * - The wrapper struct will be configured for each ipa-desc payload and will + * contain information which will be later used by the user callbacks + * - each transfer will be made by calling to sps_transfer() + * - Each packet (command or data) that will be sent will also be saved in + * ipa_sys_context for later check that all data was sent + * + * Return codes: 0: success, -EFAULT: failure + */ +int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc, + bool in_atomic) +{ + struct ipa_tx_pkt_wrapper *tx_pkt; + struct ipa_tx_pkt_wrapper *next_pkt; + struct sps_transfer transfer = { 0 }; + struct sps_iovec *iovec; + dma_addr_t dma_addr; + int i = 0; + int j; + int result; + uint size = num_desc * sizeof(struct sps_iovec); + gfp_t mem_flag = GFP_ATOMIC; + struct sps_iovec iov; + int ret; + gfp_t flag; + + if (unlikely(!in_atomic)) + mem_flag = GFP_KERNEL; + + flag = mem_flag | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + if (num_desc == IPA_NUM_DESC_PER_SW_TX) { + transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag, + &dma_addr); + if (!transfer.iovec) { + IPAERR("fail to alloc dma mem for sps xfr buff\n"); + return -EFAULT; + } + } else { + transfer.iovec = kmalloc(size, flag); + if (!transfer.iovec) { + IPAERR("fail to alloc mem for sps xfr buff "); + IPAERR("num_desc = %d size = %d\n", num_desc, size); + return -EFAULT; + } + dma_addr = dma_map_single(ipa_ctx->pdev, + transfer.iovec, size, DMA_TO_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, dma_addr)) { + IPAERR("dma_map_single failed for sps xfr buff\n"); + kfree(transfer.iovec); + return -EFAULT; + } + } + + transfer.iovec_phys = dma_addr; + transfer.iovec_count = num_desc; + spin_lock_bh(&sys->spinlock); + + for (i = 0; i < num_desc; i++) { + tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, + mem_flag); + if (!tx_pkt) { + IPAERR("failed to alloc tx wrapper\n"); + goto failure; + } + /* + * first desc of set is "special" as it holds the count and + * other info + */ + if (i == 0) { + transfer.user = tx_pkt; + tx_pkt->mult.phys_base = dma_addr; + tx_pkt->mult.base = transfer.iovec; + tx_pkt->mult.size = size; + tx_pkt->cnt = num_desc; + INIT_WORK(&tx_pkt->work, ipa_wq_write_done); + } + + iovec = &transfer.iovec[i]; + iovec->flags = 0; + + INIT_LIST_HEAD(&tx_pkt->link); + tx_pkt->type = desc[i].type; + + if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) { + tx_pkt->mem.base = desc[i].pyld; + tx_pkt->mem.size = desc[i].len; + + if (!desc[i].dma_address_valid) { + tx_pkt->mem.phys_base = + dma_map_single(ipa_ctx->pdev, + tx_pkt->mem.base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + tx_pkt->mem.phys_base = desc[i].dma_address; + tx_pkt->no_unmap_dma = true; + } + } else { + tx_pkt->mem.base = desc[i].frag; + tx_pkt->mem.size = desc[i].len; + + if (!desc[i].dma_address_valid) { + tx_pkt->mem.phys_base = + skb_frag_dma_map(ipa_ctx->pdev, + desc[i].frag, + 0, tx_pkt->mem.size, + DMA_TO_DEVICE); 
+ } else { + tx_pkt->mem.phys_base = desc[i].dma_address; + tx_pkt->no_unmap_dma = true; + } + } + + if (dma_mapping_error(ipa_ctx->pdev, tx_pkt->mem.phys_base)) { + IPAERR("dma_map_single failed\n"); + goto failure_dma_map; + } + + tx_pkt->sys = sys; + tx_pkt->callback = desc[i].callback; + tx_pkt->user1 = desc[i].user1; + tx_pkt->user2 = desc[i].user2; + + /* + * Point the iovec to the buffer and + * add this packet to system pipe context. + */ + iovec->addr = tx_pkt->mem.phys_base; + list_add_tail(&tx_pkt->link, &sys->head_desc_list); + + /* + * Special treatment for immediate commands, where the structure + * of the descriptor is different + */ + if (desc[i].type == IPA_IMM_CMD_DESC) { + iovec->size = desc[i].opcode; + iovec->flags |= SPS_IOVEC_FLAG_IMME; + IPA_DUMP_BUFF(desc[i].pyld, + tx_pkt->mem.phys_base, desc[i].len); + } else { + iovec->size = desc[i].len; + } + + if (i == (num_desc - 1)) { + iovec->flags |= SPS_IOVEC_FLAG_EOT; + /* "mark" the last desc */ + tx_pkt->cnt = IPA_LAST_DESC_CNT; + } + } + + if (sys->policy == IPA_POLICY_NOINTR_MODE) { + do { + ret = sps_get_iovec(sys->ep->ep_hdl, &iov); + if (ret) { + IPADBG("sps_get_iovec failed %d\n", ret); + break; + } + if ((iov.addr == 0x0) && (iov.size == 0x0)) + break; + } while (1); + } + result = sps_transfer(sys->ep->ep_hdl, &transfer); + if (result) { + IPAERR("sps_transfer failed rc=%d\n", result); + goto failure; + } + + spin_unlock_bh(&sys->spinlock); + return 0; + +failure_dma_map: + kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt); + +failure: + tx_pkt = transfer.user; + for (j = 0; j < i; j++) { + next_pkt = list_next_entry(tx_pkt, link); + list_del(&tx_pkt->link); + if (!tx_pkt->no_unmap_dma) { + if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) { + dma_unmap_single(ipa_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } else { + dma_unmap_page(ipa_ctx->pdev, + tx_pkt->mem.phys_base, + tx_pkt->mem.size, + DMA_TO_DEVICE); + } + } + 
kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt); + tx_pkt = next_pkt; + } + if (transfer.iovec_phys) { + if (num_desc == IPA_NUM_DESC_PER_SW_TX) { + dma_pool_free(ipa_ctx->dma_pool, transfer.iovec, + transfer.iovec_phys); + } else { + dma_unmap_single(ipa_ctx->pdev, transfer.iovec_phys, + size, DMA_TO_DEVICE); + kfree(transfer.iovec); + } + } + spin_unlock_bh(&sys->spinlock); + return -EFAULT; +} + +/** + * ipa_sps_irq_cmd_ack - callback function which will be called by SPS driver + * after an immediate command is complete. + * @user1: pointer to the descriptor of the transfer + * @user2: + * + * Complete the immediate commands completion object, this will release the + * thread which waits on this completion object (ipa_send_cmd()) + */ +static void ipa_sps_irq_cmd_ack(void *user1, int user2) +{ + struct ipa_desc *desc = (struct ipa_desc *)user1; + + if (!desc) { + IPAERR("desc is NULL\n"); + WARN_ON(1); + return; + } + IPADBG_LOW("got ack for cmd=%d\n", desc->opcode); + complete(&desc->xfer_done); +} + +/** + * ipa_send_cmd - send immediate commands + * @num_desc: number of descriptors within the desc struct + * @descr: descriptor structure + * + * Function will block till command gets ACK from IPA HW, caller needs + * to free any resources it allocated after function returns + * The callback in ipa_desc should not be set by the caller + * for this function. 
+ */ +int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr) +{ + struct ipa_desc *desc; + int i, result = 0; + struct ipa_sys_context *sys; + int ep_idx; + + for (i = 0; i < num_desc; i++) + IPADBG_LOW("sending imm cmd %d\n", descr[i].opcode); + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + sys = ipa_ctx->ep[ep_idx].sys; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (num_desc == 1) { + init_completion(&descr->xfer_done); + + if (descr->callback || descr->user1) + WARN_ON(1); + + descr->callback = ipa_sps_irq_cmd_ack; + descr->user1 = descr; + if (ipa_send_one(sys, descr, true)) { + IPAERR("fail to send immediate command\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&descr->xfer_done); + } else { + desc = &descr[num_desc - 1]; + init_completion(&desc->xfer_done); + + if (desc->callback || desc->user1) + WARN_ON(1); + + desc->callback = ipa_sps_irq_cmd_ack; + desc->user1 = desc; + if (ipa_send(sys, num_desc, descr, true)) { + IPAERR("fail to send multiple immediate command set\n"); + result = -EFAULT; + goto bail; + } + wait_for_completion(&desc->xfer_done); + } + +bail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +/** + * ipa_sps_irq_tx_notify() - Callback function which will be called by + * the SPS driver to start a Tx poll operation. + * Called in an interrupt context. + * @notify: SPS driver supplied notification struct + * + * This function defer the work for this event to the tx workqueue. 
+ */ +static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify) +{ + struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user; + int ret; + + IPADBG_LOW("event %d notified\n", notify->event_id); + + switch (notify->event_id) { + case SPS_EVENT_EOT: + if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client)) + atomic_set(&ipa_ctx->sps_pm.eot_activity, 1); + if (!atomic_read(&sys->curr_polling_state)) { + ret = sps_get_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + break; + } + sys->ep->connect.options = SPS_O_AUTO_ENABLE | + SPS_O_ACK_TRANSFERS | SPS_O_POLL; + ret = sps_set_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + break; + } + atomic_set(&sys->curr_polling_state, 1); + queue_work(sys->wq, &sys->work); + } + break; + default: + IPAERR("received unexpected event id %d\n", notify->event_id); + } +} + +/** + * ipa_sps_irq_tx_no_aggr_notify() - Callback function which will be called by + * the SPS driver after a Tx operation is complete. + * Called in an interrupt context. + * @notify: SPS driver supplied notification struct + * + * This function defer the work for this event to the tx workqueue. + * This event will be later handled by ipa_write_done. 
+ */ +static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify) +{ + struct ipa_tx_pkt_wrapper *tx_pkt; + + IPADBG_LOW("event %d notified\n", notify->event_id); + + switch (notify->event_id) { + case SPS_EVENT_EOT: + tx_pkt = notify->data.transfer.user; + if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client)) + atomic_set(&ipa_ctx->sps_pm.eot_activity, 1); + queue_work(tx_pkt->sys->wq, &tx_pkt->work); + break; + default: + IPAERR("received unexpected event id %d\n", notify->event_id); + } +} + +/** + * ipa_poll_pkt() - Poll packet from SPS BAM + * return 0 to caller on poll successfully + * else -EIO + * + */ +static int ipa_poll_pkt(struct ipa_sys_context *sys, + struct sps_iovec *iov) +{ + int ret; + + ret = sps_get_iovec(sys->ep->ep_hdl, iov); + if (ret) { + IPAERR("sps_get_iovec failed %d\n", ret); + return ret; + } + + if (iov->addr == 0) + return -EIO; + + return 0; +} + +/** + * ipa_handle_rx_core() - The core functionality of packet reception. This + * function is read from multiple code paths. + * + * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN + * endpoint. The function runs as long as there are packets in the pipe. + * For each packet: + * - Disconnect the packet from the system pipe linked list + * - Unmap the packets skb, make it non DMAable + * - Free the packet from the cache + * - Prepare a proper skb + * - Call the endpoints notify function, passing the skb in the parameters + * - Replenish the rx cache + */ +static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all, + bool in_poll_state) +{ + struct sps_iovec iov; + int ret; + int cnt = 0; + + while ((in_poll_state ? 
atomic_read(&sys->curr_polling_state) : + !atomic_read(&sys->curr_polling_state))) { + if (cnt && !process_all) + break; + + ret = ipa_poll_pkt(sys, &iov); + if (ret) + break; + + if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client)) + ipa_dma_memcpy_notify(sys, &iov); + else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client)) + ipa_wlan_wq_rx_common(sys, iov.size); + else + ipa_wq_rx_common(sys, iov.size); + + cnt++; + } + + return cnt; +} + +/** + * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode + */ +static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys) +{ + int ret; + + if (!sys->ep || !sys->ep->valid) { + IPAERR("EP Not Valid, no need to cleanup.\n"); + return; + } + + ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + goto fail; + } + + if (!atomic_read(&sys->curr_polling_state) && + ((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) { + IPADBG("already in intr mode\n"); + return; + } + + if (!atomic_read(&sys->curr_polling_state)) { + IPAERR("already in intr mode\n"); + goto fail; + } + + sys->event.options = SPS_O_EOT; + ret = sps_register_event(sys->ep->ep_hdl, &sys->event); + if (ret) { + IPAERR("sps_register_event() failed %d\n", ret); + goto fail; + } + sys->ep->connect.options = + SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT; + ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + goto fail; + } + atomic_set(&sys->curr_polling_state, 0); + if (!sys->ep->napi_enabled) + ipa_handle_rx_core(sys, true, false); + ipa_dec_release_wakelock(sys->ep->wakelock_client); + return; + +fail: + queue_delayed_work(sys->wq, &sys->switch_to_intr_work, + msecs_to_jiffies(1)); +} + + +/** + * ipa_sps_irq_control() - Function to enable or disable BAM IRQ. 
+ */ +static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable) +{ + int ret; + + /* + * Do not change sps config in case we are in polling mode as this + * indicates that sps driver already notified EOT event and sps config + * should not change until ipa driver processes the packet. + */ + if (atomic_read(&sys->curr_polling_state)) { + IPADBG("in polling mode, do not change config\n"); + return; + } + + if (enable) { + ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + return; + } + sys->event.options = SPS_O_EOT; + ret = sps_register_event(sys->ep->ep_hdl, &sys->event); + if (ret) { + IPAERR("sps_register_event() failed %d\n", ret); + return; + } + sys->ep->connect.options = + SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT; + ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + return; + } + } else { + ret = sps_get_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + return; + } + sys->ep->connect.options = SPS_O_AUTO_ENABLE | + SPS_O_ACK_TRANSFERS | SPS_O_POLL; + ret = sps_set_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + return; + } + } +} + +void ipa_sps_irq_control_all(bool enable) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx, client_num; + + IPADBG("\n"); + + for (client_num = 0; + client_num < IPA_CLIENT_MAX; client_num++) { + if (!IPA_CLIENT_IS_APPS_CONS(client_num)) + continue; + + ipa_ep_idx = ipa_get_ep_mapping(client_num); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + continue; + } + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (!ep->valid) { + IPAERR("EP (%d) not allocated.\n", ipa_ep_idx); + continue; + } + ipa_sps_irq_control(ep->sys, enable); + } +} + +/** + * ipa_rx_notify() - Callback function which is called by the SPS driver when a + * a packet is received + * 
@notify: SPS driver supplied notification information + * + * Called in an interrupt context, therefore the majority of the work is + * deffered using a work queue. + * + * After receiving a packet, the driver goes to polling mode and keeps pulling + * packets until the rx buffer is empty, then it goes back to interrupt mode. + * This comes to prevent the CPU from handling too many interrupts when the + * throughput is high. + */ +static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify) +{ + struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user; + int ret; + + IPADBG("event %d notified\n", notify->event_id); + + switch (notify->event_id) { + case SPS_EVENT_EOT: + if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client)) + atomic_set(&ipa_ctx->sps_pm.eot_activity, 1); + + if (atomic_read(&sys->curr_polling_state)) { + sys->ep->eot_in_poll_err++; + break; + } + + ret = sps_get_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_get_config() failed %d\n", ret); + break; + } + sys->ep->connect.options = SPS_O_AUTO_ENABLE | + SPS_O_ACK_TRANSFERS | SPS_O_POLL; + ret = sps_set_config(sys->ep->ep_hdl, + &sys->ep->connect); + if (ret) { + IPAERR("sps_set_config() failed %d\n", ret); + break; + } + ipa_inc_acquire_wakelock(sys->ep->wakelock_client); + atomic_set(&sys->curr_polling_state, 1); + trace_intr_to_poll(sys->ep->client); + queue_work(sys->wq, &sys->work); + break; + default: + IPAERR("received unexpected event id %d\n", notify->event_id); + } +} + +static void switch_to_intr_tx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work); + ipa_handle_tx(sys); +} + +/** + * ipa_handle_rx() - handle packet reception. This function is executed in the + * context of a work queue. 
+ * @work: work struct needed by the work queue + * + * ipa_handle_rx_core() is run in polling mode. After all packets has been + * received, the driver switches back to interrupt mode. + */ +static void ipa_handle_rx(struct ipa_sys_context *sys) +{ + int inactive_cycles = 0; + int cnt; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + do { + cnt = ipa_handle_rx_core(sys, true, true); + if (cnt == 0) { + inactive_cycles++; + trace_idle_sleep_enter(sys->ep->client); + usleep_range(ipa_ctx->ipa_rx_min_timeout_usec, + ipa_ctx->ipa_rx_max_timeout_usec); + trace_idle_sleep_exit(sys->ep->client); + } else { + inactive_cycles = 0; + } + + /* if pipe is out of buffers there is no point polling for + * completed descs; release the worker so delayed work can + * run in a timely manner + */ + if (sys->len == 0) + break; + + } while (inactive_cycles <= ipa_ctx->ipa_polling_iteration); + + trace_poll_to_intr(sys->ep->client); + ipa_rx_switch_to_intr_mode(sys); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +/** + * ipa2_rx_poll() - Poll the rx packets from IPA HW. 
This + * function is exectued in the softirq context + * + * if input budget is zero, the driver switches back to + * interrupt mode + * + * return number of polled packets, on error 0(zero) + */ +int ipa2_rx_poll(u32 clnt_hdl, int weight) +{ + struct ipa_ep_context *ep; + int ret; + int cnt = 0; + unsigned int delay = 1; + struct sps_iovec iov; + + IPADBG("\n"); + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm 0x%x\n", clnt_hdl); + return cnt; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + while (cnt < weight && + atomic_read(&ep->sys->curr_polling_state)) { + + ret = ipa_poll_pkt(ep->sys, &iov); + if (ret) + break; + + ipa_wq_rx_common(ep->sys, iov.size); + cnt += IPA_WAN_AGGR_PKT_CNT; + } + + if (cnt == 0 || cnt < weight) { + ep->inactive_cycles++; + ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0); + + if (ep->inactive_cycles > 3 || ep->sys->len == 0) { + ep->switch_to_intr = true; + delay = 0; + } else if (cnt < weight) { + delay = 0; + } + queue_delayed_work(ep->sys->wq, + &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay)); + } else + ep->inactive_cycles = 0; + + return cnt; +} + +static void switch_to_intr_rx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work); + + if (sys->ep->napi_enabled) { + if (sys->ep->switch_to_intr) { + ipa_rx_switch_to_intr_mode(sys); + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI"); + sys->ep->switch_to_intr = false; + sys->ep->inactive_cycles = 0; + } else + sys->ep->client_notify(sys->ep->priv, + IPA_CLIENT_START_POLL, 0); + } else + ipa_handle_rx(sys); +} + +/** + * ipa_update_repl_threshold()- Update the repl_threshold for the client. + * + * Return value: None. + */ +void ipa_update_repl_threshold(enum ipa_client_type ipa_client) +{ + int ep_idx; + struct ipa_ep_context *ep; + + /* Check if ep is valid. 
*/ + ep_idx = ipa2_get_ep_mapping(ipa_client); + if (ep_idx == -1) { + IPADBG("Invalid IPA client\n"); + return; + } + + ep = &ipa_ctx->ep[ep_idx]; + if (!ep->valid) { + IPADBG("EP not valid/Not applicable for client.\n"); + return; + } + /* + * Determine how many buffers/descriptors remaining will + * cause to drop below the yellow WM bar. + */ + if (ep->sys->rx_buff_sz) + ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys) + / ep->sys->rx_buff_sz; + else + ep->rx_replenish_threshold = 0; +} + +/** + * ipa2_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform + * IPA EP configuration + * @sys_in: [in] input needed to setup BAM pipe and configure EP + * @clnt_hdl: [out] client handle + * + * - configure the end-point registers with the supplied + * parameters from the user. + * - call SPS APIs to create a system-to-bam connection with IPA. + * - allocate descriptor FIFO + * - register callback function(ipa_sps_irq_rx_notify or + * ipa_sps_irq_tx_notify - depends on client type) in case the driver is + * not configured to pulling mode + * + * Returns: 0 on success, negative on failure + */ +int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + dma_addr_t dma_addr; + char buff[IPA_RESOURCE_NAME_MAX]; + struct iommu_domain *smmu_domain; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (sys_in == NULL || clnt_hdl == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + + if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) { + IPAERR("bad parm client:%d fifo_sz:%d\n", + sys_in->client, sys_in->desc_fifo_sz); + goto fail_gen; + } + + ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + goto fail_gen; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); + + if (ep->valid == 1) { + if 
(sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) { + IPAERR("EP already allocated.\n"); + goto fail_and_disable_clocks; + } else { + if (ipa2_cfg_ep_hdr(ipa_ep_idx, + &sys_in->ipa_ep_cfg.hdr)) { + IPAERR("fail to configure hdr prop of EP.\n"); + result = -EFAULT; + goto fail_and_disable_clocks; + } + if (ipa2_cfg_ep_cfg(ipa_ep_idx, + &sys_in->ipa_ep_cfg.cfg)) { + IPAERR("fail to configure cfg prop of EP.\n"); + result = -EFAULT; + goto fail_and_disable_clocks; + } + IPADBG("client %d (ep: %d) overlay ok sys=%p\n", + sys_in->client, ipa_ep_idx, ep->sys); + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + *clnt_hdl = ipa_ep_idx; + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + return 0; + } + } + + memset(ep, 0, offsetof(struct ipa_ep_context, sys)); + + if (!ep->sys) { + ep->sys = kzalloc(sizeof(struct ipa_sys_context), GFP_KERNEL); + if (!ep->sys) { + IPAERR("failed to sys ctx for client %d\n", + sys_in->client); + result = -ENOMEM; + goto fail_and_disable_clocks; + } + + ep->sys->ep = ep; + snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d", + sys_in->client); + ep->sys->wq = alloc_workqueue(buff, + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + if (!ep->sys->wq) { + IPAERR("failed to create wq for client %d\n", + sys_in->client); + result = -EFAULT; + goto fail_wq; + } + + snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d", + sys_in->client); + ep->sys->repl_wq = alloc_workqueue(buff, + WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + if (!ep->sys->repl_wq) { + IPAERR("failed to create rep wq for client %d\n", + sys_in->client); + result = -EFAULT; + goto fail_wq2; + } + + INIT_LIST_HEAD(&ep->sys->head_desc_list); + INIT_LIST_HEAD(&ep->sys->rcycl_list); + spin_lock_init(&ep->sys->spinlock); + } else { + memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep)); + } + + ep->skip_ep_cfg = sys_in->skip_ep_cfg; + if (ipa_assign_policy(sys_in, ep->sys)) { + IPAERR("failed to sys ctx for client %d\n", sys_in->client); + result = 
-ENOMEM; + goto fail_gen2; + } + + ep->valid = 1; + ep->client = sys_in->client; + ep->client_notify = sys_in->notify; + ep->napi_enabled = sys_in->napi_enabled; + ep->priv = sys_in->priv; + ep->keep_ipa_awake = sys_in->keep_ipa_awake; + atomic_set(&ep->avail_fifo_desc, + ((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1)); + + if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) && + ep->sys->status_stat == NULL) { + ep->sys->status_stat = + kzalloc(sizeof(struct ipa_status_stats), GFP_KERNEL); + if (!ep->sys->status_stat) { + IPAERR("no memory\n"); + goto fail_gen2; + } + } + + result = ipa_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx); + goto fail_gen2; + } + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_gen2; + } + if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_gen2; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("skipping ep configuration\n"); + } + + /* Default Config */ + ep->ep_hdl = sps_alloc_endpoint(); + if (ep->ep_hdl == NULL) { + IPAERR("SPS EP allocation failed.\n"); + goto fail_gen2; + } + + result = sps_get_config(ep->ep_hdl, &ep->connect); + if (result) { + IPAERR("fail to get config.\n"); + goto fail_sps_cfg; + } + + /* Specific Config */ + if (IPA_CLIENT_IS_CONS(sys_in->client)) { + ep->connect.mode = SPS_MODE_SRC; + ep->connect.destination = SPS_DEV_HANDLE_MEM; + ep->connect.source = ipa_ctx->bam_handle; + ep->connect.dest_pipe_index = ipa_ctx->a5_pipe_index++; + ep->connect.src_pipe_index = ipa_ep_idx; + /* + * Determine how many buffers/descriptors remaining will + * cause to drop below the yellow WM bar. 
+ */ + if (ep->sys->rx_buff_sz) + ep->rx_replenish_threshold = + ipa_get_sys_yellow_wm(ep->sys) / ep->sys->rx_buff_sz; + else + ep->rx_replenish_threshold = 0; + /* Only when the WAN pipes are setup, actual threshold will + * be read from the register. So update LAN_CONS ep again with + * right value. + */ + if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS) + ipa_update_repl_threshold(IPA_CLIENT_APPS_LAN_CONS); + } else { + ep->connect.mode = SPS_MODE_DEST; + ep->connect.source = SPS_DEV_HANDLE_MEM; + ep->connect.destination = ipa_ctx->bam_handle; + ep->connect.src_pipe_index = ipa_ctx->a5_pipe_index++; + ep->connect.dest_pipe_index = ipa_ep_idx; + } + + IPADBG("client:%d ep:%d", + sys_in->client, ipa_ep_idx); + + IPADBG("dest_pipe_index:%d src_pipe_index:%d\n", + ep->connect.dest_pipe_index, + ep->connect.src_pipe_index); + + ep->connect.options = ep->sys->sps_option; + ep->connect.desc.size = sys_in->desc_fifo_sz; + ep->connect.desc.base = dma_alloc_coherent(ipa_ctx->pdev, + ep->connect.desc.size, &dma_addr, GFP_KERNEL); + if (ipa_ctx->smmu_s1_bypass) { + ep->connect.desc.phys_base = dma_addr; + } else { + ep->connect.desc.iova = dma_addr; + smmu_domain = ipa2_get_smmu_domain(); + if (smmu_domain != NULL) { + ep->connect.desc.phys_base = + iommu_iova_to_phys(smmu_domain, dma_addr); + } + } + if (ep->connect.desc.base == NULL) { + IPAERR("fail to get DMA desc memory.\n"); + goto fail_sps_cfg; + } + + ep->connect.event_thresh = IPA_EVENT_THRESHOLD; + + result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, sys_in->client); + if (result) { + IPAERR("sps_connect fails.\n"); + goto fail_sps_connect; + } + + ep->sys->event.options = SPS_O_EOT; + ep->sys->event.mode = SPS_TRIGGER_CALLBACK; + ep->sys->event.xfer_done = NULL; + ep->sys->event.user = ep->sys; + ep->sys->event.callback = ep->sys->sps_callback; + result = sps_register_event(ep->ep_hdl, &ep->sys->event); + if (result < 0) { + IPAERR("register event error %d\n", result); + goto fail_register_event; + } + + 
*clnt_hdl = ipa_ep_idx; + + if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) { + ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1; + ep->sys->repl.cache = kcalloc(ep->sys->repl.capacity, + sizeof(void *), GFP_KERNEL); + if (!ep->sys->repl.cache) { + IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx); + ep->sys->repl_hdlr = ipa_replenish_rx_cache; + ep->sys->repl.capacity = 0; + } else { + atomic_set(&ep->sys->repl.head_idx, 0); + atomic_set(&ep->sys->repl.tail_idx, 0); + ipa_wq_repl_rx(&ep->sys->repl_work); + } + } + + if (IPA_CLIENT_IS_CONS(sys_in->client)) + ipa_replenish_rx_cache(ep->sys); + + if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) { + ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW); + atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt); + } + + ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) { + if (ipa_ctx->modem_cfg_emb_pipe_flt && + sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) + IPADBG("modem cfg emb pipe flt\n"); + else + ipa_install_dflt_flt_rules(ipa_ep_idx); + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client, + ipa_ep_idx, ep->sys); + + return 0; + +fail_register_event: + sps_disconnect(ep->ep_hdl); +fail_sps_connect: + dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size, + ep->connect.desc.base, + ep->connect.desc.phys_base); +fail_sps_cfg: + sps_free_endpoint(ep->ep_hdl); +fail_gen2: + destroy_workqueue(ep->sys->repl_wq); +fail_wq2: + destroy_workqueue(ep->sys->wq); +fail_wq: + kfree(ep->sys); + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); +fail_and_disable_clocks: + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); +fail_gen: + return result; +} + +/** + * ipa2_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP + * @clnt_hdl: [in] the handle obtained from ipa2_setup_sys_pipe + * + * Returns: 0 on success, negative on failure + 
*/ +int ipa2_teardown_sys_pipe(u32 clnt_hdl) +{ + struct ipa_ep_context *ep; + int empty; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_disable_data_path(clnt_hdl); + if (ep->napi_enabled) { + ep->switch_to_intr = true; + do { + usleep_range(95, 105); + } while (atomic_read(&ep->sys->curr_polling_state)); + } + + if (IPA_CLIENT_IS_PROD(ep->client)) { + do { + spin_lock_bh(&ep->sys->spinlock); + empty = list_empty(&ep->sys->head_desc_list); + spin_unlock_bh(&ep->sys->spinlock); + if (!empty) + usleep_range(95, 105); + else + break; + } while (1); + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + cancel_delayed_work_sync(&ep->sys->replenish_rx_work); + cancel_delayed_work_sync(&ep->sys->switch_to_intr_work); + } + + flush_workqueue(ep->sys->wq); + sps_disconnect(ep->ep_hdl); + dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size, + ep->connect.desc.base, + ep->connect.desc.phys_base); + sps_free_endpoint(ep->ep_hdl); + if (ep->sys->repl_wq) + flush_workqueue(ep->sys->repl_wq); + if (IPA_CLIENT_IS_CONS(ep->client)) + ipa_cleanup_rx(ep->sys); + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) { + if (ipa_ctx->modem_cfg_emb_pipe_flt && + ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD) + IPADBG("modem cfg emb pipe flt\n"); + else + ipa_delete_dflt_flt_rules(clnt_hdl); + } + + if (IPA_CLIENT_IS_WLAN_CONS(ep->client)) + atomic_dec(&ipa_ctx->wc_memb.active_clnt_cnt); + + memset(&ep->wstats, 0, sizeof(struct ipa_wlan_stats)); + + if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt)) + ipa_cleanup_wlan_rx_common_cache(); + + ep->valid = 0; + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + 
return 0; +} + +/** + * ipa_tx_comp_usr_notify_release() - Callback function which will call the + * user supplied callback function to release the skb, or release it on + * its own if no callback function was supplied. + * @user1 + * @user2 + * + * This notified callback is for the destination client. + * This function is supplied in ipa_connect. + */ +static void ipa_tx_comp_usr_notify_release(void *user1, int user2) +{ + struct sk_buff *skb = (struct sk_buff *)user1; + int ep_idx = user2; + + IPADBG_LOW("skb=%p ep=%d\n", skb, ep_idx); + + IPA_STATS_INC_CNT(ipa_ctx->stats.tx_pkts_compl); + + if (ipa_ctx->ep[ep_idx].client_notify) + ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv, + IPA_WRITE_DONE, (unsigned long)skb); + else + dev_kfree_skb_any(skb); +} + +static void ipa_tx_cmd_comp(void *user1, int user2) +{ + kfree(user1); +} + +/** + * ipa2_tx_dp() - Data-path tx handler + * @dst: [in] which IPA destination to route tx packets to + * @skb: [in] the packet to send + * @metadata: [in] TX packet meta-data + * + * Data-path tx handler, this is used for both SW data-path which by-passes most + * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If + * dst is a "valid" CONS type, then SW data-path is used. If dst is the + * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else + * is an error. For errors, client needs to free the skb as needed. For success, + * IPA driver will later invoke client callback if one was supplied. That + * callback should free the skb. If no callback supplied, IPA driver will free + * the skb internally + * + * The function will use two descriptors for this send command + * (for A5_WLAN_AMPDU_PROD only one desciprtor will be sent), + * the first descriptor will be used to inform the IPA hardware that + * apps need to push data into the IPA (IP_PACKET_INIT immediate command). 
+ * Once this send was done from SPS point-of-view the IPA driver will + * get notified by the supplied callback - ipa_sps_irq_tx_comp() + * + * ipa_sps_irq_tx_comp will call to the user supplied + * callback (from ipa_connect) + * + * Returns: 0 on success, negative on failure + */ +int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *meta) +{ + struct ipa_desc *desc; + struct ipa_desc _desc[2]; + int dst_ep_idx; + struct ipa_ip_packet_init *cmd; + struct ipa_sys_context *sys; + int src_ep_idx; + int num_frags, f; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (skb->len == 0) { + IPAERR("packet size is 0\n"); + return -EINVAL; + } + + num_frags = skb_shinfo(skb)->nr_frags; + if (num_frags) { + /* 1 desc is needed for the linear portion of skb; + * 1 desc may be needed for the PACKET_INIT; + * 1 desc for each frag + */ + desc = kzalloc(sizeof(*desc) * (num_frags + 2), GFP_ATOMIC); + if (!desc) { + IPAERR("failed to alloc desc array\n"); + goto fail_mem; + } + } else { + memset(_desc, 0, 2 * sizeof(struct ipa_desc)); + desc = &_desc[0]; + } + + /* + * USB_CONS: PKT_INIT ep_idx = dst pipe + * Q6_CONS: PKT_INIT ep_idx = sender pipe + * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe + * + * LAN TX: all PKT_INIT + * WAN TX: PKT_INIT (cmd) + HW (data) + * + */ + if (IPA_CLIENT_IS_CONS(dst)) { + src_ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD); + if (-1 == src_ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_LAN_WAN_PROD); + goto fail_gen; + } + dst_ep_idx = ipa2_get_ep_mapping(dst); + } else { + src_ep_idx = ipa2_get_ep_mapping(dst); + if (-1 == src_ep_idx) { + IPAERR("Client %u is not mapped\n", dst); + goto fail_gen; + } + if (meta && meta->pkt_init_dst_ep_valid) + dst_ep_idx = meta->pkt_init_dst_ep; + else + dst_ep_idx = -1; + } + + sys = ipa_ctx->ep[src_ep_idx].sys; + + if 
(!sys->ep->valid) { + IPAERR("pipe not valid\n"); + goto fail_gen; + } + + if (dst_ep_idx != -1) { + /* SW data path */ + cmd = kzalloc(sizeof(struct ipa_ip_packet_init), flag); + if (!cmd) { + IPAERR("failed to alloc immediate command object\n"); + goto fail_gen; + } + + cmd->destination_pipe_index = dst_ep_idx; + desc[0].opcode = IPA_IP_PACKET_INIT; + desc[0].pyld = cmd; + desc[0].len = sizeof(struct ipa_ip_packet_init); + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].callback = ipa_tx_cmd_comp; + desc[0].user1 = cmd; + desc[1].pyld = skb->data; + desc[1].len = skb_headlen(skb); + desc[1].type = IPA_DATA_DESC_SKB; + desc[1].callback = ipa_tx_comp_usr_notify_release; + desc[1].user1 = skb; + desc[1].user2 = (meta && meta->pkt_init_dst_ep_valid && + meta->pkt_init_dst_ep_remote) ? + src_ep_idx : + dst_ep_idx; + if (meta && meta->dma_address_valid) { + desc[1].dma_address_valid = true; + desc[1].dma_address = meta->dma_address; + } + + for (f = 0; f < num_frags; f++) { + desc[2+f].frag = &skb_shinfo(skb)->frags[f]; + desc[2+f].type = IPA_DATA_DESC_SKB_PAGED; + desc[2+f].len = skb_frag_size(desc[2+f].frag); + } + + /* don't free skb till frag mappings are released */ + if (num_frags) { + desc[2+f-1].callback = desc[1].callback; + desc[2+f-1].user1 = desc[1].user1; + desc[2+f-1].user2 = desc[1].user2; + desc[1].callback = NULL; + } + + if (ipa_send(sys, num_frags + 2, desc, true)) { + IPAERR("fail to send skb %p num_frags %u SWP\n", + skb, num_frags); + goto fail_send; + } + IPA_STATS_INC_CNT(ipa_ctx->stats.tx_sw_pkts); + } else { + /* HW data path */ + desc[0].pyld = skb->data; + desc[0].len = skb_headlen(skb); + desc[0].type = IPA_DATA_DESC_SKB; + desc[0].callback = ipa_tx_comp_usr_notify_release; + desc[0].user1 = skb; + desc[0].user2 = src_ep_idx; + + if (meta && meta->dma_address_valid) { + desc[0].dma_address_valid = true; + desc[0].dma_address = meta->dma_address; + } + + if (num_frags == 0) { + if (ipa_send_one(sys, desc, true)) { + IPAERR("fail to send skb %p 
HWP\n", skb); + goto fail_gen; + } + } else { + for (f = 0; f < num_frags; f++) { + desc[1+f].frag = &skb_shinfo(skb)->frags[f]; + desc[1+f].type = IPA_DATA_DESC_SKB_PAGED; + desc[1+f].len = skb_frag_size(desc[1+f].frag); + } + + /* don't free skb till frag mappings are released */ + desc[1+f-1].callback = desc[0].callback; + desc[1+f-1].user1 = desc[0].user1; + desc[1+f-1].user2 = desc[0].user2; + desc[0].callback = NULL; + + if (ipa_send(sys, num_frags + 1, desc, true)) { + IPAERR("fail to send skb %p num_frags %u HWP\n", + skb, num_frags); + goto fail_gen; + } + } + + IPA_STATS_INC_CNT(ipa_ctx->stats.tx_hw_pkts); + } + + if (num_frags) { + kfree(desc); + IPA_STATS_INC_CNT(ipa_ctx->stats.tx_non_linear); + } + + return 0; + +fail_send: + kfree(cmd); +fail_gen: + if (num_frags) + kfree(desc); +fail_mem: + return -EFAULT; +} + +static void ipa_wq_handle_rx(struct work_struct *work) +{ + struct ipa_sys_context *sys; + + sys = container_of(work, struct ipa_sys_context, work); + + if (sys->ep->napi_enabled) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI"); + sys->ep->client_notify(sys->ep->priv, + IPA_CLIENT_START_POLL, 0); + } else + ipa_handle_rx(sys); +} + +static void ipa_wq_repl_rx(struct work_struct *work) +{ + struct ipa_sys_context *sys; + void *ptr; + struct ipa_rx_pkt_wrapper *rx_pkt; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + u32 next; + u32 curr; + + sys = container_of(work, struct ipa_sys_context, repl_work); + curr = atomic_read(&sys->repl.tail_idx); + +begin: + while (1) { + next = (curr + 1) % sys->repl.capacity; + if (next == atomic_read(&sys->repl.head_idx)) + goto fail_kmem_cache_alloc; + + rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) { + pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n", + __func__, sys); + goto fail_kmem_cache_alloc; + } + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) { + pr_err_ratelimited("%s fail alloc skb sys=%p\n", + __func__, sys); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, + sys->rx_buff_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, + rx_pkt->data.dma_addr)) { + pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n", + __func__, (void *)rx_pkt->data.dma_addr, + ptr, sys); + goto fail_dma_mapping; + } + + sys->repl.cache[curr] = rx_pkt; + curr = next; + /* ensure write is done before setting tail index */ + mb(); + atomic_set(&sys->repl.tail_idx, next); + } + + return; + +fail_dma_mapping: + sys->free_skb(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + if (atomic_read(&sys->repl.tail_idx) == + atomic_read(&sys->repl.head_idx)) { + if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) + IPA_STATS_INC_CNT(ipa_ctx->stats.wan_repl_rx_empty); + else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) + IPA_STATS_INC_CNT(ipa_ctx->stats.lan_repl_rx_empty); + else + WARN_ON(1); + pr_err_ratelimited("%s sys=%p repl ring empty\n", + __func__, sys); + goto begin; + } +} + +static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys) +{ + struct ipa_rx_pkt_wrapper *rx_pkt = NULL; + 
struct ipa_rx_pkt_wrapper *tmp; + int ret; + u32 rx_len_cached = 0; + + IPADBG_LOW("\n"); + + spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + rx_len_cached = sys->len; + + if (rx_len_cached < sys->rx_pool_sz) { + list_for_each_entry_safe(rx_pkt, tmp, + &ipa_ctx->wc_memb.wlan_comm_desc_list, link) { + list_del(&rx_pkt->link); + + if (ipa_ctx->wc_memb.wlan_comm_free_cnt > 0) + ipa_ctx->wc_memb.wlan_comm_free_cnt--; + + INIT_LIST_HEAD(&rx_pkt->link); + rx_pkt->len = 0; + rx_pkt->sys = sys; + + ret = sps_transfer_one(sys->ep->ep_hdl, + rx_pkt->data.dma_addr, + IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0); + + if (ret) { + IPAERR("sps_transfer_one failed %d\n", ret); + goto fail_sps_transfer; + } + + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + rx_len_cached = ++sys->len; + + if (rx_len_cached >= sys->rx_pool_sz) { + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + return; + } + } + } + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + + if (rx_len_cached < sys->rx_pool_sz && + ipa_ctx->wc_memb.wlan_comm_total_cnt < + IPA_WLAN_COMM_RX_POOL_HIGH) { + ipa_replenish_rx_cache(sys); + ipa_ctx->wc_memb.wlan_comm_total_cnt += + (sys->rx_pool_sz - rx_len_cached); + } + + return; + +fail_sps_transfer: + list_del(&rx_pkt->link); + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); +} + +static void ipa_cleanup_wlan_rx_common_cache(void) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + struct ipa_rx_pkt_wrapper *tmp; + + spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + + list_for_each_entry_safe(rx_pkt, tmp, + &ipa_ctx->wc_memb.wlan_comm_desc_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + IPA_WLAN_COMM_RX_POOL_LOW, DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_pkt->data.skb); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); + ipa_ctx->wc_memb.wlan_comm_free_cnt--; + ipa_ctx->wc_memb.wlan_comm_total_cnt--; + } + ipa_ctx->wc_memb.total_tx_pkts_freed = 0; + + if (ipa_ctx->wc_memb.wlan_comm_free_cnt != 0) + IPAERR("wlan comm buff free 
cnt: %d\n", + ipa_ctx->wc_memb.wlan_comm_free_cnt); + + if (ipa_ctx->wc_memb.wlan_comm_total_cnt != 0) + IPAERR("wlan comm buff total cnt: %d\n", + ipa_ctx->wc_memb.wlan_comm_total_cnt); + + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + +} + +static void ipa_alloc_wlan_rx_common_cache(u32 size) +{ + void *ptr; + struct ipa_rx_pkt_wrapper *rx_pkt; + int rx_len_cached = 0; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN | + (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + rx_len_cached = ipa_ctx->wc_memb.wlan_comm_total_cnt; + while (rx_len_cached < size) { + rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) { + IPAERR("failed to alloc rx wrapper\n"); + goto fail_kmem_cache_alloc; + } + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail); + + rx_pkt->data.skb = + ipa_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ, + flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ); + rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, + IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, + rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %p for %p\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + + spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + list_add_tail(&rx_pkt->link, + &ipa_ctx->wc_memb.wlan_comm_desc_list); + rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt; + + ipa_ctx->wc_memb.wlan_comm_free_cnt++; + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + + } + + return; + +fail_dma_mapping: + dev_kfree_skb_any(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + return; +} + + +/** + * ipa_replenish_rx_cache() - Replenish the Rx packets cache. + * + * The function allocates buffers in the rx_pkt_wrapper_cache cache until there + * are IPA_RX_POOL_CEIL buffers in the cache. 
+ * - Allocate a buffer in the cache + * - Initialized the packets link + * - Initialize the packets work struct + * - Allocate the packets socket buffer (skb) + * - Fill the packets skb with data + * - Make the packet DMAable + * - Add the packet to the system pipe linked list + * - Initiate a SPS transfer so that SPS driver will use this packet later. + */ +static void ipa_replenish_rx_cache(struct ipa_sys_context *sys) +{ + void *ptr; + struct ipa_rx_pkt_wrapper *rx_pkt; + int ret; + int rx_len_cached = 0; + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN | + (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + rx_len_cached = sys->len; + + while (rx_len_cached < sys->rx_pool_sz) { + rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache, + flag); + if (!rx_pkt) { + IPAERR("failed to alloc rx wrapper\n"); + goto fail_kmem_cache_alloc; + } + + INIT_LIST_HEAD(&rx_pkt->link); + INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail); + rx_pkt->sys = sys; + + rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag); + if (rx_pkt->data.skb == NULL) { + IPAERR("failed to alloc skb\n"); + goto fail_skb_alloc; + } + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, + sys->rx_buff_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, + rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure %p for %p\n", + (void *)rx_pkt->data.dma_addr, ptr); + goto fail_dma_mapping; + } + + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + rx_len_cached = ++sys->len; + spin_unlock_bh(&sys->spinlock); + + ret = sps_transfer_one(sys->ep->ep_hdl, + rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0); + + if (ret) { + IPAERR("sps_transfer_one failed %d\n", ret); + goto fail_sps_transfer; + } + } + + return; + +fail_sps_transfer: + spin_lock_bh(&sys->spinlock); + list_del(&rx_pkt->link); + rx_len_cached = --sys->len; + spin_unlock_bh(&sys->spinlock); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + 
sys->rx_buff_sz, DMA_FROM_DEVICE); +fail_dma_mapping: + sys->free_skb(rx_pkt->data.skb); +fail_skb_alloc: + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); +fail_kmem_cache_alloc: + if (rx_len_cached == 0) + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +} + +static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys) +{ + void *ptr; + struct ipa_rx_pkt_wrapper *rx_pkt; + int ret; + int rx_len_cached = 0; + + rx_len_cached = sys->len; + + while (rx_len_cached < sys->rx_pool_sz) { + spin_lock_bh(&sys->spinlock); + if (list_empty(&sys->rcycl_list)) + goto fail_kmem_cache_alloc; + + rx_pkt = list_first_entry(&sys->rcycl_list, + struct ipa_rx_pkt_wrapper, link); + list_del(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); + INIT_LIST_HEAD(&rx_pkt->link); + ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); + rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, + ptr, sys->rx_buff_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure for rx_pkt\n"); + goto fail_dma_mapping; + } + + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + rx_len_cached = ++sys->len; + spin_unlock_bh(&sys->spinlock); + + ret = sps_transfer_one(sys->ep->ep_hdl, + rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0); + + if (ret) { + IPAERR("sps_transfer_one failed %d\n", ret); + goto fail_sps_transfer; + } + } + + return; +fail_sps_transfer: + spin_lock_bh(&sys->spinlock); + rx_len_cached = --sys->len; + list_del(&rx_pkt->link); + INIT_LIST_HEAD(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); +fail_dma_mapping: + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->rcycl_list); + INIT_LIST_HEAD(&rx_pkt->link); + spin_unlock_bh(&sys->spinlock); +fail_kmem_cache_alloc: + spin_unlock_bh(&sys->spinlock); + if (rx_len_cached == 0) + 
queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); +} + +static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + int ret; + int rx_len_cached = 0; + u32 curr; + + rx_len_cached = sys->len; + curr = atomic_read(&sys->repl.head_idx); + + while (rx_len_cached < sys->rx_pool_sz) { + if (curr == atomic_read(&sys->repl.tail_idx)) { + queue_work(sys->repl_wq, &sys->repl_work); + break; + } + + rx_pkt = sys->repl.cache[curr]; + spin_lock_bh(&sys->spinlock); + list_add_tail(&rx_pkt->link, &sys->head_desc_list); + spin_unlock_bh(&sys->spinlock); + + ret = sps_transfer_one(sys->ep->ep_hdl, + rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0); + + if (ret) { + IPAERR("sps_transfer_one failed %d\n", ret); + list_del(&rx_pkt->link); + break; + } + rx_len_cached = ++sys->len; + sys->repl_trig_cnt++; + curr = (curr + 1) % sys->repl.capacity; + /* ensure write is done before setting head index */ + mb(); + atomic_set(&sys->repl.head_idx, curr); + } + + if (sys->repl_trig_cnt % sys->repl_trig_thresh == 0) + queue_work(sys->repl_wq, &sys->repl_work); + + if (rx_len_cached <= sys->ep->rx_replenish_threshold) { + if (rx_len_cached == 0) { + if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) + IPA_STATS_INC_CNT(ipa_ctx->stats.wan_rx_empty); + else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) + IPA_STATS_INC_CNT(ipa_ctx->stats.lan_rx_empty); + else + WARN_ON(1); + } + sys->repl_trig_cnt = 0; + queue_delayed_work(sys->wq, &sys->replenish_rx_work, + msecs_to_jiffies(1)); + } +} + +static void replenish_rx_work_func(struct work_struct *work) +{ + struct delayed_work *dwork; + struct ipa_sys_context *sys; + + dwork = container_of(work, struct delayed_work, work); + sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + sys->repl_hdlr(sys); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +/** + * ipa_cleanup_rx() - release RX queue resources + * + */ +static void 
ipa_cleanup_rx(struct ipa_sys_context *sys) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + struct ipa_rx_pkt_wrapper *r; + u32 head; + u32 tail; + + spin_lock_bh(&sys->spinlock); + list_for_each_entry_safe(rx_pkt, r, + &sys->head_desc_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); + } + + list_for_each_entry_safe(rx_pkt, r, + &sys->rcycl_list, link) { + list_del(&rx_pkt->link); + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); + } + spin_unlock_bh(&sys->spinlock); + + if (sys->repl.cache) { + head = atomic_read(&sys->repl.head_idx); + tail = atomic_read(&sys->repl.tail_idx); + while (head != tail) { + rx_pkt = sys->repl.cache[head]; + dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + sys->free_skb(rx_pkt->data.skb); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt); + head = (head + 1) % sys->repl.capacity; + } + kfree(sys->repl.cache); + } +} + +static struct sk_buff *ipa_skb_copy_for_client(struct sk_buff *skb, int len) +{ + struct sk_buff *skb2 = NULL; + + skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL); + if (likely(skb2)) { + /* Set the data pointer */ + skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM); + memcpy(skb2->data, skb->data, len); + skb2->len = len; + skb_set_tail_pointer(skb2, len); + } + + return skb2; +} + +static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb, + struct ipa_sys_context *sys) +{ + struct ipa_hw_pkt_status *status; + struct sk_buff *skb2; + int pad_len_byte; + int len; + unsigned char *buf; + int src_pipe; + unsigned int used = *(unsigned int *)skb->cb; + unsigned int used_align = ALIGN(used, 32); + unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used; 
+ u32 skb2_len; + + IPA_DUMP_BUFF(skb->data, 0, skb->len); + + if (skb->len == 0) { + IPAERR("ZLT\n"); + sys->free_skb(skb); + goto out; + } + + if (sys->len_partial) { + IPADBG_LOW("len_partial %d\n", sys->len_partial); + buf = skb_push(skb, sys->len_partial); + memcpy(buf, sys->prev_skb->data, sys->len_partial); + sys->len_partial = 0; + sys->free_skb(sys->prev_skb); + sys->prev_skb = NULL; + goto begin; + } + + /* this pipe has TX comp (status only) + mux-ed LAN RX data + * (status+data) + */ + if (sys->len_rem) { + IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len, + sys->len_pad); + if (sys->len_rem <= skb->len) { + if (sys->prev_skb) { + skb2 = skb_copy_expand(sys->prev_skb, 0, + sys->len_rem, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, sys->len_rem), + skb->data, sys->len_rem); + skb_trim(skb2, + skb2->len - sys->len_pad); + skb2->truesize = skb2->len + + sizeof(struct sk_buff); + if (sys->drop_packet) + dev_kfree_skb_any(skb2); + else + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } else { + IPAERR("copy expand failed\n"); + } + dev_kfree_skb_any(sys->prev_skb); + } + skb_pull(skb, sys->len_rem); + sys->prev_skb = NULL; + sys->len_rem = 0; + sys->len_pad = 0; + } else { + if (sys->prev_skb) { + skb2 = skb_copy_expand(sys->prev_skb, 0, + skb->len, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, skb->len), + skb->data, skb->len); + } else { + IPAERR("copy expand failed\n"); + } + dev_kfree_skb_any(sys->prev_skb); + sys->prev_skb = skb2; + } + sys->len_rem -= skb->len; + sys->free_skb(skb); + goto out; + } + } + +begin: + while (skb->len) { + sys->drop_packet = false; + IPADBG_LOW("LEN_REM %d\n", skb->len); + + if (skb->len < IPA_PKT_STATUS_SIZE) { + WARN_ON(sys->prev_skb != NULL); + IPADBG("status straddles buffer\n"); + sys->prev_skb = skb_copy(skb, GFP_KERNEL); + sys->len_partial = skb->len; + goto out; + } + + status = (struct ipa_hw_pkt_status *)skb->data; + IPADBG_LOW("STATUS 
opcode=%d src=%d dst=%d len=%d\n", + status->status_opcode, status->endp_src_idx, + status->endp_dest_idx, status->pkt_len); + if (sys->status_stat) { + sys->status_stat->status[sys->status_stat->curr] = + *status; + sys->status_stat->curr++; + if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM) + sys->status_stat->curr = 0; + } + + if (status->status_opcode != + IPA_HW_STATUS_OPCODE_DROPPED_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_XLAT_PACKET) { + IPAERR("unsupported opcode(%d)\n", + status->status_opcode); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + continue; + } + IPA_STATS_EXCP_CNT(status->exception, + ipa_ctx->stats.rx_excp_pkts); + if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes || + status->endp_src_idx >= ipa_ctx->ipa_num_pipes) { + IPAERR("status fields invalid\n"); + IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n", + status->status_opcode, status->endp_src_idx, + status->endp_dest_idx, status->pkt_len); + WARN_ON(1); + ipa_assert(); + } + if (status->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID) { + struct ipa_tag_completion *comp; + + IPADBG_LOW("TAG packet arrived\n"); + if (status->tag_f_2 == IPA_COOKIE) { + skb_pull(skb, IPA_PKT_STATUS_SIZE); + if (skb->len < sizeof(comp)) { + IPAERR("TAG arrived without packet\n"); + goto out; + } + memcpy(&comp, skb->data, sizeof(comp)); + skb_pull(skb, sizeof(comp) + + IPA_SIZE_DL_CSUM_META_TRAILER); + complete(&comp->comp); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + continue; + } else { + IPADBG("ignoring TAG with wrong cookie\n"); + } + } + if (status->pkt_len == 0) { + IPADBG("Skip aggr close status\n"); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + IPA_STATS_INC_CNT(ipa_ctx->stats.aggr_close); + IPA_STATS_DEC_CNT( + ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]); + continue; + } + if (status->endp_dest_idx == (sys->ep - ipa_ctx->ep)) { + /* RX data 
*/ + src_pipe = status->endp_src_idx; + + /* + * A packet which is received back to the AP after + * there was no route match. + */ + if (!status->exception && !status->route_match) + sys->drop_packet = true; + + if (skb->len == IPA_PKT_STATUS_SIZE && + !status->exception) { + WARN_ON(sys->prev_skb != NULL); + IPADBG_LOW("Ins header in next buffer\n"); + sys->prev_skb = skb_copy(skb, GFP_KERNEL); + sys->len_partial = skb->len; + goto out; + } + + pad_len_byte = ((status->pkt_len + 3) & ~3) - + status->pkt_len; + + len = status->pkt_len + pad_len_byte + + IPA_SIZE_DL_CSUM_META_TRAILER; + IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte, + status->pkt_len, len); + + if (status->exception == + IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) { + IPADBG_LOW("Dropping packet"); + IPADBG_LOW(" on DeAggr Exception\n"); + sys->drop_packet = true; + } + + skb2_len = status->pkt_len + IPA_PKT_STATUS_SIZE; + skb2_len = min(skb2_len, skb->len); + skb2 = ipa_skb_copy_for_client(skb, skb2_len); + if (likely(skb2)) { + if (skb->len < len + IPA_PKT_STATUS_SIZE) { + IPADBG_LOW("SPL skb len %d len %d\n", + skb->len, len); + sys->prev_skb = skb2; + sys->len_rem = len - skb->len + + IPA_PKT_STATUS_SIZE; + sys->len_pad = pad_len_byte; + skb_pull(skb, skb->len); + } else { + skb_trim(skb2, status->pkt_len + + IPA_PKT_STATUS_SIZE); + IPADBG_LOW("rx avail for %d\n", + status->endp_dest_idx); + if (sys->drop_packet) { + dev_kfree_skb_any(skb2); + } else if (status->pkt_len > + IPA_GENERIC_AGGR_BYTE_LIMIT * + 1024) { + IPAERR("packet size invalid\n"); + IPAERR("STATUS opcode=%d\n", + status->status_opcode); + IPAERR("src=%d dst=%d len=%d\n", + status->endp_src_idx, + status->endp_dest_idx, + status->pkt_len); + ipa_assert(); + } else { + skb2->truesize = skb2->len + + sizeof(struct sk_buff) + + (ALIGN(len + + IPA_PKT_STATUS_SIZE, 32) * + unused / used_align); + sys->ep->client_notify( + sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } + skb_pull(skb, len + + IPA_PKT_STATUS_SIZE); + } + } 
else { + IPAERR("fail to alloc skb\n"); + if (skb->len < len) { + sys->prev_skb = NULL; + sys->len_rem = len - skb->len + + IPA_PKT_STATUS_SIZE; + sys->len_pad = pad_len_byte; + skb_pull(skb, skb->len); + } else { + skb_pull(skb, len + + IPA_PKT_STATUS_SIZE); + } + } + /* TX comp */ + ipa_wq_write_done_status(src_pipe); + IPADBG_LOW("tx comp imp for %d\n", src_pipe); + } else { + /* TX comp */ + ipa_wq_write_done_status(status->endp_src_idx); + IPADBG_LOW + ("tx comp exp for %d\n", status->endp_src_idx); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + IPA_STATS_INC_CNT(ipa_ctx->stats.stat_compl); + IPA_STATS_DEC_CNT( + ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]); + } + } + +out: + return 0; +} + +static struct sk_buff *join_prev_skb(struct sk_buff *prev_skb, + struct sk_buff *skb, unsigned int len) +{ + struct sk_buff *skb2; + + skb2 = skb_copy_expand(prev_skb, 0, + len, GFP_KERNEL); + if (likely(skb2)) { + memcpy(skb_put(skb2, len), + skb->data, len); + } else { + IPAERR("copy expand failed\n"); + skb2 = NULL; + } + dev_kfree_skb_any(prev_skb); + + return skb2; +} + +static void wan_rx_handle_splt_pyld(struct sk_buff *skb, + struct ipa_sys_context *sys) +{ + struct sk_buff *skb2; + + IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len); + if (sys->len_rem <= skb->len) { + if (sys->prev_skb) { + skb2 = join_prev_skb(sys->prev_skb, skb, + sys->len_rem); + if (likely(skb2)) { + IPADBG_LOW( + "removing Status element from skb and sending to WAN client"); + skb_pull(skb2, IPA_PKT_STATUS_SIZE); + skb2->truesize = skb2->len + + sizeof(struct sk_buff); + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, + (unsigned long)(skb2)); + } + } + skb_pull(skb, sys->len_rem); + sys->prev_skb = NULL; + sys->len_rem = 0; + } else { + if (sys->prev_skb) { + skb2 = join_prev_skb(sys->prev_skb, skb, + skb->len); + sys->prev_skb = skb2; + } + sys->len_rem -= skb->len; + skb_pull(skb, skb->len); + } +} + +static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb, + struct ipa_sys_context *sys) 
+{ + struct ipa_hw_pkt_status *status; + struct sk_buff *skb2; + u16 pkt_len_with_pad; + u32 qmap_hdr; + int checksum_trailer_exists; + int frame_len; + int ep_idx; + unsigned int used = *(unsigned int *)skb->cb; + unsigned int used_align = ALIGN(used, 32); + unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used; + + IPA_DUMP_BUFF(skb->data, 0, skb->len); + if (skb->len == 0) { + IPAERR("ZLT\n"); + goto bail; + } + + if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) { + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, (unsigned long)(skb)); + return 0; + } + if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) { + IPAERR("Recycle should enable only with GRO Aggr\n"); + ipa_assert(); + } + /* + * payload splits across 2 buff or more, + * take the start of the payload from prev_skb + */ + if (sys->len_rem) + wan_rx_handle_splt_pyld(skb, sys); + + + while (skb->len) { + IPADBG_LOW("LEN_REM %d\n", skb->len); + if (skb->len < IPA_PKT_STATUS_SIZE) { + IPAERR("status straddles buffer\n"); + WARN_ON(1); + goto bail; + } + status = (struct ipa_hw_pkt_status *)skb->data; + IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n", + status->status_opcode, status->endp_src_idx, + status->endp_dest_idx, status->pkt_len); + + if (sys->status_stat) { + sys->status_stat->status[sys->status_stat->curr] = + *status; + sys->status_stat->curr++; + if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM) + sys->status_stat->curr = 0; + } + + if (status->status_opcode != + IPA_HW_STATUS_OPCODE_DROPPED_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_PACKET && + status->status_opcode != + IPA_HW_STATUS_OPCODE_XLAT_PACKET) { + IPAERR("unsupported opcode\n"); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + continue; + } + IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts); + if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes || + status->endp_src_idx >= ipa_ctx->ipa_num_pipes || + status->pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) { + IPAERR("status fields invalid\n"); + WARN_ON(1); + goto 
bail; + } + if (status->pkt_len == 0) { + IPADBG_LOW("Skip aggr close status\n"); + skb_pull(skb, IPA_PKT_STATUS_SIZE); + IPA_STATS_DEC_CNT(ipa_ctx->stats.rx_pkts); + IPA_STATS_INC_CNT(ipa_ctx->stats.wan_aggr_close); + continue; + } + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (status->endp_dest_idx != ep_idx) { + IPAERR("expected endp_dest_idx %d received %d\n", + ep_idx, status->endp_dest_idx); + WARN_ON(1); + goto bail; + } + /* RX data */ + if (skb->len == IPA_PKT_STATUS_SIZE) { + IPAERR("Ins header in next buffer\n"); + WARN_ON(1); + goto bail; + } + qmap_hdr = *(u32 *)(status+1); + /* + * Take the pkt_len_with_pad from the last 2 bytes of the QMAP + * header + */ + + /*QMAP is BE: convert the pkt_len field from BE to LE*/ + pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff); + IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad); + /*get the CHECKSUM_PROCESS bit*/ + checksum_trailer_exists = status->status_mask & + IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS; + IPADBG_LOW("checksum_trailer_exists %d\n", + checksum_trailer_exists); + + frame_len = IPA_PKT_STATUS_SIZE + + IPA_QMAP_HEADER_LENGTH + + pkt_len_with_pad; + if (checksum_trailer_exists) + frame_len += IPA_DL_CHECKSUM_LENGTH; + IPADBG_LOW("frame_len %d\n", frame_len); + + skb2 = skb_clone(skb, GFP_KERNEL); + if (likely(skb2)) { + /* + * the len of actual data is smaller than expected + * payload split across 2 buff + */ + if (skb->len < frame_len) { + IPADBG_LOW("SPL skb len %d len %d\n", + skb->len, frame_len); + sys->prev_skb = skb2; + sys->len_rem = frame_len - skb->len; + skb_pull(skb, skb->len); + } else { + skb_trim(skb2, frame_len); + IPADBG_LOW("rx avail for %d\n", + status->endp_dest_idx); + IPADBG_LOW( + "removing Status element from skb and sending to WAN client"); + skb_pull(skb2, IPA_PKT_STATUS_SIZE); + skb2->truesize = skb2->len + + sizeof(struct sk_buff) + + (ALIGN(frame_len, 32) * + unused / used_align); + sys->ep->client_notify(sys->ep->priv, + IPA_RECEIVE, (unsigned 
long)(skb2)); + skb_pull(skb, frame_len); + } + } else { + IPAERR("fail to clone\n"); + if (skb->len < frame_len) { + sys->prev_skb = NULL; + sys->len_rem = frame_len - skb->len; + skb_pull(skb, skb->len); + } else { + skb_pull(skb, frame_len); + } + } + } +bail: + sys->free_skb(skb); + return 0; +} + +static int ipa_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys) +{ + struct ipa_a5_mux_hdr *mux_hdr; + unsigned int pull_len; + unsigned int padding; + struct ipa_ep_context *ep; + unsigned int src_pipe; + + mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data; + + src_pipe = mux_hdr->src_pipe_index; + + IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n", + rx_skb->len, ntohs(mux_hdr->interface_id), + src_pipe, mux_hdr->flags, ntohl(mux_hdr->metadata)); + + IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len); + + IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts); + IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts); + + /* + * Any packets arriving over AMPDU_TX should be dispatched + * to the regular WLAN RX data-path. 
+ */ + if (unlikely(src_pipe == WLAN_AMPDU_TX_EP)) + src_pipe = WLAN_PROD_TX_EP; + + ep = &ipa_ctx->ep[src_pipe]; + spin_lock(&ipa_ctx->disconnect_lock); + if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes || + !ep->valid || !ep->client_notify)) { + IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n", + src_pipe, ep->valid, ep->client_notify); + dev_kfree_skb_any(rx_skb); + spin_unlock(&ipa_ctx->disconnect_lock); + return 0; + } + + pull_len = sizeof(struct ipa_a5_mux_hdr); + + /* + * IP packet starts on word boundary + * remove the MUX header and any padding and pass the frame to + * the client which registered a rx callback on the "src pipe" + */ + padding = ep->cfg.hdr.hdr_len & 0x3; + if (padding) + pull_len += 4 - padding; + + IPADBG("pulling %d bytes from skb\n", pull_len); + skb_pull(rx_skb, pull_len); + ep->client_notify(ep->priv, IPA_RECEIVE, + (unsigned long)(rx_skb)); + spin_unlock(&ipa_ctx->disconnect_lock); + return 0; +} + +static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags) +{ + return __dev_alloc_skb(len, flags); +} + +static struct sk_buff *ipa_get_skb_ipa_rx_headroom(unsigned int len, + gfp_t flags) +{ + struct sk_buff *skb; + + skb = __dev_alloc_skb(len + IPA_HEADROOM, flags); + if (skb) + skb_reserve(skb, IPA_HEADROOM); + + return skb; +} + +static void ipa_free_skb_rx(struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} + +void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data) +{ + struct sk_buff *rx_skb = (struct sk_buff *)data; + struct ipa_hw_pkt_status *status; + struct ipa_ep_context *ep; + unsigned int src_pipe; + u32 metadata; + u8 ucp; + + status = (struct ipa_hw_pkt_status *)rx_skb->data; + src_pipe = status->endp_src_idx; + metadata = status->metadata; + ucp = status->ucp; + ep = &ipa_ctx->ep[src_pipe]; + if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes || + !ep->valid || + !ep->client_notify)) { + IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n", + src_pipe, ep->valid, ep->client_notify); 
+ dev_kfree_skb_any(rx_skb); + return; + } + if (!status->exception) + skb_pull(rx_skb, IPA_PKT_STATUS_SIZE + + IPA_LAN_RX_HEADER_LENGTH); + else + skb_pull(rx_skb, IPA_PKT_STATUS_SIZE); + + /* + * Metadata Info + * ------------------------------------------ + * | 3 | 2 | 1 | 0 | + * | fw_desc | vdev_id | qmap mux id | Resv | + * ------------------------------------------ + */ + *(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF); + *(u8 *)(rx_skb->cb + 4) = ucp; + IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n", + metadata, *(u32 *)rx_skb->cb); + IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4)); + + ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb)); +} + +void ipa2_recycle_wan_skb(struct sk_buff *skb) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + int ep_idx = ipa2_get_ep_mapping( + IPA_CLIENT_APPS_WAN_CONS); + gfp_t flag = GFP_NOWAIT | __GFP_NOWARN | + (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + if (unlikely(ep_idx == -1)) { + IPAERR("dest EP does not exist\n"); + ipa_assert(); + } + + rx_pkt = kmem_cache_zalloc( + ipa_ctx->rx_pkt_wrapper_cache, flag); + if (!rx_pkt) + ipa_assert(); + + INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail); + rx_pkt->sys = ipa_ctx->ep[ep_idx].sys; + + rx_pkt->data.skb = skb; + rx_pkt->data.dma_addr = 0; + ipa_skb_recycle(rx_pkt->data.skb); + skb_reserve(rx_pkt->data.skb, IPA_HEADROOM); + INIT_LIST_HEAD(&rx_pkt->link); + spin_lock_bh(&rx_pkt->sys->spinlock); + list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list); + spin_unlock_bh(&rx_pkt->sys->spinlock); +} + +static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size) +{ + struct ipa_rx_pkt_wrapper *rx_pkt_expected; + struct sk_buff *rx_skb; + + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + WARN_ON(1); + spin_unlock_bh(&sys->spinlock); + return; + } + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa_rx_pkt_wrapper, + link); + list_del(&rx_pkt_expected->link); + sys->len--; + spin_unlock_bh(&sys->spinlock); + if (size) + 
rx_pkt_expected->len = size; + rx_skb = rx_pkt_expected->data.skb; + dma_unmap_single(ipa_ctx->pdev, rx_pkt_expected->data.dma_addr, + sys->rx_buff_sz, DMA_FROM_DEVICE); + skb_set_tail_pointer(rx_skb, rx_pkt_expected->len); + rx_skb->len = rx_pkt_expected->len; + *(unsigned int *)rx_skb->cb = rx_skb->len; + rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff); + sys->pyld_hdlr(rx_skb, sys); + sys->repl_hdlr(sys); + kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected); + +} + +static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size) +{ + struct ipa_rx_pkt_wrapper *rx_pkt_expected; + struct sk_buff *rx_skb; + + spin_lock_bh(&sys->spinlock); + if (unlikely(list_empty(&sys->head_desc_list))) { + WARN_ON(1); + spin_unlock_bh(&sys->spinlock); + return; + } + rx_pkt_expected = list_first_entry(&sys->head_desc_list, + struct ipa_rx_pkt_wrapper, + link); + list_del(&rx_pkt_expected->link); + sys->len--; + spin_unlock_bh(&sys->spinlock); + + if (size) + rx_pkt_expected->len = size; + + rx_skb = rx_pkt_expected->data.skb; + skb_set_tail_pointer(rx_skb, rx_pkt_expected->len); + rx_skb->len = rx_pkt_expected->len; + rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff); + sys->ep->wstats.tx_pkts_rcvd++; + if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) { + ipa2_free_skb(&rx_pkt_expected->data); + sys->ep->wstats.tx_pkts_dropped++; + } else { + sys->ep->wstats.tx_pkts_sent++; + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(&rx_pkt_expected->data)); + } + ipa_replenish_wlan_rx_cache(sys); +} + +static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys, + struct sps_iovec *iovec) +{ + IPADBG_LOW("ENTER.\n"); + if (unlikely(list_empty(&sys->head_desc_list))) { + IPAERR("descriptor list is empty!\n"); + WARN_ON(1); + return; + } + if (!(iovec->flags & SPS_IOVEC_FLAG_EOT)) { + IPAERR("received unexpected event. 
sps flag is 0x%x\n" + , iovec->flags); + WARN_ON(1); + return; + } + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(iovec)); + IPADBG("EXIT\n"); +} + +static void ipa_wq_rx_avail(struct work_struct *work) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + struct ipa_sys_context *sys; + + rx_pkt = container_of(work, struct ipa_rx_pkt_wrapper, work); + if (unlikely(rx_pkt == NULL)) + WARN_ON(1); + sys = rx_pkt->sys; + ipa_wq_rx_common(sys, 0); +} + +/** + * ipa_sps_irq_rx_no_aggr_notify() - Callback function which will be called by + * the SPS driver after a Rx operation is complete. + * Called in an interrupt context. + * @notify: SPS driver supplied notification struct + * + * This function defer the work for this event to a workqueue. + */ +void ipa_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + + switch (notify->event_id) { + case SPS_EVENT_EOT: + rx_pkt = notify->data.transfer.user; + if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client)) + atomic_set(&ipa_ctx->sps_pm.eot_activity, 1); + rx_pkt->len = notify->data.transfer.iovec.size; + IPADBG_LOW + ("event %d notified sys=%p len=%u\n", notify->event_id, + notify->user, rx_pkt->len); + queue_work(rx_pkt->sys->wq, &rx_pkt->work); + break; + default: + IPAERR("received unexpected event id %d sys=%p\n", + notify->event_id, notify->user); + } +} + +static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb, + struct ipa_sys_context *sys) +{ + if (sys->ep->client_notify) { + sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, + (unsigned long)(rx_skb)); + } else { + dev_kfree_skb_any(rx_skb); + WARN_ON(1); + } + + return 0; +} + +static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in, + struct ipa_sys_context *sys) +{ + unsigned long aggr_byte_limit; + + sys->ep->status.status_en = true; + sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX; + if (IPA_CLIENT_IS_PROD(in->client)) { + if (!sys->ep->skip_ep_cfg) { + sys->policy = 
IPA_POLICY_NOINTR_MODE; + sys->sps_option = SPS_O_AUTO_ENABLE; + sys->sps_callback = NULL; + sys->ep->status.status_ep = ipa2_get_ep_mapping( + IPA_CLIENT_APPS_LAN_CONS); + if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) + sys->ep->status.status_en = false; + } else { + sys->policy = IPA_POLICY_INTR_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | + SPS_O_EOT); + sys->sps_callback = + ipa_sps_irq_tx_no_aggr_notify; + } + return 0; + } + + aggr_byte_limit = + (unsigned long)IPA_GENERIC_RX_BUFF_SZ( + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit)); + + if (in->client == IPA_CLIENT_APPS_LAN_CONS || + in->client == IPA_CLIENT_APPS_WAN_CONS) { + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT + | SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + replenish_rx_work_func); + INIT_WORK(&sys->repl_work, ipa_wq_repl_rx); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ( + IPA_GENERIC_RX_BUFF_BASE_SZ) - + IPA_HEADROOM; + sys->get_skb = ipa_get_skb_ipa_rx_headroom; + sys->free_skb = ipa_free_skb_rx; + in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR; + in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC; + in->ipa_ep_cfg.aggr.aggr_time_limit = + IPA_GENERIC_AGGR_TIME_LIMIT; + if (in->client == IPA_CLIENT_APPS_LAN_CONS) { + sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr; + sys->rx_pool_sz = + ipa_ctx->lan_rx_ring_size; + if (nr_cpu_ids > 1) { + sys->repl_hdlr = + ipa_fast_replenish_rx_cache; + sys->repl_trig_thresh = + sys->rx_pool_sz / 8; + } else { + sys->repl_hdlr = + ipa_replenish_rx_cache; + } + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GENERIC_AGGR_BYTE_LIMIT; + in->ipa_ep_cfg.aggr.aggr_pkt_limit = + IPA_GENERIC_AGGR_PKT_LIMIT; + sys->ep->wakelock_client = + IPA_WAKELOCK_REF_CLIENT_LAN_RX; + } else if (in->client == 
+ IPA_CLIENT_APPS_WAN_CONS) { + sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr; + sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size; + if (nr_cpu_ids > 1) { + sys->repl_hdlr = + ipa_fast_replenish_rx_cache; + sys->repl_trig_thresh = + sys->rx_pool_sz / 8; + } else { + sys->repl_hdlr = + ipa_replenish_rx_cache; + } + if (in->napi_enabled && in->recycle_enabled) + sys->repl_hdlr = + ipa_replenish_rx_cache_recycle; + sys->ep->wakelock_client = + IPA_WAKELOCK_REF_CLIENT_WAN_RX; + in->ipa_ep_cfg.aggr.aggr_sw_eof_active + = true; + if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) { + IPAERR("get close-by %u\n", + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit)); + IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit); + /* disable ipa_status */ + sys->ep->status.status_en = false; + sys->rx_buff_sz = + IPA_GENERIC_RX_BUFF_SZ( + ipa_adjust_ra_buff_base_sz( + in->ipa_ep_cfg.aggr.aggr_byte_limit - IPA_HEADROOM)); + in->ipa_ep_cfg.aggr.aggr_byte_limit = + sys->rx_buff_sz < in->ipa_ep_cfg.aggr.aggr_byte_limit ? 
+ IPA_ADJUST_AGGR_BYTE_LIMIT( + sys->rx_buff_sz) : + IPA_ADJUST_AGGR_BYTE_LIMIT( + in->ipa_ep_cfg.aggr.aggr_byte_limit); + IPAERR("set aggr_limit %lu\n", + (unsigned long) + in->ipa_ep_cfg.aggr.aggr_byte_limit); + } else { + in->ipa_ep_cfg.aggr.aggr_byte_limit = + IPA_GENERIC_AGGR_BYTE_LIMIT; + in->ipa_ep_cfg.aggr.aggr_pkt_limit = + IPA_GENERIC_AGGR_PKT_LIMIT; + } + } + } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->ep->status.status_en = false; + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT + | SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ; + sys->rx_pool_sz = in->desc_fifo_sz / + sizeof(struct sps_iovec) - 1; + if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ) + sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ; + sys->pyld_hdlr = NULL; + sys->repl_hdlr = ipa_replenish_wlan_rx_cache; + sys->get_skb = ipa_get_skb_ipa_rx; + sys->free_skb = ipa_free_skb_rx; + in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR; + sys->ep->wakelock_client = + IPA_WAKELOCK_REF_CLIENT_WLAN_RX; + } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) { + IPADBG("assigning policy to client:%d", + in->client); + + sys->ep->status.status_en = false; + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT + | SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ; + sys->rx_pool_sz = 
in->desc_fifo_sz / + sizeof(struct sps_iovec) - 1; + if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ) + sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ; + sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr; + sys->get_skb = ipa_get_skb_ipa_rx; + sys->free_skb = ipa_free_skb_rx; + sys->repl_hdlr = ipa_replenish_rx_cache; + sys->ep->wakelock_client = + IPA_WAKELOCK_REF_CLIENT_ODU_RX; + } else if (in->client == + IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) { + IPADBG("assigning policy to client:%d", + in->client); + sys->ep->status.status_en = false; + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT + | SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + } else if (in->client == + IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) { + IPADBG("assigning policy to client:%d", + in->client); + sys->ep->status.status_en = false; + sys->policy = IPA_POLICY_NOINTR_MODE; + sys->sps_option = SPS_O_AUTO_ENABLE | + SPS_O_ACK_TRANSFERS | SPS_O_POLL; + } else { + IPAERR("Need to install a RX pipe hdlr\n"); + WARN_ON(1); + return -EINVAL; + } + return 0; +} + +static int ipa_assign_policy(struct ipa_sys_connect_params *in, + struct ipa_sys_context *sys) +{ + if (in->client == IPA_CLIENT_APPS_CMD_PROD) { + sys->policy = IPA_POLICY_INTR_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT); + sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify; + return 0; + } + + if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) { + if (in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) { + sys->policy = IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT | + SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_tx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_tx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_tx_work_func); + atomic_set(&sys->curr_polling_state, 0); + } else if (in->client == IPA_CLIENT_APPS_LAN_CONS) { + sys->policy = 
IPA_POLICY_INTR_POLL_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT | + SPS_O_ACK_TRANSFERS); + sys->sps_callback = ipa_sps_irq_rx_notify; + INIT_WORK(&sys->work, ipa_wq_handle_rx); + INIT_DELAYED_WORK(&sys->switch_to_intr_work, + switch_to_intr_rx_work_func); + INIT_DELAYED_WORK(&sys->replenish_rx_work, + replenish_rx_work_func); + atomic_set(&sys->curr_polling_state, 0); + sys->rx_buff_sz = IPA_RX_SKB_SIZE; + sys->rx_pool_sz = IPA_RX_POOL_CEIL; + sys->pyld_hdlr = ipa_rx_pyld_hdlr; + sys->get_skb = ipa_get_skb_ipa_rx; + sys->free_skb = ipa_free_skb_rx; + sys->repl_hdlr = ipa_replenish_rx_cache; + } else if (IPA_CLIENT_IS_PROD(in->client)) { + sys->policy = IPA_POLICY_INTR_MODE; + sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT); + sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify; + } else { + IPAERR("Need to install a RX pipe hdlr\n"); + WARN_ON(1); + return -EINVAL; + } + + return 0; + } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) + return ipa_assign_policy_v2(in, sys); + + IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type); + WARN_ON(1); + return -EINVAL; +} + +/** + * ipa_tx_client_rx_notify_release() - Callback function + * which will call the user supplied callback function to + * release the skb, or release it on its own if no callback + * function was supplied + * + * @user1: [in] - Data Descriptor + * @user2: [in] - endpoint idx + * + * This notified callback is for the destination client + * This function is supplied in ipa_tx_dp_mul + */ +static void ipa_tx_client_rx_notify_release(void *user1, int user2) +{ + struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1; + int ep_idx = user2; + + IPADBG_LOW("Received data desc anchor:%p\n", dd); + + atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc); + ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++; + + /* wlan host driver waits till tx complete before unload */ + IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n", + ep_idx, atomic_read(&ipa_ctx->ep[ep_idx].avail_fifo_desc)); + 
IPADBG_LOW("calling client notify callback with priv:%p\n", + ipa_ctx->ep[ep_idx].priv); + + if (ipa_ctx->ep[ep_idx].client_notify) { + ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv, + IPA_WRITE_DONE, (unsigned long)user1); + ipa_ctx->ep[ep_idx].wstats.rx_hd_reply++; + } +} +/** + * ipa_tx_client_rx_pkt_status() - Callback function + * which will call the user supplied callback function to + * increase the available fifo descriptor + * + * @user1: [in] - Data Descriptor + * @user2: [in] - endpoint idx + * + * This notified callback is for the destination client + * This function is supplied in ipa_tx_dp_mul + */ +static void ipa_tx_client_rx_pkt_status(void *user1, int user2) +{ + int ep_idx = user2; + + atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc); + ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++; +} + + +/** + * ipa2_tx_dp_mul() - Data-path tx handler for multiple packets + * @src: [in] - Client that is sending data + * @ipa_tx_data_desc: [in] data descriptors from wlan + * + * this is used for to transfer data descriptors that received + * from WLAN1_PROD pipe to IPA HW + * + * The function will send data descriptors from WLAN1_PROD (one + * at a time) using sps_transfer_one. 
Will set EOT flag for last + * descriptor Once this send was done from SPS point-of-view the + * IPA driver will get notified by the supplied callback - + * ipa_sps_irq_tx_no_aggr_notify() + * + * ipa_sps_irq_tx_no_aggr_notify will call to the user supplied + * callback (from ipa_connect) + * + * Returns: 0 on success, negative on failure + */ +int ipa2_tx_dp_mul(enum ipa_client_type src, + struct ipa_tx_data_desc *data_desc) +{ + /* The second byte in wlan header holds qmap id */ +#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1 + struct ipa_tx_data_desc *entry; + struct ipa_sys_context *sys; + struct ipa_desc desc = { 0 }; + u32 num_desc, cnt; + int ep_idx; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + IPADBG_LOW("Received data desc anchor:%p\n", data_desc); + + spin_lock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock); + + ep_idx = ipa2_get_ep_mapping(src); + if (unlikely(ep_idx == -1)) { + IPAERR("dest EP does not exist.\n"); + goto fail_send; + } + IPADBG_LOW("ep idx:%d\n", ep_idx); + sys = ipa_ctx->ep[ep_idx].sys; + + if (unlikely(ipa_ctx->ep[ep_idx].valid == 0)) { + IPAERR("dest EP not valid.\n"); + goto fail_send; + } + sys->ep->wstats.rx_hd_rcvd++; + + /* Calculate the number of descriptors */ + num_desc = 0; + list_for_each_entry(entry, &data_desc->link, link) { + num_desc++; + } + IPADBG_LOW("Number of Data Descriptors:%d", num_desc); + + if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) { + IPAERR("Insufficient data descriptors available\n"); + goto fail_send; + } + + /* Assign callback only for last data descriptor */ + cnt = 0; + list_for_each_entry(entry, &data_desc->link, link) { + IPADBG_LOW("Parsing data desc :%d\n", cnt); + cnt++; + ((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] = + (u8)sys->ep->cfg.meta.qmap_id; + desc.pyld = entry->pyld_buffer; + desc.len = entry->pyld_len; + desc.type = IPA_DATA_DESC_SKB; + desc.user1 = data_desc; + desc.user2 = ep_idx; + IPADBG_LOW("priv:%p pyld_buf:0x%p 
pyld_len:%d\n", + entry->priv, desc.pyld, desc.len); + + /* In case of last descriptor populate callback */ + if (cnt == num_desc) { + IPADBG_LOW("data desc:%p\n", data_desc); + desc.callback = ipa_tx_client_rx_notify_release; + } else { + desc.callback = ipa_tx_client_rx_pkt_status; + } + + IPADBG_LOW("calling ipa_send_one()\n"); + if (ipa_send_one(sys, &desc, true)) { + IPAERR("fail to send skb\n"); + sys->ep->wstats.rx_pkt_leak += (cnt-1); + sys->ep->wstats.rx_dp_fail++; + goto fail_send; + } + + if (atomic_read(&sys->ep->avail_fifo_desc) >= 0) + atomic_dec(&sys->ep->avail_fifo_desc); + + sys->ep->wstats.rx_pkts_rcvd++; + IPADBG_LOW("ep=%d fifo desc=%d\n", + ep_idx, atomic_read(&sys->ep->avail_fifo_desc)); + } + + sys->ep->wstats.rx_hd_processed++; + spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock); + return 0; + +fail_send: + spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock); + return -EFAULT; + +} + +void ipa2_free_skb(struct ipa_rx_data *data) +{ + struct ipa_rx_pkt_wrapper *rx_pkt; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return; + } + + spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock); + + ipa_ctx->wc_memb.total_tx_pkts_freed++; + rx_pkt = container_of(data, struct ipa_rx_pkt_wrapper, data); + + ipa_skb_recycle(rx_pkt->data.skb); + (void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ); + + list_add_tail(&rx_pkt->link, + &ipa_ctx->wc_memb.wlan_comm_desc_list); + ipa_ctx->wc_memb.wlan_comm_free_cnt++; + + spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock); +} + + +/* Functions added to support kernel tests */ + +int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_bam_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + + if (sys_in == NULL || clnt_hdl == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + + if (ipa_bam_hdl == NULL || ipa_pipe_num == NULL) { + IPAERR("NULL args\n"); + goto fail_gen; + } + if 
(sys_in->client >= IPA_CLIENT_MAX) { + IPAERR("bad parm client:%d\n", sys_in->client); + goto fail_gen; + } + + ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client :%d\n", sys_in->client); + goto fail_gen; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client); + + if (ep->valid == 1) { + if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) { + IPAERR("EP %d already allocated\n", ipa_ep_idx); + goto fail_and_disable_clocks; + } else { + if (ipa2_cfg_ep_hdr(ipa_ep_idx, + &sys_in->ipa_ep_cfg.hdr)) { + IPAERR("fail to configure hdr prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } + if (ipa2_cfg_ep_cfg(ipa_ep_idx, + &sys_in->ipa_ep_cfg.cfg)) { + IPAERR("fail to configure cfg prop of EP %d\n", + ipa_ep_idx); + result = -EFAULT; + goto fail_and_disable_clocks; + } + IPAERR("client %d (ep: %d) overlay ok sys=%p\n", + sys_in->client, ipa_ep_idx, ep->sys); + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + *clnt_hdl = ipa_ep_idx; + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + return 0; + } + } + + memset(ep, 0, offsetof(struct ipa_ep_context, sys)); + + ep->valid = 1; + ep->client = sys_in->client; + ep->client_notify = sys_in->notify; + ep->priv = sys_in->priv; + ep->keep_ipa_awake = true; + + result = ipa_enable_data_path(ipa_ep_idx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", + result, ipa_ep_idx); + goto fail_gen2; + } + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_gen2; + } + if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_gen2; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("skipping ep configuration\n"); + } + + *clnt_hdl = ipa_ep_idx; + + *ipa_pipe_num = ipa_ep_idx; + *ipa_bam_hdl = ipa_ctx->bam_handle; + + if 
(!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); + + ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client, + ipa_ep_idx, ep->sys); + + return 0; + +fail_gen2: +fail_and_disable_clocks: + IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client); +fail_gen: + return result; +} + +int ipa2_sys_teardown(u32 clnt_hdl) +{ + struct ipa_ep_context *ep; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm(Either endpoint or client hdl invalid)\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_disable_data_path(clnt_hdl); + ep->valid = 0; + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + return 0; +} + +int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl) +{ + IPAERR("GSI not supported in IPAv2"); + return -EFAULT; +} + + +/** + * ipa_adjust_ra_buff_base_sz() + * + * Return value: the largest power of two which is smaller + * than the input value + */ +static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit) +{ + aggr_byte_limit += IPA_MTU; + aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT; + aggr_byte_limit--; + aggr_byte_limit |= aggr_byte_limit >> 1; + aggr_byte_limit |= aggr_byte_limit >> 2; + aggr_byte_limit |= aggr_byte_limit >> 4; + aggr_byte_limit |= aggr_byte_limit >> 8; + aggr_byte_limit |= aggr_byte_limit >> 16; + aggr_byte_limit++; + return aggr_byte_limit >> 1; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c new file mode 100644 index 000000000000..da0304845deb --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c @@ -0,0 +1,1549 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include "ipa_i.h" + +#define IPA_FLT_TABLE_WORD_SIZE (4) +#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT (0x3) +#define IPA_FLT_BIT_MASK (0x1) +#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1) +#define IPA_FLT_STATUS_OF_ADD_FAILED (-1) +#define IPA_FLT_STATUS_OF_DEL_FAILED (-1) +#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1) + +static int ipa_generate_hw_rule_from_eq( + const struct ipa_ipfltri_rule_eq *attrib, u8 **buf) +{ + uint8_t num_offset_meq_32 = attrib->num_offset_meq_32; + uint8_t num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16; + uint8_t num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32; + uint8_t num_offset_meq_128 = attrib->num_offset_meq_128; + int i; + + if (attrib->tos_eq_present) { + *buf = ipa_write_8(attrib->tos_eq, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->protocol_eq_present) { + *buf = ipa_write_8(attrib->protocol_eq, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (num_offset_meq_32) { + *buf = ipa_write_8(attrib->offset_meq_32[0].offset, *buf); + *buf = ipa_write_32(attrib->offset_meq_32[0].mask, *buf); + *buf = ipa_write_32(attrib->offset_meq_32[0].value, *buf); + *buf = ipa_pad_to_32(*buf); + num_offset_meq_32--; + } + + if (num_offset_meq_32) { + *buf = ipa_write_8(attrib->offset_meq_32[1].offset, *buf); + *buf = ipa_write_32(attrib->offset_meq_32[1].mask, *buf); + *buf = ipa_write_32(attrib->offset_meq_32[1].value, *buf); + *buf = ipa_pad_to_32(*buf); + num_offset_meq_32--; + } + + if (num_ihl_offset_range_16) { + *buf = ipa_write_8(attrib->ihl_offset_range_16[0].offset, *buf); + *buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_high, + *buf); + *buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_low, + *buf); + *buf = ipa_pad_to_32(*buf); + num_ihl_offset_range_16--; + } + + if (num_ihl_offset_range_16) { + *buf = ipa_write_8(attrib->ihl_offset_range_16[1].offset, *buf); + *buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_high, + *buf); + *buf = 
ipa_write_16(attrib->ihl_offset_range_16[1].range_low, + *buf); + *buf = ipa_pad_to_32(*buf); + num_ihl_offset_range_16--; + } + + if (attrib->ihl_offset_eq_16_present) { + *buf = ipa_write_8(attrib->ihl_offset_eq_16.offset, *buf); + *buf = ipa_write_16(attrib->ihl_offset_eq_16.value, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->ihl_offset_eq_32_present) { + *buf = ipa_write_8(attrib->ihl_offset_eq_32.offset, *buf); + *buf = ipa_write_32(attrib->ihl_offset_eq_32.value, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (num_ihl_offset_meq_32) { + *buf = ipa_write_8(attrib->ihl_offset_meq_32[0].offset, *buf); + *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, *buf); + *buf = ipa_write_32(attrib->ihl_offset_meq_32[0].value, *buf); + *buf = ipa_pad_to_32(*buf); + num_ihl_offset_meq_32--; + } + + /* TODO check layout of 16 byte mask and value */ + if (num_offset_meq_128) { + *buf = ipa_write_8(attrib->offset_meq_128[0].offset, *buf); + for (i = 0; i < 16; i++) + *buf = ipa_write_8(attrib->offset_meq_128[0].mask[i], + *buf); + for (i = 0; i < 16; i++) + *buf = ipa_write_8(attrib->offset_meq_128[0].value[i], + *buf); + *buf = ipa_pad_to_32(*buf); + num_offset_meq_128--; + } + + if (num_offset_meq_128) { + *buf = ipa_write_8(attrib->offset_meq_128[1].offset, *buf); + for (i = 0; i < 16; i++) + *buf = ipa_write_8(attrib->offset_meq_128[1].mask[i], + *buf); + for (i = 0; i < 16; i++) + *buf = ipa_write_8(attrib->offset_meq_128[1].value[i], + *buf); + *buf = ipa_pad_to_32(*buf); + num_offset_meq_128--; + } + + if (attrib->tc_eq_present) { + *buf = ipa_write_8(attrib->tc_eq, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->fl_eq_present) { + *buf = ipa_write_32(attrib->fl_eq, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (num_ihl_offset_meq_32) { + *buf = ipa_write_8(attrib->ihl_offset_meq_32[1].offset, *buf); + *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, *buf); + *buf = ipa_write_32(attrib->ihl_offset_meq_32[1].value, *buf); + *buf = 
ipa_pad_to_32(*buf); + num_ihl_offset_meq_32--; + } + + if (attrib->metadata_meq32_present) { + *buf = ipa_write_8(attrib->metadata_meq32.offset, *buf); + *buf = ipa_write_32(attrib->metadata_meq32.mask, *buf); + *buf = ipa_write_32(attrib->metadata_meq32.value, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->ipv4_frag_eq_present) + *buf = ipa_pad_to_32(*buf); + + return 0; +} + +/** + * ipa_generate_flt_hw_rule() - generates the filtering hardware rule + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means + * caller wants to know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. + * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip, + struct ipa_flt_entry *entry, u8 *buf) +{ + struct ipa_flt_rule_hw_hdr *hdr; + const struct ipa_flt_rule *rule = + (const struct ipa_flt_rule *)&entry->rule; + u16 en_rule = 0; + u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4]; + u8 *start; + + if (buf == NULL) { + memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE); + buf = (u8 *)tmp; + } + + start = buf; + hdr = (struct ipa_flt_rule_hw_hdr *)buf; + hdr->u.hdr.action = entry->rule.action; + hdr->u.hdr.retain_hdr = entry->rule.retain_hdr; + hdr->u.hdr.to_uc = entry->rule.to_uc; + if (entry->rt_tbl) + hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx; + else + hdr->u.hdr.rt_tbl_idx = entry->rule.rt_tbl_idx; + hdr->u.hdr.rsvd = 0; + buf += sizeof(struct ipa_flt_rule_hw_hdr); + + if (rule->eq_attrib_type) { + if (ipa_generate_hw_rule_from_eq(&rule->eq_attrib, &buf)) { + IPAERR("fail to generate hw rule\n"); + return -EPERM; + } + en_rule = 
rule->eq_attrib.rule_eq_bitmap; + } else { + if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) { + IPAERR("fail to generate hw rule\n"); + return -EPERM; + } + } + + IPADBG_LOW("en_rule 0x%x, action=%d, rt_idx=%d, uc=%d, retain_hdr=%d\n", + en_rule, + hdr->u.hdr.action, + hdr->u.hdr.rt_tbl_idx, + hdr->u.hdr.to_uc, + hdr->u.hdr.retain_hdr); + + hdr->u.hdr.en_rule = en_rule; + ipa_write_32(hdr->u.word, (u8 *)hdr); + + if (entry->hw_len == 0) { + entry->hw_len = buf - start; + } else if (entry->hw_len != (buf - start)) { + IPAERR("hw_len differs b/w passes passed=%x calc=%td\n", + entry->hw_len, (buf - start)); + return -EPERM; + } + + return 0; +} + +/** + * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table + * @ip: the ip address family type + * @hdr_sz: header size + * + * Returns: size on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz) +{ + struct ipa_flt_tbl *tbl; + struct ipa_flt_entry *entry; + u32 total_sz = 0; + u32 rule_set_sz; + int i; + + *hdr_sz = 0; + tbl = &ipa_ctx->glob_flt_tbl[ip]; + rule_set_sz = 0; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if (ipa_generate_flt_hw_rule(ip, entry, NULL)) { + IPAERR("failed to find HW FLT rule size\n"); + return -EPERM; + } + IPADBG("glob ip %d len %d\n", ip, entry->hw_len); + rule_set_sz += entry->hw_len; + } + + if (rule_set_sz) { + tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE; + /* this rule-set uses a word in header block */ + *hdr_sz += IPA_FLT_TABLE_WORD_SIZE; + if (!tbl->in_sys) { + /* add the terminator */ + total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE); + total_sz = (total_sz + + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) & + ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; + } + } + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + tbl = &ipa_ctx->flt_tbl[i][ip]; + rule_set_sz = 0; + list_for_each_entry(entry, &tbl->head_flt_rule_list, link) { + if 
(ipa_generate_flt_hw_rule(ip, entry, NULL)) { + IPAERR("failed to find HW FLT rule size\n"); + return -EPERM; + } + IPADBG("pipe %d len %d\n", i, entry->hw_len); + rule_set_sz += entry->hw_len; + } + + if (rule_set_sz) { + tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE; + /* this rule-set uses a word in header block */ + *hdr_sz += IPA_FLT_TABLE_WORD_SIZE; + if (!tbl->in_sys) { + /* add the terminator */ + total_sz += (rule_set_sz + + IPA_FLT_TABLE_WORD_SIZE); + total_sz = (total_sz + + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) & + ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; + } + } + } + + *hdr_sz += IPA_FLT_TABLE_WORD_SIZE; + total_sz += *hdr_sz; + IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip); + + return total_sz; +} + +static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, + u8 *hdr, u32 body_start_offset, u8 *hdr2, u32 *hdr_top) +{ + struct ipa_flt_tbl *tbl; + struct ipa_flt_entry *entry; + int i; + u32 offset; + u8 *body; + struct ipa_mem_buffer flt_tbl_mem; + u8 *ftbl_membody; + + *hdr_top = 0; + body = base; + +#define IPA_WRITE_FLT_HDR(idx, val) { \ + if (idx <= 5) { \ + *((u32 *)hdr + 1 + idx) = val; \ + } else if (idx >= 6 && idx <= 10) { \ + WARN_ON(1); \ + } else if (idx >= 11 && idx <= 19) { \ + *((u32 *)hdr2 + idx - 11) = val; \ + } else { \ + WARN_ON(1); \ + } \ +} + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + + if (!list_empty(&tbl->head_flt_rule_list)) { + *hdr_top |= IPA_FLT_BIT_MASK; + + if (!tbl->in_sys) { + offset = body - base + body_start_offset; + if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) { + IPAERR("offset is not word multiple %d\n", + offset); + goto proc_err; + } + + offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; + /* rule is at an offset from base */ + offset |= IPA_FLT_BIT_MASK; + + if (hdr2) + *(u32 *)hdr = offset; + else + hdr = ipa_write_32(offset, hdr); + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_flt_rule_list, + link) { + if (ipa_generate_flt_hw_rule(ip, entry, body)) { + IPAERR("failed 
to gen HW FLT rule\n"); + goto proc_err; + } + body += entry->hw_len; + } + + /* write the rule-set terminator */ + body = ipa_write_32(0, body); + if ((long)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) + /* advance body to next word boundary */ + body = body + (IPA_FLT_TABLE_WORD_SIZE - + ((long)body & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)); + } else { + if (tbl->sz == 0) { + IPAERR("tbl size is 0\n"); + WARN_ON(1); + goto proc_err; + } + + /* allocate memory for the flt tbl */ + flt_tbl_mem.size = tbl->sz; + flt_tbl_mem.base = + dma_alloc_coherent(ipa_ctx->pdev, flt_tbl_mem.size, + &flt_tbl_mem.phys_base, GFP_KERNEL); + if (!flt_tbl_mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", + flt_tbl_mem.size); + WARN_ON(1); + goto proc_err; + } + + WARN_ON(flt_tbl_mem.phys_base & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT); + ftbl_membody = flt_tbl_mem.base; + memset(flt_tbl_mem.base, 0, flt_tbl_mem.size); + + if (hdr2) + *(u32 *)hdr = flt_tbl_mem.phys_base; + else + hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr); + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_flt_rule_list, + link) { + if (ipa_generate_flt_hw_rule(ip, entry, + ftbl_membody)) { + IPAERR("failed to gen HW FLT rule\n"); + WARN_ON(1); + } + ftbl_membody += entry->hw_len; + } + + /* write the rule-set terminator */ + ftbl_membody = ipa_write_32(0, ftbl_membody); + if (tbl->curr_mem.phys_base) { + WARN_ON(tbl->prev_mem.phys_base); + tbl->prev_mem = tbl->curr_mem; + } + tbl->curr_mem = flt_tbl_mem; + } + } + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + tbl = &ipa_ctx->flt_tbl[i][ip]; + if (!list_empty(&tbl->head_flt_rule_list)) { + /* pipe "i" is at bit "i+1" */ + *hdr_top |= (1 << (i + 1)); + + if (!tbl->in_sys) { + offset = body - base + body_start_offset; + if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) { + IPAERR("ofst is not word multiple %d\n", + offset); + goto proc_err; + } + offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT; + /* rule is at an offset from base */ + offset |= 
IPA_FLT_BIT_MASK; + + if (hdr2) + IPA_WRITE_FLT_HDR(i, offset) + else + hdr = ipa_write_32(offset, hdr); + + /* generate the rule-set */ + list_for_each_entry(entry, + &tbl->head_flt_rule_list, + link) { + if (ipa_generate_flt_hw_rule(ip, entry, + body)) { + IPAERR("fail gen FLT rule\n"); + goto proc_err; + } + body += entry->hw_len; + } + + /* write the rule-set terminator */ + body = ipa_write_32(0, body); + if ((long)body & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) + /* advance body to next word boundary */ + body = body + (IPA_FLT_TABLE_WORD_SIZE - + ((long)body & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)); + } else { + if (tbl->sz == 0) { + IPAERR("tbl size is 0\n"); + WARN_ON(1); + goto proc_err; + } + + /* allocate memory for the flt tbl */ + flt_tbl_mem.size = tbl->sz; + flt_tbl_mem.base = + dma_alloc_coherent(ipa_ctx->pdev, + flt_tbl_mem.size, + &flt_tbl_mem.phys_base, + GFP_KERNEL); + if (!flt_tbl_mem.base) { + IPAERR("fail alloc DMA buff size %d\n", + flt_tbl_mem.size); + WARN_ON(1); + goto proc_err; + } + + WARN_ON(flt_tbl_mem.phys_base & + IPA_FLT_ENTRY_MEMORY_ALLIGNMENT); + + ftbl_membody = flt_tbl_mem.base; + memset(flt_tbl_mem.base, 0, flt_tbl_mem.size); + + if (hdr2) + IPA_WRITE_FLT_HDR(i, + flt_tbl_mem.phys_base) + else + hdr = ipa_write_32( + flt_tbl_mem.phys_base, hdr); + + /* generate the rule-set */ + list_for_each_entry(entry, + &tbl->head_flt_rule_list, + link) { + if (ipa_generate_flt_hw_rule(ip, entry, + ftbl_membody)) { + IPAERR("fail gen FLT rule\n"); + WARN_ON(1); + } + ftbl_membody += entry->hw_len; + } + + /* write the rule-set terminator */ + ftbl_membody = + ipa_write_32(0, ftbl_membody); + if (tbl->curr_mem.phys_base) { + WARN_ON(tbl->prev_mem.phys_base); + tbl->prev_mem = tbl->curr_mem; + } + tbl->curr_mem = flt_tbl_mem; + } + } + } + + return 0; + +proc_err: + return -EPERM; +} + + +/** + * ipa_generate_flt_hw_tbl() - generates the filtering hardware table + * @ip: [in] the ip address family type + * @mem: [out] buffer to put the filtering 
table + * + * Returns: 0 on success, negative on failure + */ +static int ipa_generate_flt_hw_tbl_v1_1(enum ipa_ip_type ip, + struct ipa_mem_buffer *mem) +{ + u32 hdr_top = 0; + u32 hdr_sz; + u8 *hdr; + u8 *body; + u8 *base; + int res; + + res = ipa_get_flt_hw_tbl_size(ip, &hdr_sz); + if (res < 0) { + IPAERR("ipa_get_flt_hw_tbl_size failed %d\n", res); + return res; + } + + mem->size = res; + mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size); + + if (mem->size == 0) { + IPAERR("flt tbl empty ip=%d\n", ip); + goto error; + } + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + goto error; + } + + /* build the flt tbl in the DMA buffer to submit to IPA HW */ + base = hdr = (u8 *)mem->base; + body = base + hdr_sz; + + /* write a dummy header to move cursor */ + hdr = ipa_write_32(hdr_top, hdr); + + if (ipa_generate_flt_hw_tbl_common(ip, body, hdr, hdr_sz, 0, + &hdr_top)) { + IPAERR("fail to generate FLT HW table\n"); + goto proc_err; + } + + /* now write the hdr_top */ + ipa_write_32(hdr_top, base); + + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + + return 0; + +proc_err: + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); +error: + return -EPERM; +} + +static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip) +{ + struct ipa_flt_tbl *tbl; + int i; + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + if (tbl->prev_mem.phys_base) { + IPADBG_LOW("reaping glob flt tbl (prev) ip=%d\n", ip); + dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size, + tbl->prev_mem.base, tbl->prev_mem.phys_base); + memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem)); + } + + if (list_empty(&tbl->head_flt_rule_list)) { + if (tbl->curr_mem.phys_base) { + IPADBG_LOW("reaping glob flt tbl (curr) ip=%d\n", ip); + dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size, + tbl->curr_mem.base, + tbl->curr_mem.phys_base); + memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem)); + } 
+ } + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + tbl = &ipa_ctx->flt_tbl[i][ip]; + if (tbl->prev_mem.phys_base) { + IPADBG_LOW("reaping flt tbl"); + IPADBG_LOW("(prev) pipe=%d ip=%d\n", i, ip); + dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size, + tbl->prev_mem.base, + tbl->prev_mem.phys_base); + memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem)); + } + + if (list_empty(&tbl->head_flt_rule_list)) { + if (tbl->curr_mem.phys_base) { + IPADBG_LOW("reaping flt tbl"); + IPADBG_LOW("(curr) pipe=%d ip=%d\n", + i, ip); + dma_free_coherent(ipa_ctx->pdev, + tbl->curr_mem.size, + tbl->curr_mem.base, + tbl->curr_mem.phys_base); + memset(&tbl->curr_mem, 0, + sizeof(tbl->curr_mem)); + } + } + } +} + +int __ipa_commit_flt_v1_1(enum ipa_ip_type ip) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer *mem; + void *cmd; + struct ipa_ip_v4_filter_init *v4; + struct ipa_ip_v6_filter_init *v6; + u16 avail; + u16 size; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL); + if (!mem) { + IPAERR("failed to alloc memory object\n"); + goto fail_alloc_mem; + } + + if (ip == IPA_IP_v4) { + avail = ipa_ctx->ip4_flt_tbl_lcl ? IPA_MEM_v1_RAM_V4_FLT_SIZE : + IPA_MEM_PART(v4_flt_size_ddr); + size = sizeof(struct ipa_ip_v4_filter_init); + } else { + avail = ipa_ctx->ip6_flt_tbl_lcl ? 
IPA_MEM_v1_RAM_V6_FLT_SIZE : + IPA_MEM_PART(v6_flt_size_ddr); + size = sizeof(struct ipa_ip_v6_filter_init); + } + cmd = kmalloc(size, flag); + if (!cmd) { + IPAERR("failed to alloc immediate command object\n"); + goto fail_alloc_cmd; + } + + if (ipa_generate_flt_hw_tbl_v1_1(ip, mem)) { + IPAERR("fail to generate FLT HW TBL ip %d\n", ip); + goto fail_hw_tbl_gen; + } + + if (mem->size > avail) { + IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail); + goto fail_send_cmd; + } + + if (ip == IPA_IP_v4) { + v4 = (struct ipa_ip_v4_filter_init *)cmd; + desc.opcode = IPA_IP_V4_FILTER_INIT; + v4->ipv4_rules_addr = mem->phys_base; + v4->size_ipv4_rules = mem->size; + v4->ipv4_addr = IPA_MEM_v1_RAM_V4_FLT_OFST; + } else { + v6 = (struct ipa_ip_v6_filter_init *)cmd; + desc.opcode = IPA_IP_V6_FILTER_INIT; + v6->ipv6_rules_addr = mem->phys_base; + v6->size_ipv6_rules = mem->size; + v6->ipv6_addr = IPA_MEM_v1_RAM_V6_FLT_OFST; + } + + desc.pyld = cmd; + desc.len = size; + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + goto fail_send_cmd; + } + + __ipa_reap_sys_flt_tbls(ip); + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); + kfree(cmd); + kfree(mem); + + return 0; + +fail_send_cmd: + if (mem->phys_base) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +fail_hw_tbl_gen: + kfree(cmd); +fail_alloc_cmd: + kfree(mem); +fail_alloc_mem: + + return -EPERM; +} + +static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip, + struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head1, + struct ipa_mem_buffer *head2) +{ + int i; + u32 hdr_sz; + int num_words; + u32 *entr; + u32 body_start_offset; + u32 hdr_top; + int res; + + if (ip == IPA_IP_v4) + body_start_offset = IPA_MEM_PART(apps_v4_flt_ofst) - + IPA_MEM_PART(v4_flt_ofst); + else + body_start_offset = IPA_MEM_PART(apps_v6_flt_ofst) - + IPA_MEM_PART(v6_flt_ofst); 
+ + num_words = 7; + head1->size = num_words * 4; + head1->base = dma_alloc_coherent(ipa_ctx->pdev, head1->size, + &head1->phys_base, GFP_KERNEL); + if (!head1->base) { + IPAERR("fail to alloc DMA buff of size %d\n", head1->size); + goto err; + } + entr = (u32 *)head1->base; + for (i = 0; i < num_words; i++) { + *entr = ipa_ctx->empty_rt_tbl_mem.phys_base; + entr++; + } + + num_words = 9; + head2->size = num_words * 4; + head2->base = dma_alloc_coherent(ipa_ctx->pdev, head2->size, + &head2->phys_base, GFP_KERNEL); + if (!head2->base) { + IPAERR("fail to alloc DMA buff of size %d\n", head2->size); + goto head_err; + } + entr = (u32 *)head2->base; + for (i = 0; i < num_words; i++) { + *entr = ipa_ctx->empty_rt_tbl_mem.phys_base; + entr++; + } + + res = ipa_get_flt_hw_tbl_size(ip, &hdr_sz); + if (res < 0) { + IPAERR("ipa_get_flt_hw_tbl_size failed %d\n", res); + goto body_err; + } + + mem->size = res; + mem->size -= hdr_sz; + mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size); + + if (mem->size) { + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", + mem->size); + goto body_err; + } + } + + if (ipa_generate_flt_hw_tbl_common(ip, mem->base, head1->base, + body_start_offset, head2->base, &hdr_top)) { + IPAERR("fail to generate FLT HW table\n"); + goto proc_err; + } + + IPADBG("HEAD1\n"); + IPA_DUMP_BUFF(head1->base, head1->phys_base, head1->size); + IPADBG("HEAD2\n"); + IPA_DUMP_BUFF(head2->base, head2->phys_base, head2->size); + if (mem->size) { + IPADBG("BODY\n"); + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + } + + return 0; + +proc_err: + if (mem->size) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +body_err: + dma_free_coherent(ipa_ctx->pdev, head2->size, head2->base, + head2->phys_base); +head_err: + dma_free_coherent(ipa_ctx->pdev, head1->size, head1->base, + head1->phys_base); +err: + return -EPERM; +} + +int 
__ipa_commit_flt_v2(enum ipa_ip_type ip) +{ + struct ipa_desc *desc; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd; + struct ipa_mem_buffer body; + struct ipa_mem_buffer head1; + struct ipa_mem_buffer head2; + int rc = 0; + u32 local_addrb; + u32 local_addrh; + bool lcl; + int num_desc = 0; + int i; + u16 avail; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + desc = kzalloc(16 * sizeof(*desc), GFP_ATOMIC); + if (desc == NULL) { + IPAERR("fail to alloc desc blob ip %d\n", ip); + rc = -ENOMEM; + goto fail_desc; + } + + cmd = kzalloc(16 * sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("fail to alloc cmd blob ip %d\n", ip); + rc = -ENOMEM; + goto fail_imm; + } + + if (ip == IPA_IP_v4) { + avail = ipa_ctx->ip4_flt_tbl_lcl ? + IPA_MEM_PART(apps_v4_flt_size) : + IPA_MEM_PART(v4_flt_size_ddr); + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst) + 4; + local_addrb = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_flt_ofst); + lcl = ipa_ctx->ip4_flt_tbl_lcl; + } else { + avail = ipa_ctx->ip6_flt_tbl_lcl ? 
+ IPA_MEM_PART(apps_v6_flt_size) : + IPA_MEM_PART(v6_flt_size_ddr); + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst) + 4; + local_addrb = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_flt_ofst); + lcl = ipa_ctx->ip6_flt_tbl_lcl; + } + + if (ipa_generate_flt_hw_tbl_v2(ip, &body, &head1, &head2)) { + IPAERR("fail to generate FLT HW TBL ip %d\n", ip); + rc = -EFAULT; + goto fail_gen; + } + + if (body.size > avail) { + IPAERR("tbl too big, needed %d avail %d\n", body.size, avail); + goto fail_send_cmd; + } + + cmd[num_desc].size = 4; + cmd[num_desc].system_addr = head1.phys_base; + cmd[num_desc].local_addr = local_addrh; + + desc[num_desc].opcode = IPA_DMA_SHARED_MEM; + desc[num_desc].pyld = &cmd[num_desc]; + desc[num_desc].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[num_desc++].type = IPA_IMM_CMD_DESC; + + for (i = 0; i < 6; i++) { + if (ipa_ctx->skip_ep_cfg_shadow[i]) { + IPADBG_LOW("skip %d\n", i); + continue; + } + + if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS) == i || + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) == i || + ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD) == i || + (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == i + && ipa_ctx->modem_cfg_emb_pipe_flt)) { + IPADBG_LOW("skip %d\n", i); + continue; + } + + if (ip == IPA_IP_v4) { + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst) + + 8 + i * 4; + } else { + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst) + + 8 + i * 4; + } + cmd[num_desc].size = 4; + cmd[num_desc].system_addr = head1.phys_base + 4 + i * 4; + cmd[num_desc].local_addr = local_addrh; + + desc[num_desc].opcode = IPA_DMA_SHARED_MEM; + desc[num_desc].pyld = &cmd[num_desc]; + desc[num_desc].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[num_desc++].type = IPA_IMM_CMD_DESC; + } + + for (i = 11; i < ipa_ctx->ipa_num_pipes; i++) { + if (ipa_ctx->skip_ep_cfg_shadow[i]) { + IPADBG_LOW("skip %d\n", i); + continue; + } + 
if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == i && + ipa_ctx->modem_cfg_emb_pipe_flt) { + IPADBG_LOW("skip %d\n", i); + continue; + } + if (ip == IPA_IP_v4) { + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_flt_ofst) + + 13 * 4 + (i - 11) * 4; + } else { + local_addrh = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_flt_ofst) + + 13 * 4 + (i - 11) * 4; + } + cmd[num_desc].size = 4; + cmd[num_desc].system_addr = head2.phys_base + (i - 11) * 4; + cmd[num_desc].local_addr = local_addrh; + + desc[num_desc].opcode = IPA_DMA_SHARED_MEM; + desc[num_desc].pyld = &cmd[num_desc]; + desc[num_desc].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[num_desc++].type = IPA_IMM_CMD_DESC; + } + + if (lcl) { + cmd[num_desc].size = body.size; + cmd[num_desc].system_addr = body.phys_base; + cmd[num_desc].local_addr = local_addrb; + + desc[num_desc].opcode = IPA_DMA_SHARED_MEM; + desc[num_desc].pyld = &cmd[num_desc]; + desc[num_desc].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[num_desc++].type = IPA_IMM_CMD_DESC; + + if (ipa_send_cmd(num_desc, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_send_cmd; + } + } else { + if (ipa_send_cmd(num_desc, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_send_cmd; + } + } + + __ipa_reap_sys_flt_tbls(ip); + +fail_send_cmd: + if (body.size) + dma_free_coherent(ipa_ctx->pdev, body.size, body.base, + body.phys_base); + dma_free_coherent(ipa_ctx->pdev, head1.size, head1.base, + head1.phys_base); + dma_free_coherent(ipa_ctx->pdev, head2.size, head2.base, + head2.phys_base); +fail_gen: + kfree(cmd); +fail_imm: + kfree(desc); +fail_desc: + return rc; +} + +static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, + const struct ipa_flt_rule *rule, u8 add_rear, + u32 *rule_hdl, bool user) +{ + struct ipa_flt_entry *entry; + struct ipa_rt_tbl *rt_tbl = NULL; + int id; + + if (rule->action != 
IPA_PASS_TO_EXCEPTION) { + if (!rule->eq_attrib_type) { + if (!rule->rt_tbl_hdl) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + + rt_tbl = ipa_id_find(rule->rt_tbl_hdl); + if (rt_tbl == NULL) { + IPAERR_RL("RT tbl not found\n"); + goto error; + } + + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { + IPAERR_RL("RT table cookie is invalid\n"); + goto error; + } + } else { + if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ? + IPA_MEM_PART(v4_modem_rt_index_hi) : + IPA_MEM_PART(v6_modem_rt_index_hi))) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + } else { + if (rule->rt_tbl_idx > 0) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + + entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc FLT rule object\n"); + goto error; + } + INIT_LIST_HEAD(&entry->link); + entry->rule = *rule; + entry->cookie = IPA_FLT_COOKIE; + entry->rt_tbl = rt_tbl; + entry->tbl = tbl; + if (add_rear) { + if (tbl->sticky_rear) + list_add_tail(&entry->link, + tbl->head_flt_rule_list.prev); + else + list_add_tail(&entry->link, &tbl->head_flt_rule_list); + } else { + list_add(&entry->link, &tbl->head_flt_rule_list); + } + tbl->rule_cnt++; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt++; + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to add to tree\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + *rule_hdl = id; + entry->id = id; + entry->ipacm_installed = user; + IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt); + + return 0; +ipa_insert_failed: + tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + list_del(&entry->link); + kmem_cache_free(ipa_ctx->flt_rule_cache, entry); +error: + return -EPERM; +} + +static int __ipa_del_flt_rule(u32 rule_hdl) +{ + struct ipa_flt_entry *entry; + int id; + + entry = ipa_id_find(rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + 
id = entry->id; + + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + IPADBG_LOW("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt); + entry->cookie = 0; + kmem_cache_free(ipa_ctx->flt_rule_cache, entry); + + /* remove the handle from the database */ + ipa_id_remove(id); + + return 0; +} + +static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, + enum ipa_ip_type ip) +{ + struct ipa_flt_entry *entry; + struct ipa_rt_tbl *rt_tbl = NULL; + + entry = ipa_id_find(frule->rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + goto error; + } + + if (entry->cookie != IPA_FLT_COOKIE) { + IPAERR_RL("bad params\n"); + goto error; + } + + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + + if (frule->rule.action != IPA_PASS_TO_EXCEPTION) { + if (!frule->rule.eq_attrib_type) { + if (!frule->rule.rt_tbl_hdl) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + + rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl); + if (rt_tbl == NULL) { + IPAERR_RL("RT tbl not found\n"); + goto error; + } + + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { + IPAERR_RL("RT table cookie is invalid\n"); + goto error; + } + } else { + if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ? 
+ IPA_MEM_PART(v4_modem_rt_index_hi) : + IPA_MEM_PART(v6_modem_rt_index_hi))) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + } else { + if (frule->rule.rt_tbl_idx > 0) { + IPAERR_RL("invalid RT tbl\n"); + goto error; + } + } + + entry->rule = frule->rule; + entry->rt_tbl = rt_tbl; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt++; + entry->hw_len = 0; + + return 0; + +error: + return -EPERM; +} + +static int __ipa_add_global_flt_rule(enum ipa_ip_type ip, + const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl) +{ + struct ipa_flt_tbl *tbl; + + if (rule == NULL || rule_hdl == NULL) { + IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl); + + return -EINVAL; + } + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + IPADBG_LOW("add global flt rule ip=%d\n", ip); + + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, false); +} + +static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep, + const struct ipa_flt_rule *rule, u8 add_rear, + u32 *rule_hdl, bool user) +{ + struct ipa_flt_tbl *tbl; + int ipa_ep_idx; + + if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule, + rule_hdl, ep); + + return -EINVAL; + } + ipa_ep_idx = ipa2_get_ep_mapping(ep); + if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) { + IPAERR_RL("ep not valid ep=%d\n", ep); + return -EINVAL; + } + if (ipa_ctx->ep[ipa_ep_idx].valid == 0) + IPADBG("ep not connected ep_idx=%d\n", ipa_ep_idx); + + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip]; + IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep); + + return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl, user); +} + +/** + * ipa2_add_flt_rule() - Add the specified filtering rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of filtering rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules) +{ + return 
ipa2_add_flt_rule_usr(rules, false); +} + +/** + * ipa2_add_flt_rule_usr() - Add the specified filtering rules + * to SW and optionally commit to IPA HW + * @rules: [inout] set of filtering rules to add + * @user_only: [in] indicate rules installed by userspace + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, bool user_only) +{ + int i; + int result; + + if (rules == NULL || rules->num_rules == 0 || + rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + if (rules->global) + result = __ipa_add_global_flt_rule(rules->ip, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].flt_rule_hdl); + else + result = __ipa_add_ep_flt_rule(rules->ip, rules->ep, + &rules->rules[i].rule, + rules->rules[i].at_rear, + &rules->rules[i].flt_rule_hdl, + user_only); + if (result) { + IPAERR_RL("failed to add flt rule %d\n", i); + rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa_ctx->ctrl->ipa_commit_flt(rules->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * ipa2_del_flt_rule() - Remove the specified filtering rules from SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) { + IPAERR_RL("failed to del rt rule %i\n", i); + hdls->hdl[i].status = 
IPA_FLT_STATUS_OF_DEL_FAILED; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) + if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * ipa2_mdfy_flt_rule() - Modify the specified filtering rules in SW and + * optionally commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_rules; i++) { + if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) { + IPAERR_RL("failed to mdfy rt rule %i\n", i); + hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED; + } else { + hdls->rules[i].status = 0; + } + } + + if (hdls->commit) + if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + + +/** + * ipa2_commit_flt() - Commit the current SW filtering table of specified type + * to IPA HW + * @ip: [in] the family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_commit_flt(enum ipa_ip_type ip) +{ + int result; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + + if (ipa_ctx->ctrl->ipa_commit_flt(ip)) { + result = -EPERM; + goto bail; + } + result = 0; + +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * ipa2_reset_flt() - Reset the current SW filtering table of specified type + * (does not commit to HW) + * @ip: [in] the family of routing tables + * @user_only: [in] indicate rules deleted by userspace + * + * Returns: 0 on success, negative on 
failure + * + * Note: Should not be called from atomic context + */ +int ipa2_reset_flt(enum ipa_ip_type ip, bool user_only) +{ + struct ipa_flt_tbl *tbl; + struct ipa_flt_entry *entry; + struct ipa_flt_entry *next; + int i; + int id; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + tbl = &ipa_ctx->glob_flt_tbl[ip]; + mutex_lock(&ipa_ctx->lock); + IPADBG("reset flt ip=%d\n", ip); + list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) { + if (ipa_id_find(entry->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + if ((ip == IPA_IP_v4 && + entry->rule.attrib.attrib_mask == IPA_FLT_PROTOCOL && + entry->rule.attrib.u.v4.protocol == + IPA_INVALID_L4_PROTOCOL) || + (ip == IPA_IP_v6 && + entry->rule.attrib.attrib_mask == IPA_FLT_NEXT_HDR && + entry->rule.attrib.u.v6.next_hdr == + IPA_INVALID_L4_PROTOCOL)) + continue; + + if (!user_only || + entry->ipacm_installed) { + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa_ctx->flt_rule_cache, entry); + + /* remove the handle from the database */ + ipa_id_remove(id); + } + } + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + tbl = &ipa_ctx->flt_tbl[i][ip]; + list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, + link) { + if (ipa_id_find(entry->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + if (!user_only || + entry->ipacm_installed) { + list_del(&entry->link); + entry->tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + entry->cookie = 0; + id = entry->id; + kmem_cache_free(ipa_ctx->flt_rule_cache, + entry); + + /* remove the handle from the database */ + ipa_id_remove(id); + } + } + } + + /* commit the change to IPA-HW */ + if (ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4) || + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6)) { + IPAERR_RL("fail to commit flt-rule\n"); + 
WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa_ctx->lock); + return -EPERM; + } + mutex_unlock(&ipa_ctx->lock); + return 0; +} + +void ipa_install_dflt_flt_rules(u32 ipa_ep_idx) +{ + struct ipa_flt_tbl *tbl; + struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx]; + struct ipa_flt_rule rule; + + memset(&rule, 0, sizeof(rule)); + + mutex_lock(&ipa_ctx->lock); + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; + rule.action = IPA_PASS_TO_EXCEPTION; + __ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true, + &ep->dflt_flt4_rule_hdl, false); + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4); + tbl->sticky_rear = true; + + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; + rule.action = IPA_PASS_TO_EXCEPTION; + __ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true, + &ep->dflt_flt6_rule_hdl, false); + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6); + tbl->sticky_rear = true; + mutex_unlock(&ipa_ctx->lock); +} + +void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx) +{ + struct ipa_flt_tbl *tbl; + struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx]; + + mutex_lock(&ipa_ctx->lock); + if (ep->dflt_flt4_rule_hdl) { + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4]; + __ipa_del_flt_rule(ep->dflt_flt4_rule_hdl); + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4); + /* Reset the sticky flag. */ + tbl->sticky_rear = false; + ep->dflt_flt4_rule_hdl = 0; + } + if (ep->dflt_flt6_rule_hdl) { + tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6]; + __ipa_del_flt_rule(ep->dflt_flt6_rule_hdl); + ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6); + /* Reset the sticky flag. */ + tbl->sticky_rear = false; + ep->dflt_flt6_rule_hdl = 0; + } + mutex_unlock(&ipa_ctx->lock); +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c new file mode 100644 index 000000000000..d67b8744a65d --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c @@ -0,0 +1,1580 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include "ipa_i.h" + +static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60}; +static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64}; + +#define HDR_TYPE_IS_VALID(type) \ + ((type) >= 0 && (type) < IPA_HDR_L2_MAX) + +#define HDR_PROC_TYPE_IS_VALID(type) \ + ((type) >= 0 && (type) < IPA_HDR_PROC_MAX) + +/* uCP command numbers */ +#define IPA_HDR_UCP_802_3_TO_802_3 6 +#define IPA_HDR_UCP_802_3_TO_ETHII 7 +#define IPA_HDR_UCP_ETHII_TO_802_3 8 +#define IPA_HDR_UCP_ETHII_TO_ETHII 9 + +/** + * ipa_generate_hdr_hw_tbl() - generates the headers table + * @mem: [out] buffer to put the header table + * + * Returns: 0 on success, negative on failure + */ +static int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem) +{ + struct ipa_hdr_entry *entry; + + mem->size = ipa_ctx->hdr_tbl.end; + + if (mem->size == 0) { + IPAERR("hdr tbl empty\n"); + return -EPERM; + } + IPADBG_LOW("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end); + + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (entry->is_hdr_proc_ctx) + continue; + IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len, + entry->offset_entry->offset); + memcpy(mem->base + entry->offset_entry->offset, entry->hdr, + entry->hdr_len); + } + + return 0; +} + +static void ipa_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem, + u32 hdr_base_addr) +{ + struct ipa_hdr_proc_ctx_entry *entry; + + list_for_each_entry(entry, + &ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list, + link) { + IPADBG_LOW("processing type %d ofst=%d\n", + entry->type, entry->offset_entry->offset); + if (entry->type == IPA_HDR_PROC_NONE) { + struct ipa_hdr_proc_ctx_add_hdr_seq *ctx; + + ctx = (struct ipa_hdr_proc_ctx_add_hdr_seq *) + (mem->base + entry->offset_entry->offset); + 
ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = entry->hdr->hdr_len; + ctx->hdr_add.hdr_addr = (entry->hdr->is_hdr_proc_ctx) ? + entry->hdr->phys_base : + hdr_base_addr + + entry->hdr->offset_entry->offset; + IPADBG_LOW("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } else { + struct ipa_hdr_proc_ctx_add_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hdr_proc_ctx_add_hdr_cmd_seq *) + (mem->base + entry->offset_entry->offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = entry->hdr->hdr_len; + ctx->hdr_add.hdr_addr = (entry->hdr->is_hdr_proc_ctx) ? + entry->hdr->phys_base : + hdr_base_addr + + entry->hdr->offset_entry->offset; + IPADBG_LOW("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->cmd.length = 0; + if (entry->type == IPA_HDR_PROC_ETHII_TO_ETHII) + ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII; + else if (entry->type == IPA_HDR_PROC_ETHII_TO_802_3) + ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3; + else if (entry->type == IPA_HDR_PROC_802_3_TO_ETHII) + ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII; + else if (entry->type == IPA_HDR_PROC_802_3_TO_802_3) + ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3; + IPADBG_LOW("command id %d\n", ctx->cmd.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } + } +} + +/** + * ipa_generate_hdr_proc_ctx_hw_tbl() - + * generates the headers processing context table. + * @mem: [out] buffer to put the processing context table + * @aligned_mem: [out] actual processing context table (with alignment). + * Processing context table needs to be 8 Bytes aligned. 
+ * + * Returns: 0 on success, negative on failure + */ +static int ipa_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr, + struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem) +{ + u32 hdr_base_addr; + + mem->size = (ipa_ctx->hdr_proc_ctx_tbl.end) ? : 4; + + /* make sure table is aligned */ + mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE; + + IPADBG_LOW("tbl_sz=%d\n", ipa_ctx->hdr_proc_ctx_tbl.end); + + mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + return -ENOMEM; + } + + aligned_mem->phys_base = + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base); + aligned_mem->base = mem->base + + (aligned_mem->phys_base - mem->phys_base); + aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE; + memset(aligned_mem->base, 0, aligned_mem->size); + hdr_base_addr = (ipa_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) : + hdr_sys_addr; + ipa_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr); + + return 0; +} + +/* + * __ipa_commit_hdr() commits hdr to hardware + * This function needs to be called with a locked mutex. + */ +int __ipa_commit_hdr_v1_1(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer *mem; + struct ipa_hdr_init_local *cmd; + u16 len; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL); + if (!mem) { + IPAERR("failed to alloc memory object\n"); + goto fail_alloc_mem; + } + + /* the immediate command param size is same for both local and system */ + len = sizeof(struct ipa_hdr_init_local); + + /* + * we can use init_local ptr for init_system due to layout of the + * struct + */ + cmd = kmalloc(len, flag); + if (!cmd) { + IPAERR("failed to alloc immediate command object\n"); + goto fail_alloc_cmd; + } + + if (ipa_generate_hdr_hw_tbl(mem)) { + IPAERR("fail to generate HDR HW TBL\n"); + goto fail_hw_tbl_gen; + } + + if (ipa_ctx->hdr_tbl_lcl) { + if (mem->size > IPA_MEM_v1_RAM_HDR_SIZE) { + IPAERR("tbl too big, needed %d avail %d\n", mem->size, + IPA_MEM_v1_RAM_HDR_SIZE); + goto fail_send_cmd; + } + } else { + if (mem->size > IPA_MEM_PART(apps_hdr_size_ddr)) { + IPAERR("tbl too big, needed %d avail %d\n", mem->size, + IPA_MEM_PART(apps_hdr_size_ddr)); + goto fail_send_cmd; + } + } + + cmd->hdr_table_src_addr = mem->phys_base; + if (ipa_ctx->hdr_tbl_lcl) { + cmd->size_hdr_table = mem->size; + cmd->hdr_table_dst_addr = IPA_MEM_v1_RAM_HDR_OFST; + desc.opcode = IPA_HDR_INIT_LOCAL; + } else { + desc.opcode = IPA_HDR_INIT_SYSTEM; + } + desc.pyld = cmd; + desc.len = sizeof(struct ipa_hdr_init_local); + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + goto fail_send_cmd; + } + + if (ipa_ctx->hdr_tbl_lcl) { + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); + } else { + if (ipa_ctx->hdr_mem.phys_base) { + dma_free_coherent(ipa_ctx->pdev, ipa_ctx->hdr_mem.size, + ipa_ctx->hdr_mem.base, + ipa_ctx->hdr_mem.phys_base); + } + ipa_ctx->hdr_mem = *mem; + } + kfree(cmd); + kfree(mem); + + return 0; + +fail_send_cmd: + if (mem->base) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +fail_hw_tbl_gen: + kfree(cmd); 
+fail_alloc_cmd: + kfree(mem); +fail_alloc_mem: + + return -EPERM; +} + +int __ipa_commit_hdr_v2(void) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer mem; + struct ipa_hdr_init_system *cmd = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + int rc = -EFAULT; + + if (ipa_generate_hdr_hw_tbl(&mem)) { + IPAERR("fail to generate HDR HW TBL\n"); + goto end; + } + + if (ipa_ctx->hdr_tbl_lcl) { + if (mem.size > IPA_MEM_PART(apps_hdr_size)) { + IPAERR("tbl too big, needed %d avail %d\n", mem.size, + IPA_MEM_PART(apps_hdr_size)); + goto fail_send_cmd; + } else { + dma_cmd = kzalloc(sizeof(*dma_cmd), flag); + if (dma_cmd == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + dma_cmd->system_addr = mem.phys_base; + dma_cmd->size = mem.size; + dma_cmd->local_addr = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_hdr_ofst); + desc.opcode = IPA_DMA_SHARED_MEM; + desc.pyld = (void *)dma_cmd; + desc.len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + } + } else { + if (mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) { + IPAERR("tbl too big, needed %d avail %d\n", mem.size, + IPA_MEM_PART(apps_hdr_size_ddr)); + goto fail_send_cmd; + } else { + cmd = kzalloc(sizeof(*cmd), flag); + if (cmd == NULL) { + IPAERR("fail to alloc hdr init cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + cmd->hdr_table_addr = mem.phys_base; + desc.opcode = IPA_HDR_INIT_SYSTEM; + desc.pyld = (void *)cmd; + desc.len = sizeof(struct ipa_hdr_init_system); + } + } + + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size); + + if (ipa_send_cmd(1, &desc)) + IPAERR("fail to send immediate command\n"); + else + rc = 0; + + kfree(dma_cmd); + kfree(cmd); + +fail_send_cmd: + if (ipa_ctx->hdr_tbl_lcl) { + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + } else { + if (!rc) { + if (ipa_ctx->hdr_mem.phys_base) + 
dma_free_coherent(ipa_ctx->pdev, + ipa_ctx->hdr_mem.size, + ipa_ctx->hdr_mem.base, + ipa_ctx->hdr_mem.phys_base); + ipa_ctx->hdr_mem = mem; + } else { + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + } + } + +end: + return rc; +} + +int __ipa_commit_hdr_v2_5(void) +{ + struct ipa_desc desc[2]; + struct ipa_mem_buffer hdr_mem; + struct ipa_mem_buffer ctx_mem; + struct ipa_mem_buffer aligned_ctx_mem; + struct ipa_hdr_init_system *hdr_init_cmd = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL; + struct ipa_register_write *reg_write_cmd = NULL; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + int rc = -EFAULT; + u32 proc_ctx_size; + u32 proc_ctx_ofst; + u32 proc_ctx_size_ddr; + + memset(desc, 0, 2 * sizeof(struct ipa_desc)); + + if (ipa_generate_hdr_hw_tbl(&hdr_mem)) { + IPAERR("fail to generate HDR HW TBL\n"); + goto end; + } + + if (ipa_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem, + &aligned_ctx_mem)) { + IPAERR("fail to generate HDR PROC CTX HW TBL\n"); + goto end; + } + + if (ipa_ctx->hdr_tbl_lcl) { + if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) { + IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, + IPA_MEM_PART(apps_hdr_size)); + goto fail_send_cmd1; + } else { + dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), flag); + if (dma_cmd_hdr == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + dma_cmd_hdr->system_addr = hdr_mem.phys_base; + dma_cmd_hdr->size = hdr_mem.size; + dma_cmd_hdr->local_addr = + ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_hdr_ofst); + desc[0].opcode = IPA_DMA_SHARED_MEM; + desc[0].pyld = (void *)dma_cmd_hdr; + desc[0].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + } + } else { + if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) { + IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size, + IPA_MEM_PART(apps_hdr_size_ddr)); + goto 
fail_send_cmd1; + } else { + hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd), + flag); + if (hdr_init_cmd == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + hdr_init_cmd->hdr_table_addr = hdr_mem.phys_base; + desc[0].opcode = IPA_HDR_INIT_SYSTEM; + desc[0].pyld = (void *)hdr_init_cmd; + desc[0].len = sizeof(struct ipa_hdr_init_system); + } + } + desc[0].type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size); + + proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size); + proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst); + if (ipa_ctx->hdr_proc_ctx_tbl_lcl) { + if (aligned_ctx_mem.size > proc_ctx_size) { + IPAERR("tbl too big needed %d avail %d\n", + aligned_ctx_mem.size, + proc_ctx_size); + goto fail_send_cmd1; + } else { + dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx), + flag); + if (dma_cmd_ctx == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + dma_cmd_ctx->system_addr = aligned_ctx_mem.phys_base; + dma_cmd_ctx->size = aligned_ctx_mem.size; + dma_cmd_ctx->local_addr = + ipa_ctx->smem_restricted_bytes + + proc_ctx_ofst; + desc[1].opcode = IPA_DMA_SHARED_MEM; + desc[1].pyld = (void *)dma_cmd_ctx; + desc[1].len = + sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + } + } else { + proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr); + if (aligned_ctx_mem.size > proc_ctx_size_ddr) { + IPAERR("tbl too big, needed %d avail %d\n", + aligned_ctx_mem.size, + proc_ctx_size_ddr); + goto fail_send_cmd1; + } else { + reg_write_cmd = kzalloc(sizeof(*reg_write_cmd), + flag); + if (reg_write_cmd == NULL) { + IPAERR("fail to alloc immediate cmd\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + reg_write_cmd->offset = + IPA_SYS_PKT_PROC_CNTXT_BASE_OFST; + reg_write_cmd->value = aligned_ctx_mem.phys_base; + reg_write_cmd->value_mask = + ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1); + desc[1].pyld = (void *)reg_write_cmd; + desc[1].opcode = 
IPA_REGISTER_WRITE; + desc[1].len = sizeof(*reg_write_cmd); + } + } + desc[1].type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size); + + if (ipa_send_cmd(2, desc)) + IPAERR("fail to send immediate command\n"); + else + rc = 0; + +fail_send_cmd1: + + kfree(dma_cmd_hdr); + kfree(hdr_init_cmd); + kfree(dma_cmd_ctx); + kfree(reg_write_cmd); + + if (ipa_ctx->hdr_proc_ctx_tbl_lcl) { + dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, + ctx_mem.base, + ctx_mem.phys_base); + } else { + if (!rc) { + if (ipa_ctx->hdr_proc_ctx_mem.phys_base) + dma_free_coherent(ipa_ctx->pdev, + ipa_ctx->hdr_proc_ctx_mem.size, + ipa_ctx->hdr_proc_ctx_mem.base, + ipa_ctx->hdr_proc_ctx_mem.phys_base); + ipa_ctx->hdr_proc_ctx_mem = ctx_mem; + } else { + dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, + ctx_mem.base, + ctx_mem.phys_base); + } + } + + if (ipa_ctx->hdr_tbl_lcl) { + dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, + hdr_mem.base, + hdr_mem.phys_base); + } else { + if (!rc) { + if (ipa_ctx->hdr_mem.phys_base) + dma_free_coherent(ipa_ctx->pdev, + ipa_ctx->hdr_mem.size, + ipa_ctx->hdr_mem.base, + ipa_ctx->hdr_mem.phys_base); + ipa_ctx->hdr_mem = hdr_mem; + } else { + dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, + hdr_mem.base, + hdr_mem.phys_base); + } + } +end: + return rc; +} + +/** + * __ipa_commit_hdr_v2_6L() - Commits a header to the IPA HW. + * + * This function needs to be called with a locked mutex. 
+ */ +int __ipa_commit_hdr_v2_6L(void) +{ + /* Same implementation as IPAv2 */ + return __ipa_commit_hdr_v2(); +} + +static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, + bool add_ref_hdr, bool user_only) +{ + struct ipa_hdr_entry *hdr_entry; + struct ipa_hdr_proc_ctx_entry *entry; + struct ipa_hdr_proc_ctx_offset_entry *offset = NULL; + u32 bin; + struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl; + int id; + int needed_len; + int mem_size; + + IPADBG_LOW("processing type %d hdr_hdl %d\n", + proc_ctx->type, proc_ctx->hdr_hdl); + + if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) { + IPAERR_RL("invalid processing type %d\n", proc_ctx->type); + return -EINVAL; + } + + hdr_entry = ipa_id_find(proc_ctx->hdr_hdl); + if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) { + IPAERR_RL("hdr_hdl is invalid\n"); + return -EINVAL; + } + + entry = kmem_cache_zalloc(ipa_ctx->hdr_proc_ctx_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc proc_ctx object\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&entry->link); + + entry->type = proc_ctx->type; + entry->hdr = hdr_entry; + if (add_ref_hdr) + hdr_entry->ref_cnt++; + entry->cookie = IPA_PROC_HDR_COOKIE; + entry->ipacm_installed = user_only; + + needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ? + sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) : + sizeof(struct ipa_hdr_proc_ctx_add_hdr_cmd_seq); + + if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) { + bin = IPA_HDR_PROC_CTX_BIN0; + } else if (needed_len <= + ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) { + bin = IPA_HDR_PROC_CTX_BIN1; + } else { + IPAERR_RL("unexpected needed len %d\n", needed_len); + WARN_ON(1); + goto bad_len; + } + + mem_size = (ipa_ctx->hdr_proc_ctx_tbl_lcl) ? 
+ IPA_MEM_PART(apps_hdr_proc_ctx_size) : + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr); + if (list_empty(&htbl->head_free_offset_list[bin])) { + if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) { + IPAERR_RL("hdr proc ctx table overflow\n"); + goto bad_len; + } + + offset = kmem_cache_zalloc(ipa_ctx->hdr_proc_ctx_offset_cache, + GFP_KERNEL); + if (!offset) { + IPAERR("failed to alloc offset object\n"); + goto bad_len; + } + INIT_LIST_HEAD(&offset->link); + /* + * for a first item grow, set the bin and offset which are set + * in stone + */ + offset->offset = htbl->end; + offset->bin = bin; + offset->ipacm_installed = user_only; + htbl->end += ipa_hdr_proc_ctx_bin_sz[bin]; + list_add(&offset->link, + &htbl->head_offset_list[bin]); + } else { + /* get the first free slot */ + offset = + list_first_entry(&htbl->head_free_offset_list[bin], + struct ipa_hdr_proc_ctx_offset_entry, link); + offset->ipacm_installed = user_only; + list_move(&offset->link, &htbl->head_offset_list[bin]); + } + + entry->offset_entry = offset; + list_add(&entry->link, &htbl->head_proc_ctx_entry_list); + htbl->proc_ctx_cnt++; + IPADBG_LOW("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len, + htbl->proc_ctx_cnt, offset->offset); + + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to alloc id\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + entry->id = id; + proc_ctx->proc_ctx_hdl = id; + entry->ref_cnt++; + + return 0; + +ipa_insert_failed: + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + list_del(&entry->link); + htbl->proc_ctx_cnt--; + +bad_len: + if (add_ref_hdr) + hdr_entry->ref_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry); + return -EPERM; +} + + +static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user) +{ + struct ipa_hdr_entry *entry; + struct ipa_hdr_offset_entry *offset = NULL; + u32 bin; + struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl; + int id; + int mem_size; + gfp_t 
flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) { + IPAERR_RL("bad parm\n"); + goto error; + } + + if (!HDR_TYPE_IS_VALID(hdr->type)) { + IPAERR_RL("invalid hdr type %d\n", hdr->type); + goto error; + } + + entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, flag); + if (!entry) { + IPAERR("failed to alloc hdr object\n"); + goto error; + } + + INIT_LIST_HEAD(&entry->link); + + memcpy(entry->hdr, hdr->hdr, hdr->hdr_len); + entry->hdr_len = hdr->hdr_len; + strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX); + entry->is_partial = hdr->is_partial; + entry->type = hdr->type; + entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; + entry->eth2_ofst = hdr->eth2_ofst; + entry->cookie = IPA_HDR_COOKIE; + entry->ipacm_installed = user; + + if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) + bin = IPA_HDR_BIN0; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1]) + bin = IPA_HDR_BIN1; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2]) + bin = IPA_HDR_BIN2; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3]) + bin = IPA_HDR_BIN3; + else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4]) + bin = IPA_HDR_BIN4; + else { + IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len); + goto bad_hdr_len; + } + + mem_size = (ipa_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) : + IPA_MEM_PART(apps_hdr_size_ddr); + + if (list_empty(&htbl->head_free_offset_list[bin])) { + /* + * if header does not fit to table, place it in DDR + * This is valid for IPA 2.5 and on, + * with the exception of IPA2.6L. 
+ */ + if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) { + if (ipa_ctx->ipa_hw_type != IPA_HW_v2_5) { + IPAERR("not enough room for header\n"); + goto bad_hdr_len; + } else { + entry->is_hdr_proc_ctx = true; + entry->phys_base = dma_map_single(ipa_ctx->pdev, + entry->hdr, + entry->hdr_len, + DMA_TO_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, + entry->phys_base)) { + IPAERR("dma_map_single failureed\n"); + goto fail_dma_mapping; + } + } + } else { + entry->is_hdr_proc_ctx = false; + offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache, + GFP_KERNEL); + if (!offset) { + IPAERR("failed to alloc hdr offset object\n"); + goto bad_hdr_len; + } + INIT_LIST_HEAD(&offset->link); + /* + * for a first item grow, set the bin and offset which + * are set in stone + */ + offset->offset = htbl->end; + offset->bin = bin; + htbl->end += ipa_hdr_bin_sz[bin]; + list_add(&offset->link, + &htbl->head_offset_list[bin]); + entry->offset_entry = offset; + offset->ipacm_installed = user; + } + } else { + entry->is_hdr_proc_ctx = false; + /* get the first free slot */ + offset = + list_first_entry(&htbl->head_free_offset_list[bin], + struct ipa_hdr_offset_entry, link); + list_move(&offset->link, &htbl->head_offset_list[bin]); + entry->offset_entry = offset; + offset->ipacm_installed = user; + } + + list_add(&entry->link, &htbl->head_hdr_entry_list); + htbl->hdr_cnt++; + if (entry->is_hdr_proc_ctx) + IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n", + hdr->hdr_len, + htbl->hdr_cnt, + &entry->phys_base); + else + IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", + hdr->hdr_len, + htbl->hdr_cnt, + entry->offset_entry->offset); + + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to alloc id\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + entry->id = id; + hdr->hdr_hdl = id; + entry->ref_cnt++; + + if (entry->is_hdr_proc_ctx) { + struct ipa_hdr_proc_ctx_add proc_ctx; + + IPADBG("adding processing context for header %s\n", hdr->name); + proc_ctx.type = 
IPA_HDR_PROC_NONE; + proc_ctx.hdr_hdl = id; + if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) { + IPAERR("failed to add hdr proc ctx\n"); + goto fail_add_proc_ctx; + } + entry->proc_ctx = ipa_id_find(proc_ctx.proc_ctx_hdl); + } + + return 0; + +fail_add_proc_ctx: + entry->ref_cnt--; + hdr->hdr_hdl = 0; + ipa_id_remove(id); +ipa_insert_failed: + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa_ctx->pdev, entry->phys_base, + entry->hdr_len, DMA_TO_DEVICE); + } else { + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + } + htbl->hdr_cnt--; + list_del(&entry->link); + +fail_dma_mapping: + entry->is_hdr_proc_ctx = false; +bad_hdr_len: + entry->cookie = 0; + kmem_cache_free(ipa_ctx->hdr_cache, entry); +error: + return -EPERM; +} + +static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl, + bool release_hdr, bool by_user) +{ + struct ipa_hdr_proc_ctx_entry *entry; + struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl; + + entry = ipa_id_find(proc_ctx_hdl); + if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + IPADBG("del ctx proc cnt=%d ofst=%d\n", + htbl->proc_ctx_cnt, entry->offset_entry->offset); + + if (by_user && entry->user_deleted) { + IPAERR_RL("proc_ctx already deleted by user\n"); + return -EINVAL; + } + + if (by_user) + entry->user_deleted = true; + + if (--entry->ref_cnt) { + IPADBG("proc_ctx_hdl %x ref_cnt %d\n", + proc_ctx_hdl, entry->ref_cnt); + return 0; + } + + if (release_hdr) + __ipa_del_hdr(entry->hdr->id, false); + + /* move the offset entry to appropriate free list */ + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[entry->offset_entry->bin]); + list_del(&entry->link); + htbl->proc_ctx_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry); + + /* remove the handle from the database */ + ipa_id_remove(proc_ctx_hdl); + + return 0; +} + + +int __ipa_del_hdr(u32 
hdr_hdl, bool by_user) +{ + struct ipa_hdr_entry *entry; + struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl; + + entry = ipa_id_find(hdr_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + if (entry->is_hdr_proc_ctx) + IPADBG("del hdr of sz=%d hdr_cnt=%d phys_base=%pa\n", + entry->hdr_len, htbl->hdr_cnt, &entry->phys_base); + else + IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len, + htbl->hdr_cnt, entry->offset_entry->offset); + + if (by_user && entry->user_deleted) { + IPAERR_RL("hdr already deleted by user\n"); + return -EINVAL; + } + + if (by_user) { + if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) { + IPADBG("Trying to delete hdr %s offset=%u\n", + entry->name, entry->offset_entry->offset); + if (!entry->offset_entry->offset) { + IPAERR("User cannot delete default header\n"); + return -EPERM; + } + } + entry->user_deleted = true; + } + + if (--entry->ref_cnt) { + IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt); + return 0; + } + + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + __ipa_del_hdr_proc_ctx(entry->proc_ctx->id, false, false); + } else { + /* move the offset entry to appropriate free list */ + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[entry->offset_entry->bin]); + } + list_del(&entry->link); + htbl->hdr_cnt--; + entry->cookie = 0; + kmem_cache_free(ipa_ctx->hdr_cache, entry); + + /* remove the handle from the database */ + ipa_id_remove(hdr_hdl); + + return 0; +} + +/** + * ipa2_add_hdr() - add the specified headers to SW and optionally commit them + * to IPA HW + * @hdrs: [inout] set of headers to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs) +{ + return ipa2_add_hdr_usr(hdrs, false); +} + 
+/** + * ipa2_add_hdr_usr() - add the specified headers to SW + * and optionally commit them to IPA HW + * @hdrs: [inout] set of headers to add + * @user_only: [in] indicate installed from user + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only) +{ + int i; + int result = -EFAULT; + + if (unlikely(!ipa_ctx)) { + IPAERR_RL("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (hdrs == NULL || hdrs->num_hdrs == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + IPADBG("adding %d headers to IPA driver internal data struct\n", + hdrs->num_hdrs); + for (i = 0; i < hdrs->num_hdrs; i++) { + if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) { + IPAERR_RL("failed to add hdr %d\n", i); + hdrs->hdr[i].status = -1; + } else { + hdrs->hdr[i].status = 0; + } + } + + if (hdrs->commit) { + IPADBG("committing all headers to IPA core"); + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return result; +} +/** + * ipa2_del_hdr_by_user() - Remove the specified headers + * from SW and optionally commit them to IPA HW + * @hdls: [inout] set of headers to delete + * @by_user: Operation requested by user? 
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user) +{ + int i; + int result = -EFAULT; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (hdls == NULL || hdls->num_hdls == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa_del_hdr(hdls->hdl[i].hdl, by_user)) { + IPAERR_RL("failed to del hdr %i\n", i); + hdls->hdl[i].status = -1; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) { + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa2_del_hdr() - Remove the specified headers from SW + * and optionally commit them to IPA HW + * @hdls: [inout] set of headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls) +{ + return ipa2_del_hdr_by_user(hdls, false); +} + +/** + * ipa2_add_hdr_proc_ctx() - add the specified headers to SW + * and optionally commit them to IPA HW + * @proc_ctxs: [inout] set of processing context headers to add + * @user_only: [in] indicate installed by user-space module + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only) +{ + int i; + int result = -EFAULT; + + if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 || + ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) { + IPAERR_RL("Processing context not supported on IPA HW %d\n", + ipa_ctx->ipa_hw_type); + return -EFAULT; + } + + if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + 
mutex_lock(&ipa_ctx->lock); + IPADBG("adding %d header processing contexts to IPA driver\n", + proc_ctxs->num_proc_ctxs); + for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) { + if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], + true, user_only)) { + IPAERR_RL("failed to add hdr proc ctx %d\n", i); + proc_ctxs->proc_ctx[i].status = -1; + } else { + proc_ctxs->proc_ctx[i].status = 0; + } + } + + if (proc_ctxs->commit) { + IPADBG("committing all headers to IPA core"); + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa2_del_hdr_proc_ctx_by_user() - + * Remove the specified processing context headers from SW and + * optionally commit them to IPA HW. + * @hdls: [inout] set of processing context headers to delete + * @by_user: Operation requested by user? + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, + bool by_user) +{ + int i; + int result; + + if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 || + ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) { + IPAERR("Processing context not supported on IPA HW %d\n", + ipa_ctx->ipa_hw_type); + return -EFAULT; + } + + if (hdls == NULL || hdls->num_hdls == 0) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) { + IPAERR_RL("failed to del hdr %i\n", i); + hdls->hdl[i].status = -1; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) { + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + result = -EPERM; + goto bail; + } + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa2_del_hdr_proc_ctx() - + * Remove the specified processing context headers from SW and + * optionally commit them to IPA HW. 
+ * @hdls: [inout] set of processing context headers to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls) +{ + return ipa2_del_hdr_proc_ctx_by_user(hdls, false); +} + +/** + * ipa2_commit_hdr() - commit to IPA HW the current header table in SW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_commit_hdr(void) +{ + int result = -EFAULT; + + /* + * issue a commit on the routing module since routing rules point to + * header table entries + */ + if (ipa2_commit_rt(IPA_IP_v4)) + return -EPERM; + if (ipa2_commit_rt(IPA_IP_v6)) + return -EPERM; + + mutex_lock(&ipa_ctx->lock); + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa2_reset_hdr() - reset the current header table in SW (does not commit to + * HW) + * + * @user_only: [in] indicate delete rules installed by userspace + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_reset_hdr(bool user_only) +{ + struct ipa_hdr_entry *entry; + struct ipa_hdr_entry *next; + struct ipa_hdr_proc_ctx_entry *ctx_entry; + struct ipa_hdr_proc_ctx_entry *ctx_next; + struct ipa_hdr_offset_entry *off_entry; + struct ipa_hdr_offset_entry *off_next; + struct ipa_hdr_proc_ctx_offset_entry *ctx_off_entry; + struct ipa_hdr_proc_ctx_offset_entry *ctx_off_next; + struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl; + struct ipa_hdr_proc_ctx_tbl *htbl_proc = &ipa_ctx->hdr_proc_ctx_tbl; + int i; + + /* + * issue a reset on the routing module since routing rules point to + * header table entries + */ + if (ipa2_reset_rt(IPA_IP_v4, user_only)) + IPAERR("fail to reset v4 rt\n"); + if (ipa2_reset_rt(IPA_IP_v6, user_only)) + IPAERR("fail to reset v6 rt\n"); + + 
mutex_lock(&ipa_ctx->lock); + IPADBG("reset hdr\n"); + list_for_each_entry_safe(entry, next, + &ipa_ctx->hdr_tbl.head_hdr_entry_list, link) { + + /* do not remove the default header */ + if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) { + IPADBG("Trying to remove hdr %s offset=%u\n", + entry->name, entry->offset_entry->offset); + if (!entry->offset_entry->offset) { + if (entry->is_hdr_proc_ctx) { + mutex_unlock(&ipa_ctx->lock); + WARN_ON(1); + IPAERR("default header is proc ctx\n"); + return -EFAULT; + } + IPADBG("skip default header\n"); + continue; + } + } + + if (ipa_id_find(entry->id) == NULL) { + mutex_unlock(&ipa_ctx->lock); + WARN_ON(1); + return -EFAULT; + } + + if (!user_only || entry->ipacm_installed) { + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa_ctx->pdev, + entry->phys_base, + entry->hdr_len, + DMA_TO_DEVICE); + entry->proc_ctx = NULL; + } else { + /* move the offset entry to free list */ + entry->offset_entry->ipacm_installed = 0; + list_move(&entry->offset_entry->link, + &htbl->head_free_offset_list[ + entry->offset_entry->bin]); + } + list_del(&entry->link); + htbl->hdr_cnt--; + entry->ref_cnt = 0; + entry->cookie = 0; + + /* remove the handle from the database */ + ipa_id_remove(entry->id); + kmem_cache_free(ipa_ctx->hdr_cache, entry); + } + } + + /* only clean up offset_list and free_offset_list on global reset */ + if (!user_only) { + for (i = 0; i < IPA_HDR_BIN_MAX; i++) { + list_for_each_entry_safe(off_entry, off_next, + &ipa_ctx->hdr_tbl.head_offset_list[i], + link) { + /** + * do not remove the default exception + * header which is at offset 0 + */ + if (off_entry->offset == 0) + continue; + list_del(&off_entry->link); + kmem_cache_free(ipa_ctx->hdr_offset_cache, + off_entry); + } + list_for_each_entry_safe(off_entry, off_next, + &ipa_ctx->hdr_tbl.head_free_offset_list[i], + link) { + list_del(&off_entry->link); + kmem_cache_free(ipa_ctx->hdr_offset_cache, + off_entry); + } + } + /* there is one header of size 8 */ + 
ipa_ctx->hdr_tbl.end = 8; + ipa_ctx->hdr_tbl.hdr_cnt = 1; + } + + IPADBG("reset hdr proc ctx\n"); + list_for_each_entry_safe( + ctx_entry, + ctx_next, + &ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list, + link) { + + if (ipa_id_find(ctx_entry->id) == NULL) { + mutex_unlock(&ipa_ctx->lock); + WARN_ON_RATELIMIT_IPA(1); + return -EFAULT; + } + + if (!user_only || + ctx_entry->ipacm_installed) { + /* move the offset entry to appropriate free list */ + list_move(&ctx_entry->offset_entry->link, + &htbl_proc->head_free_offset_list[ + ctx_entry->offset_entry->bin]); + list_del(&ctx_entry->link); + htbl_proc->proc_ctx_cnt--; + ctx_entry->ref_cnt = 0; + ctx_entry->cookie = 0; + + /* remove the handle from the database */ + ipa_id_remove(ctx_entry->id); + kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, + ctx_entry); + } + } + /* only clean up offset_list and free_offset_list on global reset */ + if (!user_only) { + for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) { + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + &ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i], + link) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } + list_for_each_entry_safe(ctx_off_entry, ctx_off_next, + &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i], + link) { + list_del(&ctx_off_entry->link); + kmem_cache_free( + ipa_ctx->hdr_proc_ctx_offset_cache, + ctx_off_entry); + } + } + ipa_ctx->hdr_proc_ctx_tbl.end = 0; + ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0; + } + + /* commit the change to IPA-HW */ + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + IPAERR_RL("fail to commit hdr\n"); + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + mutex_unlock(&ipa_ctx->lock); + return 0; +} + +static struct ipa_hdr_entry *__ipa_find_hdr(const char *name) +{ + struct ipa_hdr_entry *entry; + + list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list, + link) { + if (!strcmp(name, entry->name)) + return entry; + 
} + + return NULL; +} + +/** + * ipa2_get_hdr() - Lookup the specified header resource + * @lookup: [inout] header to lookup and its handle + * + * lookup the specified header resource and return handle if it exists + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + * Caller should call ipa_put_hdr later if this function succeeds + */ +int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup) +{ + struct ipa_hdr_entry *entry; + int result = -1; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (lookup == NULL) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa_ctx->lock); + lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_hdr(lookup->name); + if (entry) { + lookup->hdl = entry->id; + result = 0; + } + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * __ipa_release_hdr() - drop reference to header and cause + * deletion if reference count permits + * @hdr_hdl: [in] handle of header to be released + * + * Returns: 0 on success, negative on failure + */ +int __ipa_release_hdr(u32 hdr_hdl) +{ + int result = 0; + + if (__ipa_del_hdr(hdr_hdl, false)) { + IPADBG("fail to del hdr %x\n", hdr_hdl); + result = -EFAULT; + goto bail; + } + + /* commit for put */ + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + IPAERR("fail to commit hdr\n"); + result = -EFAULT; + goto bail; + } + +bail: + return result; +} + +/** + * __ipa_release_hdr_proc_ctx() - drop reference to processing context + * and cause deletion if reference count permits + * @proc_ctx_hdl: [in] handle of processing context to be released + * + * Returns: 0 on success, negative on failure + */ +int __ipa_release_hdr_proc_ctx(u32 proc_ctx_hdl) +{ + int result = 0; + + if (__ipa_del_hdr_proc_ctx(proc_ctx_hdl, true, false)) { + IPADBG("fail to del hdr %x\n", proc_ctx_hdl); + result = -EFAULT; + goto bail; + } + + /* commit for put */ + if (ipa_ctx->ctrl->ipa_commit_hdr()) { + 
IPAERR("fail to commit hdr\n"); + result = -EFAULT; + goto bail; + } + +bail: + return result; +} + +/** + * ipa2_put_hdr() - Release the specified header handle + * @hdr_hdl: [in] the header handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_put_hdr(u32 hdr_hdl) +{ + struct ipa_hdr_entry *entry; + int result = -EFAULT; + + mutex_lock(&ipa_ctx->lock); + + entry = ipa_id_find(hdr_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto bail; + } + + if (entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("invalid header entry\n"); + result = -EINVAL; + goto bail; + } + + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa2_copy_hdr() - Lookup the specified header resource and return a copy of + * it + * @copy: [inout] header to lookup and its copy + * + * lookup the specified header resource and return a copy of it (along with its + * attributes) if it exists, this would be called for partial headers + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy) +{ + struct ipa_hdr_entry *entry; + int result = -EFAULT; + + if (copy == NULL) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa_ctx->lock); + copy->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_hdr(copy->name); + if (entry) { + memcpy(copy->hdr, entry->hdr, entry->hdr_len); + copy->hdr_len = entry->hdr_len; + copy->type = entry->type; + copy->is_partial = entry->is_partial; + copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid; + copy->eth2_ofst = entry->eth2_ofst; + result = 0; + } + mutex_unlock(&ipa_ctx->lock); + + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h new file mode 100644 index 000000000000..1b1ebea08e92 --- /dev/null +++ 
b/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h @@ -0,0 +1,443 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2015, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_HW_DEFS_H +#define _IPA_HW_DEFS_H +#include + +/* This header defines various HW related data types */ + +/* immediate command op-codes */ +#define IPA_DECIPH_INIT (1) +#define IPA_PPP_FRM_INIT (2) +#define IPA_IP_V4_FILTER_INIT (3) +#define IPA_IP_V6_FILTER_INIT (4) +#define IPA_IP_V4_NAT_INIT (5) +#define IPA_IP_V6_NAT_INIT (6) +#define IPA_IP_V4_ROUTING_INIT (7) +#define IPA_IP_V6_ROUTING_INIT (8) +#define IPA_HDR_INIT_LOCAL (9) +#define IPA_HDR_INIT_SYSTEM (10) +#define IPA_DECIPH_SETUP (11) +#define IPA_REGISTER_WRITE (12) +#define IPA_NAT_DMA (14) +#define IPA_IP_PACKET_TAG (15) +#define IPA_IP_PACKET_INIT (16) +#define IPA_DMA_SHARED_MEM (19) +#define IPA_IP_PACKET_TAG_STATUS (20) + +/* Processing context TLV type */ +#define IPA_PROC_CTX_TLV_TYPE_END 0 +#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1 +#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3 + + +/** + * struct ipa_flt_rule_hw_hdr - HW header of IPA filter rule + * @word: filtering rule properties + * @en_rule: enable rule + * @action: post routing action + * @rt_tbl_idx: index in routing table + * @retain_hdr: added to add back to the packet the header removed + * as part of header removal. This will be done as part of + * header insertion block. + * @to_uc: direct IPA to sent the packet to uc instead of + * the intended destination. 
This will be performed just after + * routing block processing, so routing will have determined + * destination end point and uc will receive this information + * together with the packet as part of the HW packet TX commands + * @rsvd: reserved bits + */ +struct ipa_flt_rule_hw_hdr { + union { + u32 word; + struct { + u32 en_rule:16; + u32 action:5; + u32 rt_tbl_idx:5; + u32 retain_hdr:1; + u32 to_uc:1; + u32 rsvd:4; + } hdr; + } u; +}; + +/** + * struct ipa_rt_rule_hw_hdr - HW header of IPA routing rule + * @word: filtering rule properties + * @en_rule: enable rule + * @pipe_dest_idx: destination pipe index + * @system: changed from local to system due to HW change + * @hdr_offset: header offset + * @proc_ctx: whether hdr_offset points to header table or to + * header processing context table + */ +struct ipa_rt_rule_hw_hdr { + union { + u32 word; + struct { + u32 en_rule:16; + u32 pipe_dest_idx:5; + u32 system:1; + u32 hdr_offset:10; + } hdr; + struct { + u32 en_rule:16; + u32 pipe_dest_idx:5; + u32 system:1; + u32 hdr_offset:9; + u32 proc_ctx:1; + } hdr_v2_5; + } u; +}; + +/** + * struct ipa_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload + * @ipv4_rules_addr: address of ipv4 rules + * @size_ipv4_rules: size of the above + * @ipv4_addr: ipv4 address + * @rsvd: reserved + */ +struct ipa_ip_v4_filter_init { + u64 ipv4_rules_addr:32; + u64 size_ipv4_rules:12; + u64 ipv4_addr:16; + u64 rsvd:4; +}; + +/** + * struct ipa_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload + * @ipv6_rules_addr: address of ipv6 rules + * @size_ipv6_rules: size of the above + * @ipv6_addr: ipv6 address + */ +struct ipa_ip_v6_filter_init { + u64 ipv6_rules_addr:32; + u64 size_ipv6_rules:16; + u64 ipv6_addr:16; +}; + +/** + * struct ipa_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload + * @ipv4_rules_addr: address of ipv4 rules + * @size_ipv4_rules: size of the above + * @ipv4_addr: ipv4 address + * @rsvd: reserved + */ +struct ipa_ip_v4_routing_init { + u64 
ipv4_rules_addr:32; + u64 size_ipv4_rules:12; + u64 ipv4_addr:16; + u64 rsvd:4; +}; + +/** + * struct ipa_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload + * @ipv6_rules_addr: address of ipv6 rules + * @size_ipv6_rules: size of the above + * @ipv6_addr: ipv6 address + */ +struct ipa_ip_v6_routing_init { + u64 ipv6_rules_addr:32; + u64 size_ipv6_rules:16; + u64 ipv6_addr:16; +}; + +/** + * struct ipa_hdr_init_local - IPA_HDR_INIT_LOCAL command payload + * @hdr_table_src_addr: word address of header table in system memory where the + * table starts (use as source for memory copying) + * @size_hdr_table: size of the above (in bytes) + * @hdr_table_dst_addr: header address in IPA sram (used as dst for memory copy) + * @rsvd: reserved + */ +struct ipa_hdr_init_local { + u64 hdr_table_src_addr:32; + u64 size_hdr_table:12; + u64 hdr_table_dst_addr:16; + u64 rsvd:4; +}; + +/** + * struct ipa_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload + * @hdr_table_addr: word address of header table in system memory where the + * table starts (use as source for memory copying) + * @rsvd: reserved + */ +struct ipa_hdr_init_system { + u64 hdr_table_addr:32; + u64 rsvd:32; +}; + +/** + * struct ipa_hdr_proc_ctx_tlv - + * HW structure of IPA processing context header - TLV part + * @type: 0 - end type + * 1 - header addition type + * 3 - processing command type + * @length: number of bytes after tlv + * for type: + * 0 - needs to be 0 + * 1 - header addition length + * 3 - number of 32B including type and length. 
+ * @value: specific value for type + * for type: + * 0 - needs to be 0 + * 1 - header length + * 3 - command ID (see IPA_HDR_UCP_* definitions) + */ +struct ipa_hdr_proc_ctx_tlv { + u32 type:8; + u32 length:8; + u32 value:16; +}; + +/** + * struct ipa_hdr_proc_ctx_hdr_add - + * HW structure of IPA processing context - add header tlv + * @tlv: IPA processing context TLV + * @hdr_addr: processing context header address + */ +struct ipa_hdr_proc_ctx_hdr_add { + struct ipa_hdr_proc_ctx_tlv tlv; + u32 hdr_addr; +}; + +#define IPA_A5_MUX_HDR_EXCP_FLAG_IP BIT(7) +#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT BIT(6) +#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT BIT(5) +#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG BIT(4) +#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED BIT(3) +#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL BIT(2) + +/** + * struct ipa_a5_mux_hdr - A5 MUX header definition + * @interface_id: interface ID + * @src_pipe_index: source pipe index + * @flags: flags + * @metadata: metadata + * + * A5 MUX header is in BE, A5 runs in LE. 
This struct definition + * allows A5 SW to correctly parse the header + */ +struct ipa_a5_mux_hdr { + u16 interface_id; + u8 src_pipe_index; + u8 flags; + u32 metadata; +}; + +/** + * struct ipa_register_write - IPA_REGISTER_WRITE command payload + * @rsvd: reserved + * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear + * @offset: offset from IPA base address + * @value: value to write to register + * @value_mask: mask specifying which value bits to write to the register + */ +struct ipa_register_write { + u32 rsvd:15; + u32 skip_pipeline_clear:1; + u32 offset:16; + u32 value:32; + u32 value_mask:32; +}; + +/** + * struct ipa_nat_dma - IPA_NAT_DMA command payload + * @table_index: NAT table index + * @rsvd1: reserved + * @base_addr: base address + * @rsvd2: reserved + * @offset: offset + * @data: metadata + * @rsvd3: reserved + */ +struct ipa_nat_dma { + u64 table_index:3; + u64 rsvd1:1; + u64 base_addr:2; + u64 rsvd2:2; + u64 offset:32; + u64 data:16; + u64 rsvd3:8; +}; + +/** + * struct ipa_nat_dma - IPA_IP_PACKET_INIT command payload + * @destination_pipe_index: destination pipe index + * @rsvd1: reserved + * @metadata: metadata + * @rsvd2: reserved + */ +struct ipa_ip_packet_init { + u64 destination_pipe_index:5; + u64 rsvd1:3; + u64 metadata:32; + u64 rsvd2:24; +}; + +/** + * struct ipa_nat_dma - IPA_IP_V4_NAT_INIT command payload + * @ipv4_rules_addr: ipv4 rules address + * @ipv4_expansion_rules_addr: ipv4 expansion rules address + * @index_table_addr: index tables address + * @index_table_expansion_addr: index expansion table address + * @table_index: index in table + * @ipv4_rules_addr_type: ipv4 address type + * @ipv4_expansion_rules_addr_type: ipv4 expansion address type + * @index_table_addr_type: index table address type + * @index_table_expansion_addr_type: index expansion table type + * @size_base_tables: size of base tables + * @size_expansion_tables: size of expansion tables + * @rsvd2: reserved + * @public_ip_addr: public IP address + */ 
+struct ipa_ip_v4_nat_init { + u64 ipv4_rules_addr:32; + u64 ipv4_expansion_rules_addr:32; + u64 index_table_addr:32; + u64 index_table_expansion_addr:32; + u64 table_index:3; + u64 rsvd1:1; + u64 ipv4_rules_addr_type:1; + u64 ipv4_expansion_rules_addr_type:1; + u64 index_table_addr_type:1; + u64 index_table_expansion_addr_type:1; + u64 size_base_tables:12; + u64 size_expansion_tables:10; + u64 rsvd2:2; + u64 public_ip_addr:32; +}; + +/** + * struct ipa_ip_packet_tag - IPA_IP_PACKET_TAG command payload + * @tag: tag value returned with response + */ +struct ipa_ip_packet_tag { + u32 tag; +}; + +/** + * struct ipa_ip_packet_tag_status - IPA_IP_PACKET_TAG_STATUS command payload + * @rsvd: reserved + * @tag_f_1: tag value returned within status + * @tag_f_2: tag value returned within status + */ +struct ipa_ip_packet_tag_status { + u32 rsvd:16; + u32 tag_f_1:16; + u32 tag_f_2:32; +}; + +/*! @brief Struct for the IPAv2.0 and IPAv2.5 UL packet status header */ +struct ipa_hw_pkt_status { + u32 status_opcode:8; + u32 exception:8; + u32 status_mask:16; + u32 pkt_len:16; + u32 endp_src_idx:5; + u32 reserved_1:3; + u32 endp_dest_idx:5; + u32 reserved_2:3; + u32 metadata:32; + union { + struct { + u32 filt_local:1; + u32 filt_global:1; + u32 filt_pipe_idx:5; + u32 filt_match:1; + u32 filt_rule_idx:6; + u32 ret_hdr:1; + u32 reserved_3:1; + u32 tag_f_1:16; + + } ipa_hw_v2_0_pkt_status; + struct { + u32 filt_local:1; + u32 filt_global:1; + u32 filt_pipe_idx:5; + u32 ret_hdr:1; + u32 filt_rule_idx:8; + u32 tag_f_1:16; + + } ipa_hw_v2_5_pkt_status; + }; + + u32 tag_f_2:32; + u32 time_day_ctr:32; + u32 nat_hit:1; + u32 nat_tbl_idx:13; + u32 nat_type:2; + u32 route_local:1; + u32 route_tbl_idx:5; + u32 route_match:1; + u32 ucp:1; + u32 route_rule_idx:8; + u32 hdr_local:1; + u32 hdr_offset:10; + u32 frag_hit:1; + u32 frag_rule:4; + u32 reserved_4:16; +}; + +#define IPA_PKT_STATUS_SIZE 32 + +/*! 
@brief Status header opcodes */ +enum ipa_hw_status_opcode { + IPA_HW_STATUS_OPCODE_MIN, + IPA_HW_STATUS_OPCODE_PACKET = IPA_HW_STATUS_OPCODE_MIN, + IPA_HW_STATUS_OPCODE_NEW_FRAG_RULE, + IPA_HW_STATUS_OPCODE_DROPPED_PACKET, + IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET, + IPA_HW_STATUS_OPCODE_XLAT_PACKET = 6, + IPA_HW_STATUS_OPCODE_MAX +}; + +/*! @brief Possible Masks received in status */ +enum ipa_hw_pkt_status_mask { + IPA_HW_PKT_STATUS_MASK_FRAG_PROCESS = 0x1, + IPA_HW_PKT_STATUS_MASK_FILT_PROCESS = 0x2, + IPA_HW_PKT_STATUS_MASK_NAT_PROCESS = 0x4, + IPA_HW_PKT_STATUS_MASK_ROUTE_PROCESS = 0x8, + IPA_HW_PKT_STATUS_MASK_TAG_VALID = 0x10, + IPA_HW_PKT_STATUS_MASK_FRAGMENT = 0x20, + IPA_HW_PKT_STATUS_MASK_FIRST_FRAGMENT = 0x40, + IPA_HW_PKT_STATUS_MASK_V4 = 0x80, + IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS = 0x100, + IPA_HW_PKT_STATUS_MASK_AGGR_PROCESS = 0x200, + IPA_HW_PKT_STATUS_MASK_DEST_EOT = 0x400, + IPA_HW_PKT_STATUS_MASK_DEAGGR_PROCESS = 0x800, + IPA_HW_PKT_STATUS_MASK_DEAGG_FIRST = 0x1000, + IPA_HW_PKT_STATUS_MASK_SRC_EOT = 0x2000 +}; + +/*! @brief Possible Exceptions received in status */ +enum ipa_hw_pkt_status_exception { + IPA_HW_PKT_STATUS_EXCEPTION_NONE = 0x0, + IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR = 0x1, + IPA_HW_PKT_STATUS_EXCEPTION_REPL = 0x2, + IPA_HW_PKT_STATUS_EXCEPTION_IPTYPE = 0x4, + IPA_HW_PKT_STATUS_EXCEPTION_IHL = 0x8, + IPA_HW_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10, + IPA_HW_PKT_STATUS_EXCEPTION_SW_FILT = 0x20, + IPA_HW_PKT_STATUS_EXCEPTION_NAT = 0x40, + IPA_HW_PKT_STATUS_EXCEPTION_ACTUAL_MAX, + IPA_HW_PKT_STATUS_EXCEPTION_MAX = 0xFF +}; + +/*! 
@brief IPA_HW_IMM_CMD_DMA_SHARED_MEM Immediate Command Parameters */ +struct ipa_hw_imm_cmd_dma_shared_mem { + u32 reserved_1:16; + u32 size:16; + u32 system_addr:32; + u32 local_addr:16; + u32 direction:1; + u32 skip_pipeline_clear:1; + u32 reserved_2:14; + u32 padding:32; +}; + +#endif /* _IPA_HW_DEFS_H */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h new file mode 100644 index 000000000000..66399f6f0a66 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -0,0 +1,1973 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_I_H_ +#define _IPA_I_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_hw_defs.h" +#include "ipa_ram_mmap.h" +#include "ipa_reg.h" +#include "ipa_qmi_service.h" +#include "../ipa_api.h" +#include "../ipa_common_i.h" +#include "ipa_uc_offload_i.h" + +#define DRV_NAME "ipa" +#define NAT_DEV_NAME "ipaNatTable" + +#define IPA_COOKIE 0x57831603 +#define IPA_RT_RULE_COOKIE 0x57831604 +#define IPA_RT_TBL_COOKIE 0x57831605 +#define IPA_FLT_COOKIE 0x57831606 +#define IPA_HDR_COOKIE 0x57831607 +#define IPA_PROC_HDR_COOKIE 0x57831608 + + +#define MTU_BYTE 1500 + +#define IPA_MAX_NUM_PIPES 0x14 +#define IPA_SYS_DESC_FIFO_SZ 0x2000 +#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000 +#define IPA_LAN_RX_HEADER_LENGTH (2) +#define IPA_QMAP_HEADER_LENGTH (4) +#define IPA_DL_CHECKSUM_LENGTH (8) +#define IPA_NUM_DESC_PER_SW_TX (2) +#define IPA_GENERIC_RX_POOL_SZ 192 +#define IPA_UC_FINISH_MAX 6 +#define IPA_UC_WAIT_MIN_SLEEP 1000 +#define IPA_UC_WAII_MAX_SLEEP 1200 +#define IPA_BAM_STOP_MAX_RETRY 10 + +#define IPA_MAX_STATUS_STAT_NUM 30 + + +#define IPA_MAX_NUM_REQ_CACHE 10 +#define IPA_IPC_LOG_PAGES 50 + +#define IPA_WDI_RX_RING_RES 0 +#define IPA_WDI_RX_RING_RP_RES 1 +#define IPA_WDI_RX_COMP_RING_RES 2 
+#define IPA_WDI_RX_COMP_RING_WP_RES 3 +#define IPA_WDI_TX_RING_RES 4 +#define IPA_WDI_CE_RING_RES 5 +#define IPA_WDI_CE_DB_RES 6 +#define IPA_WDI_TX_DB_RES 7 +#define IPA_WDI_MAX_RES 8 + +#define IPADBG(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa_ctx) { \ + IPA_IPC_LOGGING(ipa_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define IPADBG_LOW(fmt, args...) \ + do { \ + pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa_ctx) \ + IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAERR(fmt, args...) \ + do { \ + pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\ + if (ipa_ctx) { \ + IPA_IPC_LOGGING(ipa_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define IPAERR_RL(fmt, args...) 
\ + do { \ + pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__, \ + __LINE__, ## args);\ + if (ipa_ctx) { \ + IPA_IPC_LOGGING(ipa_ctx->logbuf, \ + DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \ + DRV_NAME " %s:%d " fmt, ## args); \ + } \ + } while (0) + +#define WLAN_AMPDU_TX_EP 15 +#define WLAN_PROD_TX_EP 19 +#define WLAN1_CONS_RX_EP 14 +#define WLAN2_CONS_RX_EP 16 +#define WLAN3_CONS_RX_EP 17 +#define WLAN4_CONS_RX_EP 18 + +#define MAX_NUM_EXCP 8 + +#define IPA_STATS + +#ifdef IPA_STATS +#define IPA_STATS_INC_CNT(val) (++val) +#define IPA_STATS_DEC_CNT(val) (--val) +#define IPA_STATS_EXCP_CNT(flags, base) do { \ + int i; \ + for (i = 0; i < MAX_NUM_EXCP; i++) \ + if (flags & BIT(i)) \ + ++base[i]; \ + if (flags == 0) \ + ++base[MAX_NUM_EXCP - 1]; \ + } while (0) +#else +#define IPA_STATS_INC_CNT(x) do { } while (0) +#define IPA_STATS_DEC_CNT(x) +#define IPA_STATS_EXCP_CNT(flags, base) do { } while (0) +#endif + +#define IPA_TOS_EQ BIT(0) +#define IPA_PROTOCOL_EQ BIT(1) +#define IPA_OFFSET_MEQ32_0 BIT(2) +#define IPA_OFFSET_MEQ32_1 BIT(3) +#define IPA_IHL_OFFSET_RANGE16_0 BIT(4) +#define IPA_IHL_OFFSET_RANGE16_1 BIT(5) +#define IPA_IHL_OFFSET_EQ_16 BIT(6) +#define IPA_IHL_OFFSET_EQ_32 BIT(7) +#define IPA_IHL_OFFSET_MEQ32_0 BIT(8) +#define IPA_OFFSET_MEQ128_0 BIT(9) +#define IPA_OFFSET_MEQ128_1 BIT(10) +#define IPA_TC_EQ BIT(11) +#define IPA_FL_EQ BIT(12) +#define IPA_IHL_OFFSET_MEQ32_1 BIT(13) +#define IPA_METADATA_COMPARE BIT(14) +#define IPA_IS_FRAG BIT(15) + +#define IPA_HDR_BIN0 0 +#define IPA_HDR_BIN1 1 +#define IPA_HDR_BIN2 2 +#define IPA_HDR_BIN3 3 +#define IPA_HDR_BIN4 4 +#define IPA_HDR_BIN_MAX 5 + +#define IPA_HDR_PROC_CTX_BIN0 0 +#define IPA_HDR_PROC_CTX_BIN1 1 +#define IPA_HDR_PROC_CTX_BIN_MAX 2 + +#define IPA_EVENT_THRESHOLD 0x10 + +/* + * Due to ZLT issue with USB 3.0 core, IPA BAM threashold need to be set + * to max packet size + 1. 
After setting the threshold, USB core + * will not be notified on ZLTs + */ +#define IPA_USB_EVENT_THRESHOLD 0x4001 + +#define IPA_RX_POOL_CEIL 32 +#define IPA_RX_SKB_SIZE 1792 + +#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr" +#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr" +#define IPA_INVALID_L4_PROTOCOL 0xFF + +#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask)) +#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \ + (reg |= ((val) << (shift)) & (mask)) + +#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \ + (((start_ofst) + 127) & ~127) +#define IPA_RT_FLT_HW_RULE_BUF_SIZE (256) + +#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8 +#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \ + (((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \ + ~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1)) + +#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX) +#define IPA_MEM_PART(x_) (ipa_ctx->ctrl->mem_partition.x_) + +#define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120 +#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96 +#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50 +#define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40 + +struct ipa2_active_client_htable_entry { + struct hlist_node list; + char id_string[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN]; + int count; + enum ipa_active_client_log_type type; +}; + +struct ipa2_active_clients_log_ctx { + char *log_buffer[IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES]; + int log_head; + int log_tail; + bool log_rdy; + struct hlist_head htable[IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE]; +}; + + +struct ipa_client_names { + enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS]; + int length; +}; + +struct ipa_smmu_cb_ctx { + bool valid; + struct device *dev; + struct dma_iommu_mapping *mapping; + struct iommu_domain *iommu; + unsigned long next_addr; + u32 va_start; + u32 va_size; + u32 va_end; +}; + +/** + * struct ipa_flt_entry - IPA filtering table entry + * @link: entry's link in global filtering enrties list + * @rule: filter rule + * @cookie: cookie 
used for validity check + * @tbl: filter table + * @rt_tbl: routing table + * @hw_len: entry's size + * @id: rule handle - globally unique + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_flt_entry { + struct list_head link; + u32 cookie; + struct ipa_flt_rule rule; + struct ipa_flt_tbl *tbl; + struct ipa_rt_tbl *rt_tbl; + u32 hw_len; + int id; + bool ipacm_installed; +}; + +/** + * struct ipa_rt_tbl - IPA routing table + * @link: table's link in global routing tables list + * @head_rt_rule_list: head of routing rules list + * @name: routing table name + * @idx: routing table index + * @rule_cnt: number of rules in routing table + * @ref_cnt: reference counter of routing table + * @set: collection of routing tables + * @cookie: cookie used for validity check + * @in_sys: flag indicating if the table is located in system memory + * @sz: the size of the routing table + * @curr_mem: current routing tables block in sys memory + * @prev_mem: previous routing table block in sys memory + * @id: routing table id + */ +struct ipa_rt_tbl { + struct list_head link; + u32 cookie; + struct list_head head_rt_rule_list; + char name[IPA_RESOURCE_NAME_MAX]; + u32 idx; + u32 rule_cnt; + u32 ref_cnt; + struct ipa_rt_tbl_set *set; + bool in_sys; + u32 sz; + struct ipa_mem_buffer curr_mem; + struct ipa_mem_buffer prev_mem; + int id; +}; + +/** + * struct ipa_hdr_entry - IPA header table entry + * @link: entry's link in global header table entries list + * @hdr: the header + * @hdr_len: header length + * @name: name of header table entry + * @type: l2 header type + * @is_partial: flag indicating if header table entry is partial + * @is_hdr_proc_ctx: false - hdr entry resides in hdr table, + * true - hdr entry resides in DDR and pointed to by proc ctx + * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true, + * else 0 + * @proc_ctx: processing context header + * @offset_entry: entry's offset + * @cookie: cookie used for validity check + * 
@ref_cnt: reference counter of routing table + * @id: header entry id + * @is_eth2_ofst_valid: is eth2_ofst field valid? + * @eth2_ofst: offset to start of Ethernet-II/802.3 header + * @user_deleted: is the header deleted by the user? + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_hdr_entry { + struct list_head link; + u32 cookie; + u8 hdr[IPA_HDR_MAX_SIZE]; + u32 hdr_len; + char name[IPA_RESOURCE_NAME_MAX]; + enum ipa_hdr_l2_type type; + u8 is_partial; + bool is_hdr_proc_ctx; + dma_addr_t phys_base; + struct ipa_hdr_proc_ctx_entry *proc_ctx; + struct ipa_hdr_offset_entry *offset_entry; + u32 ref_cnt; + int id; + u8 is_eth2_ofst_valid; + u16 eth2_ofst; + bool user_deleted; + bool ipacm_installed; +}; + +/** + * struct ipa_hdr_tbl - IPA header table + * @head_hdr_entry_list: header entries list + * @head_offset_list: header offset list + * @head_free_offset_list: header free offset list + * @hdr_cnt: number of headers + * @end: the last header index + */ +struct ipa_hdr_tbl { + struct list_head head_hdr_entry_list; + struct list_head head_offset_list[IPA_HDR_BIN_MAX]; + struct list_head head_free_offset_list[IPA_HDR_BIN_MAX]; + u32 hdr_cnt; + u32 end; +}; + +/** + * struct ipa_hdr_offset_entry - IPA header offset entry + * @link: entry's link in global processing context header offset entries list + * @offset: the offset + * @bin: bin + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_hdr_proc_ctx_offset_entry { + struct list_head link; + u32 offset; + u32 bin; + bool ipacm_installed; +}; + +/** + * struct ipa_hdr_proc_ctx_add_hdr_seq - + * IPA processing context header - add header sequence + * @hdr_add: add header command + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hdr_proc_ctx_add_hdr_seq { + struct ipa_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hdr_proc_ctx_tlv end; +}; + +/** + * struct ipa_hdr_proc_ctx_add_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add 
header command + * @cmd: tlv processing command (cmd.type must be 3) + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hdr_proc_ctx_add_hdr_cmd_seq { + struct ipa_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hdr_proc_ctx_tlv cmd; + struct ipa_hdr_proc_ctx_tlv end; +}; + +/** + *struct ipa_hdr_proc_ctx_entry - IPA processing context header table entry + * @link: entry's link in global header table entries list + * @type: + * @offset_entry: entry's offset + * @hdr: the header + * @cookie: cookie used for validity check + * @ref_cnt: reference counter of routing table + * @id: processing context header entry id + * @user_deleted: is the hdr processing context deleted by the user? + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_hdr_proc_ctx_entry { + struct list_head link; + u32 cookie; + enum ipa_hdr_proc_type type; + struct ipa_hdr_proc_ctx_offset_entry *offset_entry; + struct ipa_hdr_entry *hdr; + u32 ref_cnt; + int id; + bool user_deleted; + bool ipacm_installed; +}; + +/** + * struct ipa_hdr_proc_ctx_tbl - IPA processing context header table + * @head_proc_ctx_entry_list: header entries list + * @head_offset_list: header offset list + * @head_free_offset_list: header free offset list + * @proc_ctx_cnt: number of processing context headers + * @end: the last processing context header index + * @start_offset: offset in words of processing context header table + */ +struct ipa_hdr_proc_ctx_tbl { + struct list_head head_proc_ctx_entry_list; + struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX]; + struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX]; + u32 proc_ctx_cnt; + u32 end; + u32 start_offset; +}; + +/** + * struct ipa_flt_tbl - IPA filter table + * @head_flt_rule_list: filter rules list + * @rule_cnt: number of filter rules + * @in_sys: flag indicating if filter table is located in system memory + * @sz: the size of the filter table + * @end: the last header index + * @curr_mem: current filter tables block 
in sys memory + * @prev_mem: previous filter table block in sys memory + */ +struct ipa_flt_tbl { + struct list_head head_flt_rule_list; + u32 rule_cnt; + bool in_sys; + u32 sz; + struct ipa_mem_buffer curr_mem; + struct ipa_mem_buffer prev_mem; + bool sticky_rear; +}; + +/** + * struct ipa_rt_entry - IPA routing table entry + * @link: entry's link in global routing table entries list + * @rule: routing rule + * @cookie: cookie used for validity check + * @tbl: routing table + * @hdr: header table + * @proc_ctx: processing context table + * @hw_len: the length of the table + * @id: rule handle - globaly unique + * @ipacm_installed: indicate if installed by ipacm + */ +struct ipa_rt_entry { + struct list_head link; + u32 cookie; + struct ipa_rt_rule rule; + struct ipa_rt_tbl *tbl; + struct ipa_hdr_entry *hdr; + struct ipa_hdr_proc_ctx_entry *proc_ctx; + u32 hw_len; + int id; + bool ipacm_installed; +}; + +/** + * struct ipa_rt_tbl_set - collection of routing tables + * @head_rt_tbl_list: collection of routing tables + * @tbl_cnt: number of routing tables + */ +struct ipa_rt_tbl_set { + struct list_head head_rt_tbl_list; + u32 tbl_cnt; +}; + +/** + * struct ipa_ep_cfg_status - status configuration in IPA end-point + * @status_en: Determines if end point supports Status Indications. SW should + * set this bit in order to enable Statuses. Output Pipe - send + * Status indications only if bit is set. Input Pipe - forward Status + * indication to STATUS_ENDP only if bit is set. Valid for Input + * and Output Pipes (IPA Consumer and Producer) + * @status_ep: Statuses generated for this endpoint will be forwarded to the + * specified Status End Point. 
Status endpoint needs to be + * configured with STATUS_EN=1 Valid only for Input Pipes (IPA + * Consumer) + */ +struct ipa_ep_cfg_status { + bool status_en; + u8 status_ep; +}; + +/** + * struct ipa_wlan_stats - Wlan stats for each wlan endpoint + * @rx_pkts_rcvd: Packets sent by wlan driver + * @rx_pkts_status_rcvd: Status packets received from ipa hw + * @rx_hd_processed: Data Descriptors processed by IPA Driver + * @rx_hd_reply: Data Descriptors recycled by wlan driver + * @rx_hd_rcvd: Data Descriptors sent by wlan driver + * @rx_pkt_leak: Packet count that are not recycled + * @rx_dp_fail: Packets failed to transfer to IPA HW + * @tx_pkts_rcvd: SKB Buffers received from ipa hw + * @tx_pkts_sent: SKB Buffers sent to wlan driver + * @tx_pkts_dropped: Dropped packets count + */ +struct ipa_wlan_stats { + u32 rx_pkts_rcvd; + u32 rx_pkts_status_rcvd; + u32 rx_hd_processed; + u32 rx_hd_reply; + u32 rx_hd_rcvd; + u32 rx_pkt_leak; + u32 rx_dp_fail; + u32 tx_pkts_rcvd; + u32 tx_pkts_sent; + u32 tx_pkts_dropped; +}; + +/** + * struct ipa_wlan_comm_memb - Wlan comm members + * @wlan_spinlock: protects wlan comm buff list and its size + * @ipa_tx_mul_spinlock: protects tx dp mul transfer + * @wlan_comm_total_cnt: wlan common skb buffers allocated count + * @wlan_comm_free_cnt: wlan common skb buffer free count + * @total_tx_pkts_freed: Recycled Buffer count + * @wlan_comm_desc_list: wlan common skb buffer list + */ +struct ipa_wlan_comm_memb { + spinlock_t wlan_spinlock; + spinlock_t ipa_tx_mul_spinlock; + u32 wlan_comm_total_cnt; + u32 wlan_comm_free_cnt; + u32 total_tx_pkts_freed; + struct list_head wlan_comm_desc_list; + atomic_t active_clnt_cnt; +}; + +struct ipa_status_stats { + struct ipa_hw_pkt_status status[IPA_MAX_STATUS_STAT_NUM]; + int curr; +}; + +enum ipa_wakelock_ref_client { + IPA_WAKELOCK_REF_CLIENT_TX = 0, + IPA_WAKELOCK_REF_CLIENT_LAN_RX = 1, + IPA_WAKELOCK_REF_CLIENT_WAN_RX = 2, + IPA_WAKELOCK_REF_CLIENT_WLAN_RX = 3, + IPA_WAKELOCK_REF_CLIENT_ODU_RX = 4, 
+ IPA_WAKELOCK_REF_CLIENT_SPS = 5, + IPA_WAKELOCK_REF_CLIENT_MAX +}; + +/** + * struct ipa_ep_context - IPA end point context + * @valid: flag indicating id EP context is valid + * @client: EP client type + * @ep_hdl: EP's client SPS handle + * @cfg: EP cionfiguration + * @dst_pipe_index: destination pipe index + * @rt_tbl_idx: routing table index + * @connect: SPS connect + * @priv: user provided information which will forwarded once the user is + * notified for new data avail + * @client_notify: user provided CB for EP events notification, the event is + * data revived. + * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory + * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory + * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset + * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset + * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client + * @data_fifo_client_allocated: if data FIFO was allocated by a client + * @skip_ep_cfg: boolean field that determines if EP should be configured + * by IPA driver + * @keep_ipa_awake: when true, IPA will not be clock gated + * @rx_replenish_threshold: Indicates the WM value which requires the RX + * descriptors replenish function to be called to + * avoid the RX pipe to run out of descriptors + * and cause HOLB. + * @disconnect_in_progress: Indicates client disconnect in progress. + * @qmi_request_sent: Indicates whether QMI request to enable clear data path + * request is sent or not. 
+ * @napi_enabled: when true, IPA call client callback to start polling + */ +struct ipa_ep_context { + int valid; + enum ipa_client_type client; + struct sps_pipe *ep_hdl; + struct ipa_ep_cfg cfg; + struct ipa_ep_cfg_holb holb; + struct ipa_ep_cfg_status status; + u32 dst_pipe_index; + u32 rt_tbl_idx; + struct sps_connect connect; + void *priv; + void (*client_notify)(void *priv, enum ipa_dp_evt_type evt, + unsigned long data); + bool desc_fifo_in_pipe_mem; + bool data_fifo_in_pipe_mem; + u32 desc_fifo_pipe_mem_ofst; + u32 data_fifo_pipe_mem_ofst; + bool desc_fifo_client_allocated; + bool data_fifo_client_allocated; + atomic_t avail_fifo_desc; + u32 dflt_flt4_rule_hdl; + u32 dflt_flt6_rule_hdl; + bool skip_ep_cfg; + bool keep_ipa_awake; + struct ipa_wlan_stats wstats; + u32 uc_offload_state; + u32 rx_replenish_threshold; + bool disconnect_in_progress; + u32 qmi_request_sent; + enum ipa_wakelock_ref_client wakelock_client; + bool napi_enabled; + bool switch_to_intr; + int inactive_cycles; + u32 eot_in_poll_err; + bool ep_disabled; + + /* sys MUST be the last element of this struct */ + struct ipa_sys_context *sys; +}; + +enum ipa_sys_pipe_policy { + IPA_POLICY_INTR_MODE, + IPA_POLICY_NOINTR_MODE, + IPA_POLICY_INTR_POLL_MODE, +}; + +struct ipa_repl_ctx { + struct ipa_rx_pkt_wrapper **cache; + atomic_t head_idx; + atomic_t tail_idx; + u32 capacity; +}; + +/** + * struct ipa_sys_context - IPA endpoint context for system to BAM pipes + * @head_desc_list: header descriptors list + * @len: the size of the above list + * @spinlock: protects the list and its size + * @event: used to request CALLBACK mode from SPS driver + * @ep: IPA EP context + * + * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN + */ +struct ipa_sys_context { + u32 len; + struct sps_register_event event; + atomic_t curr_polling_state; + struct delayed_work switch_to_intr_work; + enum ipa_sys_pipe_policy policy; + int (*pyld_hdlr)(struct sk_buff *skb, struct ipa_sys_context *sys); + 
struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags); + void (*free_skb)(struct sk_buff *skb); + u32 rx_buff_sz; + u32 rx_pool_sz; + struct sk_buff *prev_skb; + unsigned int len_rem; + unsigned int len_pad; + unsigned int len_partial; + bool drop_packet; + struct work_struct work; + void (*sps_callback)(struct sps_event_notify *notify); + enum sps_option sps_option; + struct delayed_work replenish_rx_work; + struct work_struct repl_work; + void (*repl_hdlr)(struct ipa_sys_context *sys); + struct ipa_repl_ctx repl; + unsigned int repl_trig_cnt; + unsigned int repl_trig_thresh; + + /* ordering is important - mutable fields go above */ + struct ipa_ep_context *ep; + struct list_head head_desc_list; + struct list_head rcycl_list; + spinlock_t spinlock; + struct workqueue_struct *wq; + struct workqueue_struct *repl_wq; + struct ipa_status_stats *status_stat; + /* ordering is important - other immutable fields go below */ +}; + +/** + * enum ipa_desc_type - IPA decriptors type + * + * IPA decriptors type, IPA supports DD and ICD but no CD + */ +enum ipa_desc_type { + IPA_DATA_DESC, + IPA_DATA_DESC_SKB, + IPA_DATA_DESC_SKB_PAGED, + IPA_IMM_CMD_DESC +}; + +/** + * struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper + * @type: specify if this packet is for the skb or immediate command + * @mem: memory buffer used by this Tx packet + * @work: work struct for current Tx packet + * @link: linked to the wrappers on that pipe + * @callback: IPA client provided callback + * @user1: cookie1 for above callback + * @user2: cookie2 for above callback + * @sys: corresponding IPA sys context + * @mult: valid only for first of a "multiple" transfer, + * holds info for the "sps_transfer" buffer + * @cnt: 1 for single transfers, + * >1 and <0xFFFF for first of a "multiple" transfer, + * 0xFFFF for last desc, 0 for rest of "multiple' transfer + * @bounce: va of bounce buffer + * @unmap_dma: in case this is true, the buffer will not be dma unmapped + * + * This struct can wrap both data 
packet and immediate command packet. + */ +struct ipa_tx_pkt_wrapper { + enum ipa_desc_type type; + struct ipa_mem_buffer mem; + struct work_struct work; + struct list_head link; + void (*callback)(void *user1, int user2); + void *user1; + int user2; + struct ipa_sys_context *sys; + struct ipa_mem_buffer mult; + u32 cnt; + void *bounce; + bool no_unmap_dma; +}; + +/** + * struct ipa_desc - IPA descriptor + * @type: skb or immediate command or plain old data + * @pyld: points to skb + * @frag: points to paged fragment + * or kmalloc'ed immediate command parameters/plain old data + * @dma_address: dma mapped address of pyld + * @dma_address_valid: valid field for dma_address + * @len: length of the pyld + * @opcode: for immediate commands + * @callback: IPA client provided completion callback + * @user1: cookie1 for above callback + * @user2: cookie2 for above callback + * @xfer_done: completion object for sync completion + */ +struct ipa_desc { + enum ipa_desc_type type; + void *pyld; + skb_frag_t *frag; + dma_addr_t dma_address; + bool dma_address_valid; + u16 len; + u16 opcode; + void (*callback)(void *user1, int user2); + void *user1; + int user2; + struct completion xfer_done; +}; + +/** + * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper + * @skb: skb + * @dma_address: DMA address of this Rx packet + * @link: linked to the Rx packets on that pipe + * @len: how many bytes are copied into skb's flat buffer + */ +struct ipa_rx_pkt_wrapper { + struct list_head link; + struct ipa_rx_data data; + u32 len; + struct work_struct work; + struct ipa_sys_context *sys; +}; + +/** + * struct ipa_nat_mem - IPA NAT memory description + * @class: pointer to the struct class + * @dev: the dev_t of the device + * @cdev: cdev of the device + * @dev_num: device number + * @vaddr: virtual address + * @dma_handle: DMA handle + * @size: NAT memory size + * @is_mapped: flag indicating if NAT memory is mapped + * @is_sys_mem: flag indicating if NAT memory is sys memory + * 
@is_dev_init: flag indicating if NAT device is initialized + * @lock: NAT memory mutex + * @nat_base_address: nat table virutal address + * @ipv4_rules_addr: base nat table address + * @ipv4_expansion_rules_addr: expansion table address + * @index_table_addr: index table address + * @index_table_expansion_addr: index expansion table address + * @size_base_tables: base table size + * @size_expansion_tables: expansion table size + * @public_ip_addr: ip address of nat table + */ +struct ipa_nat_mem { + struct class *class; + struct device *dev; + struct cdev cdev; + dev_t dev_num; + void *vaddr; + dma_addr_t dma_handle; + size_t size; + bool is_mapped; + bool is_sys_mem; + bool is_dev_init; + bool is_dev; + struct mutex lock; + void *nat_base_address; + char *ipv4_rules_addr; + char *ipv4_expansion_rules_addr; + char *index_table_addr; + char *index_table_expansion_addr; + u32 size_base_tables; + u32 size_expansion_tables; + u32 public_ip_addr; + void *tmp_vaddr; + dma_addr_t tmp_dma_handle; + bool is_tmp_mem; +}; + +/** + * enum ipa_hw_mode - IPA hardware mode + * @IPA_HW_Normal: Regular IPA hardware + * @IPA_HW_Virtual: IPA hardware supporting virtual memory allocation + * @IPA_HW_PCIE: IPA hardware supporting memory allocation over PCIE Bridge + */ +enum ipa_hw_mode { + IPA_HW_MODE_NORMAL = 0, + IPA_HW_MODE_VIRTUAL = 1, + IPA_HW_MODE_PCIE = 2 +}; + +enum ipa_config_this_ep { + IPA_CONFIGURE_THIS_EP, + IPA_DO_NOT_CONFIGURE_THIS_EP, +}; + +struct ipa_stats { + u32 tx_sw_pkts; + u32 tx_hw_pkts; + u32 rx_pkts; + u32 rx_excp_pkts[MAX_NUM_EXCP]; + u32 rx_repl_repost; + u32 tx_pkts_compl; + u32 rx_q_len; + u32 msg_w[IPA_EVENT_MAX_NUM]; + u32 msg_r[IPA_EVENT_MAX_NUM]; + u32 stat_compl; + u32 aggr_close; + u32 wan_aggr_close; + u32 wan_rx_empty; + u32 wan_repl_rx_empty; + u32 lan_rx_empty; + u32 lan_repl_rx_empty; + u32 flow_enable; + u32 flow_disable; + u32 tx_non_linear; +}; + +struct ipa_active_clients { + struct mutex mutex; + spinlock_t spinlock; + bool mutex_locked; + 
int cnt; +}; + +struct ipa_wakelock_ref_cnt { + spinlock_t spinlock; + u32 cnt; +}; + +struct ipa_tag_completion { + struct completion comp; + atomic_t cnt; +}; + +struct ipa_controller; + +/** + * struct ipa_uc_hdlrs - IPA uC callback functions + * @ipa_uc_loaded_hdlr: Function handler when uC is loaded + * @ipa_uc_event_hdlr: Event handler function + * @ipa_uc_response_hdlr: Response handler function + * @ipa_uc_event_log_info_hdlr: Log event handler function + */ +struct ipa_uc_hdlrs { + void (*ipa_uc_loaded_hdlr)(void); + + void (*ipa_uc_event_hdlr) + (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio); + int (*ipa_uc_response_hdlr) + (struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio, + u32 *uc_status); + void (*ipa_uc_event_log_info_hdlr) + (struct IpaHwEventLogInfoData_t *uc_event_top_mmio); +}; + +/** + * enum ipa_hw_flags - flags which defines the behavior of HW + * + * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert + * failure. + * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported + * in the event ring only. No event to CPU. 
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event + * IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST + * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by + * QMB (avoid memcpy) + * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in + * IN Channel + * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is + * entering a mode where it expects a doorbell to be rung for OUT Channel + * @IPA_HW_FLAG_NO_START_OOB_TIMER + */ +enum ipa_hw_flags { + IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE = 0x01, + IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR = 0x02, + IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP = 0x04, + IPA_HW_FLAG_WORK_OVER_DDR = 0x08, + IPA_HW_FLAG_NO_REPORT_OOB = 0x10, + IPA_HW_FLAG_NO_REPORT_DB_MODE = 0x20, + IPA_HW_FLAG_NO_START_OOB_TIMER = 0x40 +}; + +/** + * struct ipa_uc_ctx - IPA uC context + * @uc_inited: Indicates if uC interface has been initialized + * @uc_loaded: Indicates if uC has loaded + * @uc_failed: Indicates if uC has failed / returned an error + * @uc_lock: uC interface lock to allow only one uC interaction at a time + * @uc_completation: Completion mechanism to wait for uC commands + * @uc_sram_mmio: Pointer to uC mapped memory + * @pending_cmd: The last command sent waiting to be ACKed + * @uc_status: The last status provided by the uC + * @uc_zip_error: uC has notified the APPS upon a ZIP engine error + * @uc_error_type: error type from uC error event + */ +struct ipa_uc_ctx { + bool uc_inited; + bool uc_loaded; + bool uc_failed; + struct mutex uc_lock; + struct completion uc_completion; + struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio; + struct IpaHwEventLogInfoData_t *uc_event_top_mmio; + u32 uc_event_top_ofst; + u32 pending_cmd; + u32 uc_status; + bool uc_zip_error; + u32 uc_error_type; + phys_addr_t rdy_ring_base_pa; + phys_addr_t rdy_ring_rp_pa; + u32 rdy_ring_size; + phys_addr_t rdy_comp_ring_base_pa; + phys_addr_t rdy_comp_ring_wp_pa; + u32 
rdy_comp_ring_size; + u32 *rdy_ring_rp_va; + u32 *rdy_comp_ring_wp_va; +}; + +/** + * struct ipa_uc_wdi_ctx + * @wdi_uc_top_ofst: + * @wdi_uc_top_mmio: + * @wdi_uc_stats_ofst: + * @wdi_uc_stats_mmio: + */ +struct ipa_uc_wdi_ctx { + /* WDI specific fields */ + u32 wdi_uc_stats_ofst; + struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio; + void *priv; + ipa_uc_ready_cb uc_ready_cb; + /* for AP+STA stats update */ +#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN + ipa_wdi_meter_notifier_cb stats_notify; +#endif +}; + +/** + * struct ipa_sps_pm - SPS power management related members + * @dec_clients: true if need to decrease active clients count + * @eot_activity: represent EOT interrupt activity to determine to reset + * the inactivity timer + * @sps_pm_lock: Lock to protect the sps_pm functionality. + */ +struct ipa_sps_pm { + atomic_t dec_clients; + atomic_t eot_activity; + struct mutex sps_pm_lock; +}; + +/** + * struct ipacm_client_info - the client-info indicated from IPACM + * @ipacm_client_enum: the enum to indicate tether-client + * @ipacm_client_uplink: the bool to indicate pipe for uplink + */ +struct ipacm_client_info { + enum ipacm_client_enum client_enum; + bool uplink; +}; + +struct ipa_cne_evt { + struct ipa_wan_msg wan_msg; + struct ipa_msg_meta msg_meta; +}; + +/** + * struct ipa_context - IPA context + * @class: pointer to the struct class + * @dev_num: device number + * @dev: the dev_t of the device + * @cdev: cdev of the device + * @bam_handle: IPA driver's BAM handle + * @ep: list of all end points + * @skip_ep_cfg_shadow: state to update filter table correctly across + power-save + * @resume_on_connect: resume ep on ipa_connect + * @flt_tbl: list of all IPA filter tables + * @mode: IPA operating mode + * @mmio: iomem + * @ipa_wrapper_base: IPA wrapper base address + * @glob_flt_tbl: global filter table + * @hdr_tbl: IPA header table + * @hdr_proc_ctx_tbl: IPA processing context table + * @rt_tbl_set: list of routing tables each of which is a list of rules + * 
@reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped + * @flt_rule_cache: filter rule cache + * @rt_rule_cache: routing rule cache + * @hdr_cache: header cache + * @hdr_offset_cache: header offset cache + * @hdr_proc_ctx_cache: processing context cache + * @hdr_proc_ctx_offset_cache: processing context offset cache + * @rt_tbl_cache: routing table cache + * @tx_pkt_wrapper_cache: Tx packets cache + * @rx_pkt_wrapper_cache: Rx packets cache + * @rt_idx_bitmap: routing table index bitmap + * @lock: this does NOT protect the linked lists within ipa_sys_context + * @smem_sz: shared memory size available for SW use starting + * from non-restricted bytes + * @smem_restricted_bytes: the bytes that SW should not use in the shared mem + * @nat_mem: NAT memory + * @excp_hdr_hdl: exception header handle + * @dflt_v4_rt_rule_hdl: default v4 routing rule handle + * @dflt_v6_rt_rule_hdl: default v6 routing rule handle + * @aggregation_type: aggregation type used on USB client endpoint + * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint + * @aggregation_time_limit: aggregation time limit used on USB client endpoint + * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system + * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system + * @hdr_mem: header memory + * @hdr_proc_ctx_mem: processing context memory + * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system + * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system + * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system + * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system + * @empty_rt_tbl_mem: empty routing tables memory + * @power_mgmt_wq: workqueue for power management + * @sps_power_mgmt_wq: workqueue SPS related power management + * @tag_process_before_gating: indicates whether to start tag process before + * gating IPA clocks + * @sps_pm: sps power management related information + * @disconnect_lock: protects 
LAN_CONS packet receive notification CB + * @pipe_mem_pool: pipe memory pool + * @dma_pool: special purpose DMA pool + * @ipa_active_clients: structure for reference counting connected IPA clients + * @ipa_hw_type: type of IPA HW type (e.g. IPA 1.0, IPA 1.1 etc') + * @ipa_hw_mode: mode of IPA HW mode (e.g. Normal, Virtual or over PCIe) + * @use_ipa_teth_bridge: use tethering bridge driver + * @ipa_bam_remote_mode: ipa bam is in remote mode + * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules + * @logbuf: ipc log buffer for high priority messages + * @logbuf_low: ipc log buffer for low priority messages + * @ipa_wdi2: using wdi-2.0 + * @ipa_bus_hdl: msm driver handle for the data path bus + * @ctrl: holds the core specific operations based on + * core version (vtable like) + * @enable_clock_scaling: clock scaling is enabled ? + * @curr_ipa_clk_rate: ipa_clk current rate + * @wcstats: wlan common buffer stats + * @uc_ctx: uC interface context + * @uc_wdi_ctx: WDI specific fields for uC interface + * @ipa_num_pipes: The number of pipes used by IPA HW + * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided + * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA + * @w_lock: Indicates the wakeup source. 
+ * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired + + * IPA context - holds all relevant info about IPA driver and its state + */ +struct ipa_context { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; + unsigned long bam_handle; + struct ipa_ep_context ep[IPA_MAX_NUM_PIPES]; + bool skip_ep_cfg_shadow[IPA_MAX_NUM_PIPES]; + bool resume_on_connect[IPA_CLIENT_MAX]; + struct ipa_flt_tbl flt_tbl[IPA_MAX_NUM_PIPES][IPA_IP_MAX]; + void __iomem *mmio; + u32 ipa_wrapper_base; + u32 ipa_wrapper_size; + struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX]; + struct ipa_hdr_tbl hdr_tbl; + struct ipa_hdr_proc_ctx_tbl hdr_proc_ctx_tbl; + struct ipa_rt_tbl_set rt_tbl_set[IPA_IP_MAX]; + struct ipa_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX]; + struct kmem_cache *flt_rule_cache; + struct kmem_cache *rt_rule_cache; + struct kmem_cache *hdr_cache; + struct kmem_cache *hdr_offset_cache; + struct kmem_cache *hdr_proc_ctx_cache; + struct kmem_cache *hdr_proc_ctx_offset_cache; + struct kmem_cache *rt_tbl_cache; + struct kmem_cache *tx_pkt_wrapper_cache; + struct kmem_cache *rx_pkt_wrapper_cache; + unsigned long rt_idx_bitmap[IPA_IP_MAX]; + struct mutex lock; + u16 smem_sz; + u16 smem_restricted_bytes; + u16 smem_reqd_sz; + struct ipa_nat_mem nat_mem; + u32 excp_hdr_hdl; + u32 dflt_v4_rt_rule_hdl; + u32 dflt_v6_rt_rule_hdl; + uint aggregation_type; + uint aggregation_byte_limit; + uint aggregation_time_limit; + bool hdr_tbl_lcl; + bool hdr_proc_ctx_tbl_lcl; + struct ipa_mem_buffer hdr_mem; + struct ipa_mem_buffer hdr_proc_ctx_mem; + bool ip4_rt_tbl_lcl; + bool ip6_rt_tbl_lcl; + bool ip4_flt_tbl_lcl; + bool ip6_flt_tbl_lcl; + struct ipa_mem_buffer empty_rt_tbl_mem; + struct gen_pool *pipe_mem_pool; + struct dma_pool *dma_pool; + struct ipa_active_clients ipa_active_clients; + struct ipa2_active_clients_log_ctx ipa2_active_clients_logging; + struct workqueue_struct *power_mgmt_wq; + struct workqueue_struct *sps_power_mgmt_wq; + bool 
tag_process_before_gating; + struct ipa_sps_pm sps_pm; + u32 clnt_hdl_cmd; + u32 clnt_hdl_data_in; + u32 clnt_hdl_data_out; + spinlock_t disconnect_lock; + u8 a5_pipe_index; + struct list_head intf_list; + struct list_head msg_list; + struct list_head pull_msg_list; + struct mutex msg_lock; + struct list_head msg_wlan_client_list; + struct mutex msg_wlan_client_lock; + wait_queue_head_t msg_waitq; + enum ipa_hw_type ipa_hw_type; + enum ipa_hw_mode ipa_hw_mode; + bool use_ipa_teth_bridge; + bool ipa_bam_remote_mode; + bool modem_cfg_emb_pipe_flt; + bool ipa_wdi2; + /* featurize if memory footprint becomes a concern */ + struct ipa_stats stats; + void *smem_pipe_mem; + void *logbuf; + void *logbuf_low; + u32 ipa_bus_hdl; + struct ipa_controller *ctrl; + struct idr ipa_idr; + struct device *pdev; + struct device *uc_pdev; + spinlock_t idr_lock; + u32 enable_clock_scaling; + u32 curr_ipa_clk_rate; + bool q6_proxy_clk_vote_valid; + u32 ipa_num_pipes; + + struct ipa_wlan_comm_memb wc_memb; + + struct ipa_uc_ctx uc_ctx; + + struct ipa_uc_wdi_ctx uc_wdi_ctx; + struct ipa_uc_ntn_ctx uc_ntn_ctx; + u32 wan_rx_ring_size; + u32 lan_rx_ring_size; + bool skip_uc_pipe_reset; + bool smmu_present; + bool smmu_s1_bypass; + unsigned long peer_bam_iova; + phys_addr_t peer_bam_pa; + u32 peer_bam_map_size; + unsigned long peer_bam_dev; + u32 peer_bam_map_cnt; + u32 wdi_map_cnt; + bool use_dma_zone; + struct wakeup_source w_lock; + struct ipa_wakelock_ref_cnt wakelock_ref_cnt; + + /* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */ + bool ipa_client_apps_wan_cons_agg_gro; + /* M-release support to know client pipes */ + struct ipacm_client_info ipacm_client[IPA_MAX_NUM_PIPES]; + bool tethered_flow_control; + u32 ipa_rx_min_timeout_usec; + u32 ipa_rx_max_timeout_usec; + u32 ipa_polling_iteration; + struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE]; + int num_ipa_cne_evt_req; + struct mutex ipa_cne_evt_lock; + bool ipa_uc_monitor_holb; +}; + +/** + * struct ipa_route - IPA route + * 
@route_dis: route disable + * @route_def_pipe: route default pipe + * @route_def_hdr_table: route default header table + * @route_def_hdr_ofst: route default header offset table + * @route_frag_def_pipe: Default pipe to route fragmented exception + * packets and frag new rule statues, if source pipe does not have + * a notification status pipe defined. + */ +struct ipa_route { + u32 route_dis; + u32 route_def_pipe; + u32 route_def_hdr_table; + u32 route_def_hdr_ofst; + u8 route_frag_def_pipe; +}; + +/** + * enum ipa_pipe_mem_type - IPA pipe memory type + * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory + * @IPA_PRIVATE_MEM: IPA's private memory + * @IPA_SYSTEM_MEM: System RAM, requires allocation + */ +enum ipa_pipe_mem_type { + IPA_SPS_PIPE_MEM = 0, + IPA_PRIVATE_MEM = 1, + IPA_SYSTEM_MEM = 2, +}; + +struct ipa_plat_drv_res { + bool use_ipa_teth_bridge; + u32 ipa_mem_base; + u32 ipa_mem_size; + u32 bam_mem_base; + u32 bam_mem_size; + u32 ipa_irq; + u32 bam_irq; + u32 ipa_pipe_mem_start_ofst; + u32 ipa_pipe_mem_size; + enum ipa_hw_type ipa_hw_type; + enum ipa_hw_mode ipa_hw_mode; + u32 ee; + bool ipa_bam_remote_mode; + bool modem_cfg_emb_pipe_flt; + bool ipa_wdi2; + u32 wan_rx_ring_size; + u32 lan_rx_ring_size; + bool skip_uc_pipe_reset; + bool use_dma_zone; + bool tethered_flow_control; + u32 ipa_rx_polling_sleep_msec; + u32 ipa_polling_iteration; + bool ipa_uc_monitor_holb; +}; + +struct ipa_mem_partition { + u16 ofst_start; + u16 nat_ofst; + u16 nat_size; + u16 v4_flt_ofst; + u16 v4_flt_size; + u16 v4_flt_size_ddr; + u16 v6_flt_ofst; + u16 v6_flt_size; + u16 v6_flt_size_ddr; + u16 v4_rt_ofst; + u16 v4_num_index; + u16 v4_modem_rt_index_lo; + u16 v4_modem_rt_index_hi; + u16 v4_apps_rt_index_lo; + u16 v4_apps_rt_index_hi; + u16 v4_rt_size; + u16 v4_rt_size_ddr; + u16 v6_rt_ofst; + u16 v6_num_index; + u16 v6_modem_rt_index_lo; + u16 v6_modem_rt_index_hi; + u16 v6_apps_rt_index_lo; + u16 v6_apps_rt_index_hi; + u16 v6_rt_size; + u16 v6_rt_size_ddr; + u16 
modem_hdr_ofst; + u16 modem_hdr_size; + u16 apps_hdr_ofst; + u16 apps_hdr_size; + u16 apps_hdr_size_ddr; + u16 modem_hdr_proc_ctx_ofst; + u16 modem_hdr_proc_ctx_size; + u16 apps_hdr_proc_ctx_ofst; + u16 apps_hdr_proc_ctx_size; + u16 apps_hdr_proc_ctx_size_ddr; + u16 modem_comp_decomp_ofst; + u16 modem_comp_decomp_size; + u16 modem_ofst; + u16 modem_size; + u16 apps_v4_flt_ofst; + u16 apps_v4_flt_size; + u16 apps_v6_flt_ofst; + u16 apps_v6_flt_size; + u16 uc_info_ofst; + u16 uc_info_size; + u16 end_ofst; + u16 apps_v4_rt_ofst; + u16 apps_v4_rt_size; + u16 apps_v6_rt_ofst; + u16 apps_v6_rt_size; +}; + +struct ipa_controller { + struct ipa_mem_partition mem_partition; + u32 ipa_clk_rate_turbo; + u32 ipa_clk_rate_nominal; + u32 ipa_clk_rate_svs; + u32 clock_scaling_bw_threshold_turbo; + u32 clock_scaling_bw_threshold_nominal; + u32 ipa_reg_base_ofst; + u32 max_holb_tmr_val; + void (*ipa_sram_read_settings)(void); + int (*ipa_init_sram)(void); + int (*ipa_init_hdr)(void); + int (*ipa_init_rt4)(void); + int (*ipa_init_rt6)(void); + int (*ipa_init_flt4)(void); + int (*ipa_init_flt6)(void); + void (*ipa_cfg_ep_hdr)(u32 pipe_number, + const struct ipa_ep_cfg_hdr *ipa_ep_hdr_cfg); + int (*ipa_cfg_ep_hdr_ext)(u32 pipe_number, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_hdr_ext_cfg); + void (*ipa_cfg_ep_aggr)(u32 pipe_number, + const struct ipa_ep_cfg_aggr *ipa_ep_agrr_cfg); + int (*ipa_cfg_ep_deaggr)(u32 pipe_index, + const struct ipa_ep_cfg_deaggr *ep_deaggr); + void (*ipa_cfg_ep_nat)(u32 pipe_number, + const struct ipa_ep_cfg_nat *ipa_ep_nat_cfg); + void (*ipa_cfg_ep_mode)(u32 pipe_number, u32 dst_pipe_number, + const struct ipa_ep_cfg_mode *ep_mode); + void (*ipa_cfg_ep_route)(u32 pipe_index, u32 rt_tbl_index); + void (*ipa_cfg_ep_holb)(u32 pipe_index, + const struct ipa_ep_cfg_holb *ep_holb); + void (*ipa_cfg_route)(struct ipa_route *route); + int (*ipa_read_gen_reg)(char *buff, int max_len); + int (*ipa_read_ep_reg)(char *buff, int max_len, int pipe); + void 
(*ipa_write_dbg_cnt)(int option); + int (*ipa_read_dbg_cnt)(char *buf, int max_len); + void (*ipa_cfg_ep_status)(u32 clnt_hdl, + const struct ipa_ep_cfg_status *ep_status); + int (*ipa_commit_flt)(enum ipa_ip_type ip); + int (*ipa_commit_rt)(enum ipa_ip_type ip); + int (*ipa_generate_rt_hw_rule)(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf); + int (*ipa_commit_hdr)(void); + void (*ipa_cfg_ep_cfg)(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *cfg); + void (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *metadata_mask); + void (*ipa_enable_clks)(void); + void (*ipa_disable_clks)(void); + struct msm_bus_scale_pdata *msm_bus_data_ptr; + + void (*ipa_cfg_ep_metadata)(u32 pipe_number, + const struct ipa_ep_cfg_metadata *metadata); +}; + +extern struct ipa_context *ipa_ctx; + +/* public APIs */ +/* + * Connect / Disconnect + */ +int ipa2_connect(const struct ipa_connect_params *in, + struct ipa_sps_params *sps, u32 *clnt_hdl); +int ipa2_disconnect(u32 clnt_hdl); + +/* + * Resume / Suspend + */ +int ipa2_reset_endpoint(u32 clnt_hdl); + +/* + * Remove ep delay + */ +int ipa2_clear_endpoint_delay(u32 clnt_hdl); + +/* + * Disable ep + */ +int ipa2_disable_endpoint(u32 clnt_hdl); + +/* + * Configuration + */ +int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg); + +int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg); + +int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg); + +int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg); + +int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg); + +int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg); + +int ipa2_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ipa_ep_cfg); + +int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg); + +int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int 
ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg); + +int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg); + +int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ipa_ep_cfg); + +int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl); + +/* + * Header removal / addition + */ +int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs); + +int ipa2_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool by_user); + +int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls); + +int ipa2_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user); + +int ipa2_commit_hdr(void); + +int ipa2_reset_hdr(bool user_only); + +int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup); + +int ipa2_put_hdr(u32 hdr_hdl); + +int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy); + +/* + * Header Processing Context + */ +int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs, + bool user_only); + +int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls); + +int ipa2_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls, + bool by_user); + +/* + * Routing + */ +int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules); + +int ipa2_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, + bool user_only); + +int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls); + +int ipa2_commit_rt(enum ipa_ip_type ip); + +int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only); + +int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup); + +int ipa2_put_rt_tbl(u32 rt_tbl_hdl); + +int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in); + +int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules); + +/* + * Filtering + */ +int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules); + +int ipa2_add_flt_rule_usr(struct ipa_ioc_add_flt_rule *rules, + bool user_only); + +int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls); + +int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules); + +int 
ipa2_commit_flt(enum ipa_ip_type ip); + +int ipa2_reset_flt(enum ipa_ip_type ip, bool user_only); + +/* + * NAT + */ +int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem); + +int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init); + +int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma); + +int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del); + +/* + * Messaging + */ +int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback); +int ipa2_resend_wlan_msg(void); +int ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback); +int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta); + +/* + * Interface + */ +int ipa2_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx); +int ipa2_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext); +int ipa2_deregister_intf(const char *name); + +/* + * Aggregation + */ +int ipa2_set_aggr_mode(enum ipa_aggr_mode mode); + +int ipa2_set_qcncm_ndp_sig(char sig[3]); + +int ipa2_set_single_ndp_per_mbim(bool enable); + +/* + * Data path + */ +int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb, + struct ipa_tx_meta *metadata); + +/* + * To transfer multiple data packets + * While passing the data descriptor list, the anchor node + * should be of type struct ipa_tx_data_desc not list_head + */ +int ipa2_tx_dp_mul(enum ipa_client_type dst, + struct ipa_tx_data_desc *data_desc); + +void ipa2_free_skb(struct ipa_rx_data *data); + +/* + * System pipes + */ +int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl); + +int ipa2_teardown_sys_pipe(u32 clnt_hdl); + +int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in, + unsigned long *ipa_bam_hdl, + u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status); + +int ipa2_sys_teardown(u32 clnt_hdl); + +int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl, + unsigned long gsi_ev_hdl); + 
+int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out); +int ipa2_disconnect_wdi_pipe(u32 clnt_hdl); +int ipa2_enable_wdi_pipe(u32 clnt_hdl); +int ipa2_disable_wdi_pipe(u32 clnt_hdl); +int ipa2_resume_wdi_pipe(u32 clnt_hdl); +int ipa2_suspend_wdi_pipe(u32 clnt_hdl); +int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats); +u16 ipa2_get_smem_restr_bytes(void); +int ipa2_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes); +int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp); +int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl, + struct ipa_ntn_conn_in_params *params); +int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv); +void ipa2_ntn_uc_dereg_rdyCB(void); + +int ipa2_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out, + ipa_wdi_meter_notifier_cb wdi_notify); +int ipa2_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); +int ipa2_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); +int ipa2_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); + +/* + * To retrieve doorbell physical address of + * wlan pipes + */ +int ipa2_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out); + +/* + * To register uC ready callback if uC not ready + * and also check uC readiness + * if uC not ready only, register callback + */ +int ipa2_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param); +/* + * To de-register uC ready callback + */ +int ipa2_uc_dereg_rdyCB(void); + +int ipa2_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en, + phys_addr_t pa, struct sg_table *sgt, size_t len, bool device, + unsigned long *iova); +/* + * Tethering bridge (Rmnet / MBIM) + */ +int ipa2_teth_bridge_init(struct teth_bridge_init_params *params); + +int ipa2_teth_bridge_disconnect(enum ipa_client_type client); + +int ipa2_teth_bridge_connect(struct 
teth_bridge_connect_params *connect_params); + +/* + * Tethering client info + */ +void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink); + +enum ipacm_client_enum ipa2_get_client(int pipe_idx); + +bool ipa2_get_client_uplink(int pipe_idx); + +int ipa2_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats); + +int ipa2_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota); + +/* + * IPADMA + */ +int ipa2_dma_init(void); + +int ipa2_dma_enable(void); + +int ipa2_dma_disable(void); + +int ipa2_dma_sync_memcpy(u64 dest, u64 src, int len); + +int ipa2_dma_async_memcpy(u64 dest, u64 src, int len, + void (*user_cb)(void *user1), void *user_param); + +int ipa2_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); + +void ipa2_dma_destroy(void); + +/* + * MHI APIs for IPA MHI client driver + */ +int ipa2_init_mhi(struct ipa_mhi_init_params *params); + +int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params); + +int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl); + +int ipa2_disconnect_mhi_pipe(u32 clnt_hdl); + +bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client); + +int ipa2_disable_sps_pipe(enum ipa_client_type client); + +int ipa2_mhi_reset_channel_internal(enum ipa_client_type client); + +int ipa2_mhi_start_channel_internal(enum ipa_client_type client); + +int ipa2_mhi_suspend_ul_channels(void); + +int ipa2_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index); + +/* + * mux id + */ +int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in); + +/* + * interrupts + */ +int ipa2_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data); + +int ipa2_remove_interrupt_handler(enum ipa_irq_type interrupt); + +/* + * Miscellaneous + */ +void ipa2_bam_reg_dump(void); + +int ipa2_get_ep_mapping(enum ipa_client_type 
client); + +bool ipa2_is_ready(void); + +void ipa2_proxy_clk_vote(void); +void ipa2_proxy_clk_unvote(void); + +bool ipa2_is_client_handle_valid(u32 clnt_hdl); + +enum ipa_client_type ipa2_get_client_mapping(int pipe_idx); + +enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx); + +bool ipa2_get_modem_cfg_emb_pipe_flt(void); + +/* internal functions */ + +int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, + struct ipa_api_controller *api_ctrl); + +int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc, + bool in_atomic); +int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc, + bool in_atomic); +int ipa2_get_ep_mapping(enum ipa_client_type client); + +int ipa_generate_hw_rule(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + u8 **buf, + u16 *en_rule); +int ipa_init_hw(void); +struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name); +int ipa_set_single_ndp_per_mbim(bool enable); +int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable); +void ipa_debugfs_init(void); +void ipa_debugfs_remove(void); + +void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size); + +void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time); + +#ifdef IPA_DEBUG +#define IPA_DUMP_BUFF(base, phy_base, size) \ + ipa_dump_buff_internal(base, phy_base, size) +#else +#define IPA_DUMP_BUFF(base, phy_base, size) +#endif +int ipa_controller_static_bind(struct ipa_controller *controller, + enum ipa_hw_type ipa_hw_type); +int ipa_cfg_route(struct ipa_route *route); +int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr); +int ipa_cfg_filter(u32 disable); +int ipa_pipe_mem_init(u32 start_ofst, u32 size); +int ipa_pipe_mem_alloc(u32 *ofst, u32 size); +int ipa_pipe_mem_free(u32 ofst, u32 size); +int ipa_straddle_boundary(u32 start, u32 end, u32 boundary); +struct ipa_context *ipa_get_ctx(void); +void ipa_enable_clks(void); +void ipa_disable_clks(void); +void ipa2_inc_client_enable_clks(struct 
ipa_active_client_logging_info *id); +int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info + *id); +void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id); +void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id, + bool int_ctx); +void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id, + bool int_ctx); +int ipa2_active_clients_log_print_buffer(char *buf, int size); +int ipa2_active_clients_log_print_table(char *buf, int size); +void ipa2_active_clients_log_clear(void); +int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev); +int __ipa_del_rt_rule(u32 rule_hdl); +int __ipa_del_hdr(u32 hdr_hdl, bool by_user); +int __ipa_release_hdr(u32 hdr_hdl); +int __ipa_release_hdr_proc_ctx(u32 proc_ctx_hdl); +int _ipa_read_gen_reg_v1_1(char *buff, int max_len); +int _ipa_read_gen_reg_v2_0(char *buff, int max_len); +int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe); +int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe); +void _ipa_write_dbg_cnt_v1_1(int option); +void _ipa_write_dbg_cnt_v2_0(int option); +int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len); +int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len); +void _ipa_enable_clks_v1_1(void); +void _ipa_enable_clks_v2_0(void); +void _ipa_disable_clks_v1_1(void); +void _ipa_disable_clks_v2_0(void); + +static inline u32 ipa_read_reg(void *base, u32 offset) +{ + return ioread32(base + offset); +} + +static inline u32 ipa_read_reg_field(void *base, u32 offset, + u32 mask, u32 shift) +{ + return (ipa_read_reg(base, offset) & mask) >> shift; +} + +static inline void ipa_write_reg(void *base, u32 offset, u32 val) +{ + iowrite32(val, base + offset); +} + +ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos); +int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count); +int ipa_query_intf(struct ipa_ioc_query_intf *lookup); +int ipa_query_intf_tx_props(struct 
ipa_ioc_query_intf_tx_props *tx); +int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx); +int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext); + +void wwan_cleanup(void); + +int teth_bridge_driver_init(void); +void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data); + +int _ipa_init_sram_v2(void); +int _ipa_init_sram_v2_5(void); +int _ipa_init_sram_v2_6L(void); +int _ipa_init_hdr_v2(void); +int _ipa_init_hdr_v2_5(void); +int _ipa_init_hdr_v2_6L(void); +int _ipa_init_rt4_v2(void); +int _ipa_init_rt6_v2(void); +int _ipa_init_flt4_v2(void); +int _ipa_init_flt6_v2(void); + +int __ipa_commit_flt_v1_1(enum ipa_ip_type ip); +int __ipa_commit_flt_v2(enum ipa_ip_type ip); +int __ipa_commit_rt_v1_1(enum ipa_ip_type ip); +int __ipa_commit_rt_v2(enum ipa_ip_type ip); +int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf); +int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf); +int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf); + +int __ipa_commit_hdr_v1_1(void); +int __ipa_commit_hdr_v2(void); +int __ipa_commit_hdr_v2_5(void); +int __ipa_commit_hdr_v2_6L(void); +int ipa_generate_flt_eq(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_attrib); +void ipa_skb_recycle(struct sk_buff *skb); +void ipa_install_dflt_flt_rules(u32 ipa_ep_idx); +void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx); + +int ipa_enable_data_path(u32 clnt_hdl); +int ipa_disable_data_path(u32 clnt_hdl); +int ipa2_enable_force_clear(u32 request_id, bool throttle_source, + u32 source_pipe_bitmask); +int ipa2_disable_force_clear(u32 request_id); +int ipa_id_alloc(void *ptr); +void *ipa_id_find(u32 id); +void ipa_id_remove(u32 id); + +int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage, + u32 bandwidth_mbps); + +int ipa2_cfg_ep_status(u32 clnt_hdl, + const struct 
ipa_ep_cfg_status *ipa_ep_cfg); +int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity); +int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity); + +int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name name); +int ipa2_suspend_resource_sync(enum ipa_rm_resource_name name); +int ipa2_resume_resource(enum ipa_rm_resource_name name); +bool ipa_should_pipe_be_suspended(enum ipa_client_type client); +int ipa_tag_aggr_force_close(int pipe_num); + +void ipa_active_clients_lock(void); +int ipa_active_clients_trylock(unsigned long *flags); +void ipa_active_clients_unlock(void); +void ipa_active_clients_trylock_unlock(unsigned long *flags); +int ipa2_wdi_init(void); +int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id); +int ipa_tag_process(struct ipa_desc *desc, int num_descs, + unsigned long timeout); + +int ipa_q6_pre_shutdown_cleanup(void); +int ipa_q6_post_shutdown_cleanup(void); +int ipa_init_q6_smem(void); +int ipa_q6_monitor_holb_mitigation(bool enable); + +int ipa_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect, + enum ipa_client_type ipa_client); + +int ipa_uc_interface_init(void); +int ipa_uc_reset_pipe(enum ipa_client_type ipa_client); +int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable); +int ipa2_uc_state_check(void); +int ipa_uc_loaded_check(void); +int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status, + bool polling_mode, unsigned long timeout_jiffies); +void ipa_register_panic_hdlr(void); +void ipa_uc_register_handlers(enum ipa_hw_features feature, + struct ipa_uc_hdlrs *hdlrs); +int create_nat_device(void); +int ipa_uc_notify_clk_state(bool enabled); +void ipa_dma_async_memcpy_notify_cb(void *priv, + enum ipa_dp_evt_type evt, unsigned long data); + +int ipa_uc_update_hw_flags(u32 flags); + +int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void)); +void ipa2_uc_mhi_cleanup(void); +int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd); +int 
ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, + u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx, + u32 first_evt_idx); +int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, + int contexArrayIndex, int channelDirection); +int ipa2_uc_mhi_reset_channel(int channelHandle); +int ipa2_uc_mhi_suspend_channel(int channelHandle); +int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected); +int ipa2_uc_mhi_stop_event_update_channel(int channelHandle); +int ipa2_uc_mhi_print_stats(char *dbg_buff, int size); +int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); +u32 ipa_get_num_pipes(void); +u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys); +struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void); +struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void); +struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void); +struct iommu_domain *ipa_get_uc_smmu_domain(void); +struct iommu_domain *ipa2_get_wlan_smmu_domain(void); +int ipa2_ap_suspend(struct device *dev); +int ipa2_ap_resume(struct device *dev); +struct iommu_domain *ipa2_get_smmu_domain(void); +struct device *ipa2_get_dma_dev(void); +int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); +int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); +void ipa_suspend_apps_pipes(bool suspend); +void ipa_update_repl_threshold(enum ipa_client_type ipa_client); +void ipa_flow_control(enum ipa_client_type ipa_client, bool enable, + uint32_t qmap_id); +int ipa2_restore_suspend_handler(void); +void ipa_sps_irq_control_all(bool enable); +void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client); +void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client); +int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot); +int ipa2_rx_poll(u32 clnt_hdl, int budget); +void ipa2_recycle_wan_skb(struct sk_buff *skb); +int ipa_ntn_init(void); +int 
ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats); +int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), + void *user_data); +struct device *ipa2_get_pdev(void); +#endif /* _IPA_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c new file mode 100644 index 000000000000..ad4ffe81cdd1 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2016, 2020, The Linux Foundation. All rights reserved. + */ +#include +#include "ipa_i.h" + +#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq" +#define IPA_IRQ_NUM_MAX 32 + +struct ipa_interrupt_info { + ipa_irq_handler_t handler; + enum ipa_irq_type interrupt; + void *private_data; + bool deferred_flag; +}; + +struct ipa_interrupt_work_wrap { + struct work_struct interrupt_work; + ipa_irq_handler_t handler; + enum ipa_irq_type interrupt; + void *private_data; + void *interrupt_data; +}; + +static struct ipa_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX]; +static struct workqueue_struct *ipa_interrupt_wq; +static u32 ipa_ee; + +static void ipa_interrupt_defer(struct work_struct *work); +static DECLARE_WORK(ipa_interrupt_defer_work, ipa_interrupt_defer); + +static int ipa2_irq_mapping[IPA_IRQ_MAX] = { + [IPA_BAD_SNOC_ACCESS_IRQ] = 0, + [IPA_EOT_COAL_IRQ] = 1, + [IPA_UC_IRQ_0] = 2, + [IPA_UC_IRQ_1] = 3, + [IPA_UC_IRQ_2] = 4, + [IPA_UC_IRQ_3] = 5, + [IPA_UC_IN_Q_NOT_EMPTY_IRQ] = 6, + [IPA_UC_RX_CMD_Q_NOT_FULL_IRQ] = 7, + [IPA_UC_TX_CMD_Q_NOT_FULL_IRQ] = 8, + [IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ] = 9, + [IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ] = 10, + [IPA_RX_ERR_IRQ] = 11, + [IPA_DEAGGR_ERR_IRQ] = 12, + [IPA_TX_ERR_IRQ] = 13, + [IPA_STEP_MODE_IRQ] = 14, + [IPA_PROC_ERR_IRQ] = 15, + [IPA_TX_SUSPEND_IRQ] = 16, + [IPA_TX_HOLB_DROP_IRQ] = 17, + [IPA_BAM_IDLE_IRQ] = 18, +}; + +static void deferred_interrupt_work(struct work_struct *work) +{ + struct 
ipa_interrupt_work_wrap *work_data = + container_of(work, + struct ipa_interrupt_work_wrap, + interrupt_work); + IPADBG("call handler from workq...\n"); + work_data->handler(work_data->interrupt, work_data->private_data, + work_data->interrupt_data); + kfree(work_data->interrupt_data); + kfree(work_data); +} + +static bool is_valid_ep(u32 ep_suspend_data) +{ + u32 bmsk = 1; + u32 i = 0; + + for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) { + if ((ep_suspend_data & bmsk) && (ipa_ctx->ep[i].valid)) + return true; + bmsk = bmsk << 1; + } + return false; +} + +static int handle_interrupt(int irq_num, bool isr_context) +{ + struct ipa_interrupt_info interrupt_info; + struct ipa_interrupt_work_wrap *work_data; + u32 suspend_data; + void *interrupt_data = NULL; + struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL; + int res; + + interrupt_info = ipa_interrupt_to_cb[irq_num]; + if (interrupt_info.handler == NULL) { + IPAERR("A callback function wasn't set for interrupt num %d\n", + irq_num); + return -EINVAL; + } + + switch (interrupt_info.interrupt) { + case IPA_TX_SUSPEND_IRQ: + IPADBG_LOW("processing TX_SUSPEND interrupt work-around\n"); + suspend_data = ipa_read_reg(ipa_ctx->mmio, + IPA_IRQ_SUSPEND_INFO_EE_n_ADDR(ipa_ee)); + if (!is_valid_ep(suspend_data)) + return 0; + IPADBG_LOW("get interrupt %d\n", suspend_data); + suspend_interrupt_data = + kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC); + if (!suspend_interrupt_data) { + IPAERR("failed allocating suspend_interrupt_data\n"); + return -ENOMEM; + } + suspend_interrupt_data->endpoints = suspend_data; + interrupt_data = suspend_interrupt_data; + break; + default: + break; + } + + /* Force defer processing if in ISR context. 
*/ + if (interrupt_info.deferred_flag || isr_context) { + work_data = kzalloc(sizeof(struct ipa_interrupt_work_wrap), + GFP_ATOMIC); + if (!work_data) { + IPAERR("failed allocating ipa_interrupt_work_wrap\n"); + res = -ENOMEM; + goto fail_alloc_work; + } + INIT_WORK(&work_data->interrupt_work, deferred_interrupt_work); + work_data->handler = interrupt_info.handler; + work_data->interrupt = interrupt_info.interrupt; + work_data->private_data = interrupt_info.private_data; + work_data->interrupt_data = interrupt_data; + queue_work(ipa_interrupt_wq, &work_data->interrupt_work); + + } else { + interrupt_info.handler(interrupt_info.interrupt, + interrupt_info.private_data, + interrupt_data); + kfree(interrupt_data); + } + + return 0; + +fail_alloc_work: + kfree(interrupt_data); + return res; +} + +static inline bool is_uc_irq(int irq_num) +{ + if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 && + ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3) + return true; + else + return false; +} + +static void ipa_process_interrupts(bool isr_context) +{ + u32 reg; + u32 bmsk; + u32 i = 0; + u32 en; + bool uc_irq; + + en = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee)); + reg = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_STTS_EE_n_ADDR(ipa_ee)); + IPADBG_LOW( + "ISR enter\n isr_ctx = %d EN reg = 0x%x STTS reg = 0x%x\n", + isr_context, en, reg); + while (en & reg) { + bmsk = 1; + for (i = 0; i < IPA_IRQ_NUM_MAX; i++) { + if (!(en & reg & bmsk)) { + bmsk = bmsk << 1; + continue; + } + uc_irq = is_uc_irq(i); + /* + * Clear uC interrupt before processing to avoid + * clearing unhandled interrupts + */ + if (uc_irq) + ipa_write_reg(ipa_ctx->mmio, + IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), bmsk); + + /* Process the interrupts */ + handle_interrupt(i, isr_context); + + /* + * Clear non uC interrupt after processing + * to avoid clearing interrupt data + */ + if (!uc_irq) + ipa_write_reg(ipa_ctx->mmio, + IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), bmsk); + + bmsk = bmsk << 1; + } + /* + * 
Check pending interrupts that may have + * been raised since last read + */ + reg = ipa_read_reg(ipa_ctx->mmio, + IPA_IRQ_STTS_EE_n_ADDR(ipa_ee)); + } + IPADBG_LOW("Exit\n"); +} + +static void ipa_interrupt_defer(struct work_struct *work) +{ + IPADBG_LOW("processing interrupts in wq\n"); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa_process_interrupts(false); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG_LOW("Done\n"); +} + +static irqreturn_t ipa_isr(int irq, void *ctxt) +{ + unsigned long flags; + + IPADBG_LOW("Enter\n"); + /* defer interrupt handling in case IPA is not clocked on */ + if (ipa_active_clients_trylock(&flags) == 0) { + IPADBG("defer interrupt processing\n"); + queue_work(ipa_ctx->power_mgmt_wq, &ipa_interrupt_defer_work); + return IRQ_HANDLED; + } + + if (ipa_ctx->ipa_active_clients.cnt == 0) { + IPADBG("defer interrupt processing\n"); + queue_work(ipa_ctx->power_mgmt_wq, &ipa_interrupt_defer_work); + goto bail; + } + + ipa_process_interrupts(true); + IPADBG_LOW("Exit\n"); +bail: + ipa_active_clients_trylock_unlock(&flags); + return IRQ_HANDLED; +} +/** + * ipa2_add_interrupt_handler() - Adds handler to an interrupt type + * @interrupt: Interrupt type + * @handler: The handler to be added + * @deferred_flag: whether the handler processing should be deferred in + * a workqueue + * @private_data: the client's private data + * + * Adds handler to an interrupt type and enable the specific bit + * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled + */ +int ipa2_add_interrupt_handler(enum ipa_irq_type interrupt, + ipa_irq_handler_t handler, + bool deferred_flag, + void *private_data) +{ + u32 val; + u32 bmsk; + int irq_num; + + IPADBG_LOW("interrupt_enum(%d)\n", interrupt); + if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ || + interrupt >= IPA_IRQ_MAX) { + IPAERR("invalid interrupt number %d\n", interrupt); + return -EINVAL; + } + + irq_num = ipa2_irq_mapping[interrupt]; + if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) { + IPAERR("interrupt 
%d not supported\n", interrupt); + WARN_ON(1); + return -EFAULT; + } + + ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag; + ipa_interrupt_to_cb[irq_num].handler = handler; + ipa_interrupt_to_cb[irq_num].private_data = private_data; + ipa_interrupt_to_cb[irq_num].interrupt = interrupt; + + val = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee)); + IPADBG("read IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val); + bmsk = 1 << irq_num; + val |= bmsk; + ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val); + IPADBG_LOW("wrote IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val); + return 0; +} + +/** + * ipa2_remove_interrupt_handler() - Removes handler to an interrupt type + * @interrupt: Interrupt type + * + * Removes the handler and disable the specific bit in IRQ_EN register + */ +int ipa2_remove_interrupt_handler(enum ipa_irq_type interrupt) +{ + u32 val; + u32 bmsk; + int irq_num; + + if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ || + interrupt >= IPA_IRQ_MAX) { + IPAERR("invalid interrupt number %d\n", interrupt); + return -EINVAL; + } + + irq_num = ipa2_irq_mapping[interrupt]; + if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) { + IPAERR("interrupt %d not supported\n", interrupt); + WARN_ON(1); + return -EFAULT; + } + + kfree(ipa_interrupt_to_cb[irq_num].private_data); + ipa_interrupt_to_cb[irq_num].deferred_flag = false; + ipa_interrupt_to_cb[irq_num].handler = NULL; + ipa_interrupt_to_cb[irq_num].private_data = NULL; + ipa_interrupt_to_cb[irq_num].interrupt = -1; + + val = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee)); + bmsk = 1 << irq_num; + val &= ~bmsk; + ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val); + + return 0; +} + +/** + * ipa_interrupts_init() - Initialize the IPA interrupts framework + * @ipa_irq: The interrupt number to allocate + * @ee: Execution environment + * @ipa_dev: The basic device structure representing the IPA driver + * + * - Initialize the ipa_interrupt_to_cb array + * - Clear interrupts 
status + * - Register the ipa interrupt handler - ipa_isr + * - Enable apps processor wakeup by IPA interrupts + */ +int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev) +{ + int idx; + u32 reg = 0xFFFFFFFF; + int res = 0; + + ipa_ee = ee; + for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) { + ipa_interrupt_to_cb[idx].deferred_flag = false; + ipa_interrupt_to_cb[idx].handler = NULL; + ipa_interrupt_to_cb[idx].private_data = NULL; + ipa_interrupt_to_cb[idx].interrupt = -1; + } + + ipa_interrupt_wq = create_singlethread_workqueue( + INTERRUPT_WORKQUEUE_NAME); + if (!ipa_interrupt_wq) { + IPAERR("workqueue creation failed\n"); + return -ENOMEM; + } + + /*Clearing interrupts status*/ + ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), reg); + + res = request_irq(ipa_irq, (irq_handler_t) ipa_isr, + IRQF_TRIGGER_RISING, "ipa", ipa_dev); + if (res) { + IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq); + return -ENODEV; + } + IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq); + + res = enable_irq_wake(ipa_irq); + if (res) + IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n", + ipa_irq, res); + else + IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq); + + return 0; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c new file mode 100644 index 000000000000..18b6e2ffa578 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c @@ -0,0 +1,835 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2019, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include "ipa_i.h" +#include + +struct ipa_intf { + char name[IPA_RESOURCE_NAME_MAX]; + struct list_head link; + u32 num_tx_props; + u32 num_rx_props; + u32 num_ext_props; + struct ipa_ioc_tx_intf_prop *tx; + struct ipa_ioc_rx_intf_prop *rx; + struct ipa_ioc_ext_intf_prop *ext; + enum ipa_client_type excp_pipe; +}; + +struct ipa_push_msg { + struct ipa_msg_meta meta; + ipa_msg_free_fn callback; + void *buff; + struct list_head link; +}; + +struct ipa_pull_msg { + struct ipa_msg_meta meta; + ipa_msg_pull_fn callback; + struct list_head link; +}; + +/** + * ipa2_register_intf() - register "logical" interface + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * + * Register an interface and its tx and rx properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_register_intf(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx) +{ + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + return ipa2_register_intf_ext(name, tx, rx, NULL); +} + +/** + * ipa2_register_intf_ext() - register "logical" interface which has only + * extended properties + * @name: [in] interface name + * @tx: [in] TX properties of the interface + * @rx: [in] RX properties of the interface + * @ext: [in] EXT properties of the interface + * + * Register an interface and its tx, rx and ext properties, this allows + * configuration of rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_register_intf_ext(const char *name, const struct ipa_tx_intf *tx, + const struct ipa_rx_intf *rx, + const struct ipa_ext_intf *ext) +{ + struct ipa_intf *intf; + u32 len; + + if (name == NULL || (tx == NULL && rx == NULL && ext == 
NULL)) { + IPAERR("invalid params name=%p tx=%p rx=%p ext=%p\n", name, + tx, rx, ext); + return -EINVAL; + } + + if (tx && tx->num_props > IPA_NUM_PROPS_MAX) { + IPAERR("invalid tx num_props=%d max=%d\n", tx->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + if (rx && rx->num_props > IPA_NUM_PROPS_MAX) { + IPAERR("invalid rx num_props=%d max=%d\n", rx->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + if (ext && ext->num_props > IPA_NUM_PROPS_MAX) { + IPAERR("invalid ext num_props=%d max=%d\n", ext->num_props, + IPA_NUM_PROPS_MAX); + return -EINVAL; + } + + len = sizeof(struct ipa_intf); + intf = kzalloc(len, GFP_KERNEL); + if (intf == NULL) { + IPAERR("fail to alloc 0x%x bytes\n", len); + return -ENOMEM; + } + + strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX); + + if (tx) { + intf->num_tx_props = tx->num_props; + len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop); + intf->tx = kmemdup(tx->prop, len, GFP_KERNEL); + if (intf->tx == NULL) { + IPAERR("fail to alloc 0x%x bytes\n", len); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->tx, tx->prop, len); + } + + if (rx) { + intf->num_rx_props = rx->num_props; + len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop); + intf->rx = kmemdup(rx->prop, len, GFP_KERNEL); + if (intf->rx == NULL) { + IPAERR("fail to alloc 0x%x bytes\n", len); + kfree(intf->tx); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->rx, rx->prop, len); + } + + if (ext) { + intf->num_ext_props = ext->num_props; + len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop); + intf->ext = kmemdup(ext->prop, len, GFP_KERNEL); + if (intf->ext == NULL) { + IPAERR("fail to alloc 0x%x bytes\n", len); + kfree(intf->rx); + kfree(intf->tx); + kfree(intf); + return -ENOMEM; + } + memcpy(intf->ext, ext->prop, len); + } + + if (ext && ext->excp_pipe_valid) + intf->excp_pipe = ext->excp_pipe; + else + intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS; + + mutex_lock(&ipa_ctx->lock); + list_add_tail(&intf->link, &ipa_ctx->intf_list); + 
mutex_unlock(&ipa_ctx->lock); + + return 0; +} + +/** + * ipa2_deregister_intf() - de-register previously registered logical interface + * @name: [in] interface name + * + * De-register a previously registered interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_deregister_intf(const char *name) +{ + struct ipa_intf *entry; + struct ipa_intf *next; + int result = -EINVAL; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (name == NULL) { + IPAERR("invalid param name=%p\n", name); + return result; + } + + mutex_lock(&ipa_ctx->lock); + list_for_each_entry_safe(entry, next, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, name)) { + list_del(&entry->link); + kfree(entry->ext); + kfree(entry->rx); + kfree(entry->tx); + kfree(entry); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa_query_intf() - query logical interface properties + * @lookup: [inout] interface name and number of properties + * + * Obtain the handle and number of tx and rx properties for the named + * interface, used as part of querying the tx and rx properties for + * configuration of various rules from user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_query_intf(struct ipa_ioc_query_intf *lookup) +{ + struct ipa_intf *entry; + int result = -EINVAL; + + if (lookup == NULL) { + IPAERR("invalid param lookup=%p\n", lookup); + return result; + } + + mutex_lock(&ipa_ctx->lock); + lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + list_for_each_entry(entry, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, lookup->name)) { + lookup->num_tx_props = entry->num_tx_props; + lookup->num_rx_props = entry->num_rx_props; + lookup->num_ext_props = entry->num_ext_props; + lookup->excp_pipe = entry->excp_pipe; + result = 0; + break; + } + } + 
mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa_query_intf_tx_props() - qeury TX props of an interface + * @tx: [inout] interface tx attributes + * + * Obtain the tx properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx) +{ + struct ipa_intf *entry; + int result = -EINVAL; + + if (tx == NULL) { + IPAERR("invalid param tx=%p\n", tx); + return result; + } + + mutex_lock(&ipa_ctx->lock); + tx->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + list_for_each_entry(entry, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, tx->name)) { + /* add the entry check */ + if (entry->num_tx_props != tx->num_tx_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_tx_props, + tx->num_tx_props); + mutex_unlock(&ipa_ctx->lock); + return result; + } + memcpy(tx->tx, entry->tx, entry->num_tx_props * + sizeof(struct ipa_ioc_tx_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa_query_intf_rx_props() - qeury RX props of an interface + * @rx: [inout] interface rx attributes + * + * Obtain the rx properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx) +{ + struct ipa_intf *entry; + int result = -EINVAL; + + if (rx == NULL) { + IPAERR("invalid param rx=%p\n", rx); + return result; + } + + mutex_lock(&ipa_ctx->lock); + rx->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + list_for_each_entry(entry, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, rx->name)) { + /* add the entry check */ + if (entry->num_rx_props != rx->num_rx_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_rx_props, + rx->num_rx_props); + mutex_unlock(&ipa_ctx->lock); + return result; + } + memcpy(rx->rx, 
entry->rx, entry->num_rx_props * + sizeof(struct ipa_ioc_rx_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->lock); + return result; +} + +/** + * ipa_query_intf_ext_props() - qeury EXT props of an interface + * @ext: [inout] interface ext attributes + * + * Obtain the ext properties for the specified interface + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext) +{ + struct ipa_intf *entry; + int result = -EINVAL; + + if (ext == NULL) { + IPAERR("invalid param ext=%p\n", ext); + return result; + } + + mutex_lock(&ipa_ctx->lock); + list_for_each_entry(entry, &ipa_ctx->intf_list, link) { + if (!strcmp(entry->name, ext->name)) { + /* add the entry check */ + if (entry->num_ext_props != ext->num_ext_props) { + IPAERR("invalid entry number(%u %u)\n", + entry->num_ext_props, + ext->num_ext_props); + mutex_unlock(&ipa_ctx->lock); + return result; + } + memcpy(ext->ext, entry->ext, entry->num_ext_props * + sizeof(struct ipa_ioc_ext_intf_prop)); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->lock); + return result; +} + +static void ipa2_send_msg_free(void *buff, u32 len, u32 type) +{ + kfree(buff); +} + +static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff) +{ + struct ipa_push_msg *msg_dup; + struct ipa_wlan_msg_ex *event_ex_cur_con = NULL; + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa_wlan_msg *event_ex_cur_discon = NULL; + void *data_dup = NULL; + struct ipa_push_msg *entry; + struct ipa_push_msg *next; + int cnt = 0, total = 0, max = 0; + uint8_t mac[IPA_MAC_ADDR_SIZE]; + uint8_t mac2[IPA_MAC_ADDR_SIZE]; + + if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) { + /* debug print */ + event_ex_cur_con = buff; + for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) { + if (event_ex_cur_con->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + 
IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n", + event_ex_cur_con->attribs[cnt].u.mac_addr[0], + event_ex_cur_con->attribs[cnt].u.mac_addr[1], + event_ex_cur_con->attribs[cnt].u.mac_addr[2], + event_ex_cur_con->attribs[cnt].u.mac_addr[3], + event_ex_cur_con->attribs[cnt].u.mac_addr[4], + event_ex_cur_con->attribs[cnt].u.mac_addr[5], + meta->msg_type); + } + } + + mutex_lock(&ipa_ctx->msg_wlan_client_lock); + msg_dup = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL); + if (msg_dup == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg_dup->meta = *meta; + if (meta->msg_len > 0 && buff) { + data_dup = kmemdup(buff, meta->msg_len, GFP_KERNEL); + if (data_dup == NULL) { + IPAERR("fail to alloc data_dup container\n"); + kfree(msg_dup); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + memcpy(data_dup, buff, meta->msg_len); + msg_dup->buff = data_dup; + msg_dup->callback = ipa2_send_msg_free; + } + list_add_tail(&msg_dup->link, &ipa_ctx->msg_wlan_client_list); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + } + + /* remove the cache */ + if (meta->msg_type == WLAN_CLIENT_DISCONNECT) { + /* debug print */ + event_ex_cur_discon = buff; + IPADBG("Mac %02x:%02x:%02x:%02x:%02x:%02x,msg %d\n", + event_ex_cur_discon->mac_addr[0], + event_ex_cur_discon->mac_addr[1], + event_ex_cur_discon->mac_addr[2], + event_ex_cur_discon->mac_addr[3], + event_ex_cur_discon->mac_addr[4], + event_ex_cur_discon->mac_addr[5], + meta->msg_type); + memcpy(mac2, + event_ex_cur_discon->mac_addr, + sizeof(mac2)); + + mutex_lock(&ipa_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, + &ipa_ctx->msg_wlan_client_list, + link) { + event_ex_list = entry->buff; + max = event_ex_list->num_of_attribs; + for (cnt = 0; cnt < max; cnt++) { + memcpy(mac, + event_ex_list->attribs[cnt].u.mac_addr, + sizeof(mac)); + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + 
pr_debug("%02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], + mac[3], mac[4], mac[5]); + + /* compare to delete one*/ + if (memcmp(mac2, + mac, + sizeof(mac)) == 0) { + IPADBG("clean %d\n", total); + list_del(&entry->link); + kfree(entry); + break; + } + } + } + total++; + } + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + } + return 0; +} + +/** + * ipa2_send_msg() - Send "message" from kernel client to IPA driver + * @meta: [in] message meta-data + * @buff: [in] the payload for message + * @callback: [in] free callback + * + * Client supplies the message meta-data and payload which IPA driver buffers + * till read by user-space. After read from user space IPA driver invokes the + * callback supplied to free the message payload. Client must not touch/free + * the message payload after calling this API. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff, + ipa_msg_free_fn callback) +{ + struct ipa_push_msg *msg; + void *data = NULL; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (meta == NULL || (buff == NULL && callback != NULL) || + (buff != NULL && callback == NULL)) { + IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n", + meta, buff, callback); + return -EINVAL; + } + + if (meta->msg_type >= IPA_EVENT_MAX_NUM) { + IPAERR_RL("unsupported message type %d\n", meta->msg_type); + return -EINVAL; + } + + msg = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL); + if (msg == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + return -ENOMEM; + } + + msg->meta = *meta; + if (meta->msg_len > 0 && buff) { + data = kmemdup(buff, meta->msg_len, GFP_KERNEL); + if (data == NULL) { + IPAERR("fail to alloc data container\n"); + kfree(msg); + return -ENOMEM; + } + msg->buff = data; + msg->callback = ipa2_send_msg_free; + } + + mutex_lock(&ipa_ctx->msg_lock); + list_add_tail(&msg->link, 
&ipa_ctx->msg_list); + /* support for softap client event cache */ + if (wlan_msg_process(meta, buff)) + IPAERR("wlan_msg_process failed\n"); + + /* unlock only after process */ + mutex_unlock(&ipa_ctx->msg_lock); + IPA_STATS_INC_CNT(ipa_ctx->stats.msg_w[meta->msg_type]); + + wake_up(&ipa_ctx->msg_waitq); + if (buff) + callback(buff, meta->msg_len, meta->msg_type); + + return 0; +} + +/** + * ipa2_resend_wlan_msg() - Resend cached "message" to IPACM + * + * resend wlan client connect events to user-space + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_resend_wlan_msg(void) +{ + struct ipa_wlan_msg_ex *event_ex_list = NULL; + struct ipa_push_msg *entry; + struct ipa_push_msg *next; + int cnt = 0, total = 0; + struct ipa_push_msg *msg; + void *data = NULL; + + IPADBG("\n"); + + mutex_lock(&ipa_ctx->msg_wlan_client_lock); + list_for_each_entry_safe(entry, next, &ipa_ctx->msg_wlan_client_list, + link) { + + event_ex_list = entry->buff; + for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) { + if (event_ex_list->attribs[cnt].attrib_type == + WLAN_HDR_ATTRIB_MAC_ADDR) { + IPADBG("%d-Mac %02x:%02x:%02x:%02x:%02x:%02x\n", + total, + event_ex_list->attribs[cnt].u.mac_addr[0], + event_ex_list->attribs[cnt].u.mac_addr[1], + event_ex_list->attribs[cnt].u.mac_addr[2], + event_ex_list->attribs[cnt].u.mac_addr[3], + event_ex_list->attribs[cnt].u.mac_addr[4], + event_ex_list->attribs[cnt].u.mac_addr[5]); + } + } + + msg = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL); + if (msg == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg->meta = entry->meta; + data = kmemdup(entry->buff, entry->meta.msg_len, GFP_KERNEL); + if (data == NULL) { + IPAERR("fail to alloc data container\n"); + kfree(msg); + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return -ENOMEM; + } + msg->buff = data; + msg->callback = ipa2_send_msg_free; + 
mutex_lock(&ipa_ctx->msg_lock); + list_add_tail(&msg->link, &ipa_ctx->msg_list); + mutex_unlock(&ipa_ctx->msg_lock); + wake_up(&ipa_ctx->msg_waitq); + + total++; + } + mutex_unlock(&ipa_ctx->msg_wlan_client_lock); + return 0; +} + +/** + * ipa2_register_pull_msg() - register pull message type + * @meta: [in] message meta-data + * @callback: [in] pull callback + * + * Register message callback by kernel client with IPA driver for IPA driver to + * pull message on-demand. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback) +{ + struct ipa_pull_msg *msg; + + if (meta == NULL || callback == NULL) { + IPAERR("invalid param meta=%p callback=%p\n", meta, callback); + return -EINVAL; + } + + msg = kzalloc(sizeof(struct ipa_pull_msg), GFP_KERNEL); + if (msg == NULL) { + IPAERR("fail to alloc ipa_msg container\n"); + return -ENOMEM; + } + + msg->meta = *meta; + msg->callback = callback; + + mutex_lock(&ipa_ctx->msg_lock); + list_add_tail(&msg->link, &ipa_ctx->pull_msg_list); + mutex_unlock(&ipa_ctx->msg_lock); + + return 0; +} + +/** + * ipa2_deregister_pull_msg() - De-register pull message type + * @meta: [in] message meta-data + * + * De-register "message" by kernel client from IPA driver + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta) +{ + struct ipa_pull_msg *entry; + struct ipa_pull_msg *next; + int result = -EINVAL; + + if (meta == NULL) { + IPAERR("invalid param name=%p\n", meta); + return result; + } + + mutex_lock(&ipa_ctx->msg_lock); + list_for_each_entry_safe(entry, next, &ipa_ctx->pull_msg_list, link) { + if (entry->meta.msg_len == meta->msg_len && + entry->meta.msg_type == meta->msg_type) { + list_del(&entry->link); + kfree(entry); + result = 0; + break; + } + } + mutex_unlock(&ipa_ctx->msg_lock); + return 
result; +} + +/** + * ipa_read() - read message from IPA device + * @filp: [in] file pointer + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * @f_pos: [inout] file position + * + * Uer-space should continually read from /dev/ipa, read wll block when there + * are no messages to read. Upon return, user-space should read the ipa_msg_meta + * from the start of the buffer to know what type of message was read and its + * length in the remainder of the buffer. Buffer supplied must be big enough to + * hold the message meta-data and the largest defined message type + * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ +ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, + loff_t *f_pos) +{ + char __user *start; + struct ipa_push_msg *msg = NULL; + int ret; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + int locked; + + start = buf; + + add_wait_queue(&ipa_ctx->msg_waitq, &wait); + while (1) { + mutex_lock(&ipa_ctx->msg_lock); + locked = 1; + if (!list_empty(&ipa_ctx->msg_list)) { + msg = list_first_entry(&ipa_ctx->msg_list, + struct ipa_push_msg, link); + list_del(&msg->link); + } + + if (msg) { + IPADBG("msg=%pK\n", msg); + locked = 0; + mutex_unlock(&ipa_ctx->msg_lock); + if (count < sizeof(struct ipa_msg_meta)) { + kfree(msg); + msg = NULL; + ret = -EFAULT; + break; + } + if (copy_to_user(buf, &msg->meta, + sizeof(struct ipa_msg_meta))) { + kfree(msg); + msg = NULL; + ret = -EFAULT; + break; + } + buf += sizeof(struct ipa_msg_meta); + count -= sizeof(struct ipa_msg_meta); + if (msg->buff) { + if (count >= msg->meta.msg_len) { + if (copy_to_user(buf, msg->buff, + msg->meta.msg_len)) { + kfree(msg); + msg = NULL; + ret = -EFAULT; + break; + } + } else { + kfree(msg); + msg = NULL; + ret = -EFAULT; + break; + } + buf += msg->meta.msg_len; + count -= msg->meta.msg_len; + msg->callback(msg->buff, msg->meta.msg_len, + msg->meta.msg_type); + } + IPA_STATS_INC_CNT( + 
ipa_ctx->stats.msg_r[msg->meta.msg_type]); + kfree(msg); + msg = NULL; + } + + ret = -EAGAIN; + if (filp->f_flags & O_NONBLOCK) + break; + + ret = -EINTR; + if (signal_pending(current)) + break; + + if (start != buf) + break; + + locked = 0; + mutex_unlock(&ipa_ctx->msg_lock); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + + remove_wait_queue(&ipa_ctx->msg_waitq, &wait); + if (start != buf && ret != -EFAULT) + ret = buf - start; + + if (locked) + mutex_unlock(&ipa_ctx->msg_lock); + + return ret; +} + +/** + * ipa_pull_msg() - pull the specified message from client + * @meta: [in] message meta-data + * @buf: [out] buffer to read into + * @count: [in] size of above buffer + * + * Populate the supplied buffer with the pull message which is fetched + * from client, the message must have previously been registered with + * the IPA driver + * + * Returns: how many bytes copied to buffer + * + * Note: Should not be called from atomic context + */ +int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count) +{ + struct ipa_pull_msg *entry; + int result = -EINVAL; + + if (meta == NULL || buff == NULL || !count) { + IPAERR_RL("invalid param name=%p buff=%p count=%zu\n", + meta, buff, count); + return result; + } + + mutex_lock(&ipa_ctx->msg_lock); + list_for_each_entry(entry, &ipa_ctx->pull_msg_list, link) { + if (entry->meta.msg_len == meta->msg_len && + entry->meta.msg_type == meta->msg_type) { + result = entry->callback(buff, count, meta->msg_type); + break; + } + } + mutex_unlock(&ipa_ctx->msg_lock); + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c new file mode 100644 index 000000000000..72f4b20069f4 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c @@ -0,0 +1,336 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2016, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" +#include "ipa_qmi_service.h" + +#define IPA_MHI_DRV_NAME "ipa_mhi" +#define IPA_MHI_DBG(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_DBG_LOW(fmt, args...) \ + do { \ + pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_ERR(fmt, args...) \ + do { \ + pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPA_MHI_FUNC_ENTRY() \ + IPA_MHI_DBG_LOW("ENTRY\n") +#define IPA_MHI_FUNC_EXIT() \ + IPA_MHI_DBG_LOW("EXIT\n") + + +bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client) +{ + u32 pipe_idx; + bool pending; + + pipe_idx = ipa2_get_ep_mapping(client); + if (sps_pipe_pending_desc(ipa_ctx->bam_handle, + pipe_idx, &pending)) { + IPA_MHI_ERR("sps_pipe_pending_desc failed\n"); + WARN_ON(1); + return false; + } + + return !pending; +} + +int ipa2_disable_sps_pipe(enum ipa_client_type client) +{ + int ipa_ep_index; + int res; + + ipa_ep_index = ipa2_get_ep_mapping(client); + + res = sps_pipe_disable(ipa_ctx->bam_handle, ipa_ep_index); + if (res) { + IPA_MHI_ERR("sps_pipe_disable fail %d\n", res); + return res; + } + + return 0; +} + +int ipa2_mhi_reset_channel_internal(enum ipa_client_type client) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa_disable_data_path(ipa2_get_ep_mapping(client)); + if (res) { + 
IPA_MHI_ERR("ipa_disable_data_path failed %d\n", res); + return res; + } + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +int ipa2_mhi_start_channel_internal(enum ipa_client_type client) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + res = ipa_enable_data_path(ipa2_get_ep_mapping(client)); + if (res) { + IPA_MHI_ERR("ipa_enable_data_path failed %d\n", res); + return res; + } + IPA_MHI_FUNC_EXIT(); + + return 0; +} + +int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + + if (!params) { + IPA_MHI_ERR("null args\n"); + return -EINVAL; + } + + if (ipa2_uc_state_check()) { + IPA_MHI_ERR("IPA uc is not loaded\n"); + return -EAGAIN; + } + + /* Initialize IPA MHI engine */ + res = ipa_uc_mhi_init_engine(params->uC.msi, params->uC.mmio_addr, + params->uC.host_ctrl_addr, params->uC.host_data_addr, + params->uC.first_ch_idx, params->uC.first_er_idx); + if (res) { + IPA_MHI_ERR("failed to start MHI engine %d\n", res); + goto fail_init_engine; + } + + /* Update UL/DL sync if valid */ + res = ipa2_uc_mhi_send_dl_ul_sync_info( + params->uC.ipa_cached_dl_ul_sync_info); + if (res) { + IPA_MHI_ERR("failed to update ul/dl sync %d\n", res); + goto fail_init_engine; + } + + IPA_MHI_FUNC_EXIT(); + return 0; + +fail_init_engine: + return res; +} + +/** + * ipa2_connect_mhi_pipe() - Connect pipe to IPA and start corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel start. + * This function is called after MHI engine was started. 
+ * This function is doing the following: + * - Send command to uC to start corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in, + u32 *clnt_hdl) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx; + int res; + + IPA_MHI_FUNC_ENTRY(); + + if (!in || !clnt_hdl) { + IPA_MHI_ERR("NULL args\n"); + return -EINVAL; + } + + if (in->sys->client >= IPA_CLIENT_MAX) { + IPA_MHI_ERR("bad parm client:%d\n", in->sys->client); + return -EINVAL; + } + + ipa_ep_idx = ipa2_get_ep_mapping(in->sys->client); + if (ipa_ep_idx == -1) { + IPA_MHI_ERR("Invalid client.\n"); + return -EINVAL; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n", + in->sys->client, in->start.uC.index, in->start.uC.id); + + if (ep->valid == 1) { + IPA_MHI_ERR("EP already allocated.\n"); + goto fail_ep_exists; + } + + memset(ep, 0, offsetof(struct ipa_ep_context, sys)); + ep->valid = 1; + ep->skip_ep_cfg = in->sys->skip_ep_cfg; + ep->client = in->sys->client; + ep->client_notify = in->sys->notify; + ep->priv = in->sys->priv; + ep->keep_ipa_awake = in->sys->keep_ipa_awake; + + /* start channel in uC */ + if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_INVALID) { + IPA_MHI_DBG("Initializing channel\n"); + res = ipa_uc_mhi_init_channel(ipa_ep_idx, in->start.uC.index, + in->start.uC.id, + (IPA_CLIENT_IS_PROD(ep->client) ? 
1 : 2)); + if (res) { + IPA_MHI_ERR("init_channel failed %d\n", res); + goto fail_init_channel; + } + } else if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_DISABLE) { + IPA_MHI_DBG("Starting channel\n"); + res = ipa_uc_mhi_resume_channel(in->start.uC.index, false); + if (res) { + IPA_MHI_ERR("init_channel failed %d\n", res); + goto fail_init_channel; + } + } else { + IPA_MHI_ERR("Invalid channel state %d\n", in->start.uC.state); + goto fail_init_channel; + } + + res = ipa_enable_data_path(ipa_ep_idx); + if (res) { + IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res, + ipa_ep_idx); + goto fail_enable_dp; + } + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto fail_ep_cfg; + } + if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) { + IPAERR("fail to configure status of EP.\n"); + goto fail_ep_cfg; + } + IPA_MHI_DBG("ep configuration successful\n"); + } else { + IPA_MHI_DBG("skipping ep configuration\n"); + } + + *clnt_hdl = ipa_ep_idx; + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys->client)) + ipa_install_dflt_flt_rules(ipa_ep_idx); + + ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg; + IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys->client, + ipa_ep_idx); + + IPA_MHI_FUNC_EXIT(); + + return 0; + +fail_ep_cfg: + ipa_disable_data_path(ipa_ep_idx); +fail_enable_dp: + ipa_uc_mhi_reset_channel(in->start.uC.index); +fail_init_channel: + memset(ep, 0, offsetof(struct ipa_ep_context, sys)); +fail_ep_exists: + return -EPERM; +} + +/** + * ipa2_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding + * MHI channel + * @in: connect parameters + * @clnt_hdl: [out] client handle for this pipe + * + * This function is called by IPA MHI client driver on MHI channel reset. + * This function is called after MHI channel was started. 
+ * This function is doing the following: + * - Send command to uC to reset corresponding MHI channel + * - Configure IPA EP control + * + * Return codes: 0 : success + * negative : error + */ +int ipa2_disconnect_mhi_pipe(u32 clnt_hdl) +{ + IPA_MHI_FUNC_ENTRY(); + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes) { + IPAERR("invalid handle %d\n", clnt_hdl); + return -EINVAL; + } + + if (ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("pipe was not connected %d\n", clnt_hdl); + return -EINVAL; + } + + ipa_ctx->ep[clnt_hdl].valid = 0; + + ipa_delete_dflt_flt_rules(clnt_hdl); + + IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl); + IPA_MHI_FUNC_EXIT(); + return 0; +} + +int ipa2_mhi_resume_channels_internal(enum ipa_client_type client, + bool LPTransitionRejected, bool brstmode_enabled, + union __packed gsi_channel_scratch ch_scratch, u8 index) +{ + int res; + + IPA_MHI_FUNC_ENTRY(); + res = ipa_uc_mhi_resume_channel(index, LPTransitionRejected); + if (res) { + IPA_MHI_ERR("failed to suspend channel %u error %d\n", + index, res); + return res; + } + + IPA_MHI_FUNC_EXIT(); + return 0; +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("IPA MHI driver"); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c new file mode 100644 index 000000000000..862943461ac9 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c @@ -0,0 +1,878 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" + +#define IPA_NAT_PHYS_MEM_OFFSET 0 +#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE + +#define IPA_NAT_SYSTEM_MEMORY 0 +#define IPA_NAT_SHARED_MEMORY 1 +#define IPA_NAT_TEMP_MEM_SIZE 128 + +enum nat_table_type { + IPA_NAT_BASE_TBL = 0, + IPA_NAT_EXPN_TBL = 1, + IPA_NAT_INDX_TBL = 2, + IPA_NAT_INDEX_EXPN_TBL = 3, +}; + +#define NAT_TABLE_ENTRY_SIZE_BYTE 32 +#define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4 + +/* + * Max NAT table entries is limited 1000 entries. + * Limit the memory size required by user to prevent kernel memory starvation + */ +#define IPA_TABLE_MAX_ENTRIES 1000 +#define MAX_ALLOC_NAT_SIZE (IPA_TABLE_MAX_ENTRIES * NAT_TABLE_ENTRY_SIZE_BYTE) + +static int ipa_nat_vma_fault_remap( + struct vm_area_struct *vma, struct vm_fault *vmf) +{ + IPADBG("\n"); + vmf->page = NULL; + + return VM_FAULT_SIGBUS; +} + +/* VMA related file operations functions */ +static const struct vm_operations_struct ipa_nat_remap_vm_ops = { + .fault = ipa_nat_vma_fault_remap, +}; + +static int ipa_nat_open(struct inode *inode, struct file *filp) +{ + struct ipa_nat_mem *nat_ctx; + + IPADBG("\n"); + nat_ctx = container_of(inode->i_cdev, struct ipa_nat_mem, cdev); + filp->private_data = nat_ctx; + IPADBG("return\n"); + + return 0; +} + +static int ipa_nat_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vsize = vma->vm_end - vma->vm_start; + struct ipa_nat_mem *nat_ctx = (struct ipa_nat_mem *)filp->private_data; + unsigned long phys_addr; + int result; + + mutex_lock(&nat_ctx->lock); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (nat_ctx->is_sys_mem) { + IPADBG("Mapping system memory\n"); + if (nat_ctx->is_mapped) { + IPAERR("mapping already exists, only 1 supported\n"); + result = -EINVAL; + goto bail; + } + IPADBG("map sz=0x%zx\n", nat_ctx->size); + result = + dma_mmap_coherent( + ipa_ctx->pdev, vma, + nat_ctx->vaddr, nat_ctx->dma_handle, + nat_ctx->size); + + 
if (result) { + IPAERR("unable to map memory. Err:%d\n", result); + goto bail; + } + ipa_ctx->nat_mem.nat_base_address = nat_ctx->vaddr; + } else { + IPADBG("Mapping shared(local) memory\n"); + IPADBG("map sz=0x%lx\n", vsize); + + if ((IPA_NAT_PHYS_MEM_SIZE == 0) || + (vsize > IPA_NAT_PHYS_MEM_SIZE)) { + result = -EINVAL; + goto bail; + } + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST(IPA_NAT_PHYS_MEM_OFFSET); + + if (remap_pfn_range( + vma, vma->vm_start, + phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) { + IPAERR("remap failed\n"); + result = -EAGAIN; + goto bail; + } + ipa_ctx->nat_mem.nat_base_address = (void *)vma->vm_start; + } + nat_ctx->is_mapped = true; + vma->vm_ops = &ipa_nat_remap_vm_ops; + IPADBG("return\n"); + result = 0; +bail: + mutex_unlock(&nat_ctx->lock); + return result; +} + +static const struct file_operations ipa_nat_fops = { + .owner = THIS_MODULE, + .open = ipa_nat_open, + .mmap = ipa_nat_mmap +}; + +/** + * allocate_temp_nat_memory() - Allocates temp nat memory + * + * Called during nat table delete + */ +void allocate_temp_nat_memory(void) +{ + struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem); + int gfp_flags = GFP_KERNEL | __GFP_ZERO; + + nat_ctx->tmp_vaddr = + dma_alloc_coherent(ipa_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE, + &nat_ctx->tmp_dma_handle, gfp_flags); + + if (nat_ctx->tmp_vaddr == NULL) { + IPAERR("Temp Memory alloc failed\n"); + nat_ctx->is_tmp_mem = false; + return; + } + + nat_ctx->is_tmp_mem = true; + IPADBG("IPA NAT allocated temp memory successfully\n"); +} + +/** + * create_nat_device() - Create the NAT device + * + * Called during ipa init to create nat device + * + * Returns: 0 on success, negative on failure + */ +int create_nat_device(void) +{ + struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem); + int result; + + IPADBG("\n"); + + mutex_lock(&nat_ctx->lock); + nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME); + if (IS_ERR(nat_ctx->class)) { + 
IPAERR("unable to create the class\n"); + result = -ENODEV; + goto vaddr_alloc_fail; + } + result = alloc_chrdev_region(&nat_ctx->dev_num, + 0, + 1, + NAT_DEV_NAME); + if (result) { + IPAERR("alloc_chrdev_region err.\n"); + result = -ENODEV; + goto alloc_chrdev_region_fail; + } + + nat_ctx->dev = + device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx, + "%s", NAT_DEV_NAME); + + if (IS_ERR(nat_ctx->dev)) { + IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev)); + result = -ENODEV; + goto device_create_fail; + } + + cdev_init(&nat_ctx->cdev, &ipa_nat_fops); + nat_ctx->cdev.owner = THIS_MODULE; + nat_ctx->cdev.ops = &ipa_nat_fops; + + result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1); + if (result) { + IPAERR("cdev_add err=%d\n", -result); + goto cdev_add_fail; + } + IPADBG("ipa nat dev added successful. major:%d minor:%d\n", + MAJOR(nat_ctx->dev_num), + MINOR(nat_ctx->dev_num)); + + nat_ctx->is_dev = true; + allocate_temp_nat_memory(); + IPADBG("IPA NAT device created successfully\n"); + result = 0; + goto bail; + +cdev_add_fail: + device_destroy(nat_ctx->class, nat_ctx->dev_num); +device_create_fail: + unregister_chrdev_region(nat_ctx->dev_num, 1); +alloc_chrdev_region_fail: + class_destroy(nat_ctx->class); +vaddr_alloc_fail: + if (nat_ctx->vaddr) { + IPADBG("Releasing system memory\n"); + dma_free_coherent( + ipa_ctx->pdev, nat_ctx->size, + nat_ctx->vaddr, nat_ctx->dma_handle); + nat_ctx->vaddr = NULL; + nat_ctx->dma_handle = 0; + nat_ctx->size = 0; + } + +bail: + mutex_unlock(&nat_ctx->lock); + + return result; +} + +/** + * ipa2_allocate_nat_device() - Allocates memory for the NAT device + * @mem: [in/out] memory parameters + * + * Called by NAT client driver to allocate memory for the NAT entries. Based on + * the request size either shared or system memory will be used. 
+ * + * Returns: 0 on success, negative on failure + */ +int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem) +{ + struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem); + int gfp_flags = GFP_KERNEL | __GFP_ZERO; + int result; + + IPADBG("passed memory size %zu\n", mem->size); + + mutex_lock(&nat_ctx->lock); + if (strcmp(mem->dev_name, NAT_DEV_NAME)) { + IPAERR_RL("Nat device name mismatch\n"); + IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name); + result = -EPERM; + goto bail; + } + + if (!nat_ctx->is_dev) { + IPAERR("Nat device not created successfully during boot up\n"); + result = -EPERM; + goto bail; + } + + if (nat_ctx->is_dev_init) { + IPAERR("Device already init\n"); + result = 0; + goto bail; + } + + if (mem->size > MAX_ALLOC_NAT_SIZE) { + IPAERR("Trying allocate more size = %zu, Max allowed = %d\n", + mem->size, MAX_ALLOC_NAT_SIZE); + result = -EPERM; + goto bail; + } + + if (mem->size <= 0 || + nat_ctx->is_dev_init) { + IPAERR_RL("Invalid Parameters or device is already init\n"); + result = -EPERM; + goto bail; + } + + if (mem->size > IPA_NAT_PHYS_MEM_SIZE) { + IPADBG("Allocating system memory\n"); + nat_ctx->is_sys_mem = true; + nat_ctx->vaddr = + dma_alloc_coherent(ipa_ctx->pdev, mem->size, + &nat_ctx->dma_handle, gfp_flags); + if (nat_ctx->vaddr == NULL) { + IPAERR("memory alloc failed\n"); + result = -ENOMEM; + goto bail; + } + nat_ctx->size = mem->size; + } else { + IPADBG("using shared(local) memory\n"); + nat_ctx->is_sys_mem = false; + } + + nat_ctx->is_dev_init = true; + IPADBG("IPA NAT dev init successfully\n"); + result = 0; + +bail: + mutex_unlock(&nat_ctx->lock); + + return result; +} + +/* IOCTL function handlers */ +/** + * ipa2_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW + * @init: [in] initialization command attributes + * + * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) 
+{ +#define TBL_ENTRY_SIZE 32 +#define INDX_TBL_ENTRY_SIZE 4 + + struct ipa_register_write *reg_write_nop; + struct ipa_desc desc[2]; + struct ipa_ip_v4_nat_init *cmd; + u16 size = sizeof(struct ipa_ip_v4_nat_init); + int result; + u32 offset = 0; + size_t tmp; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + mutex_lock(&ipa_ctx->nat_mem.lock); + + if (!ipa_ctx->nat_mem.is_dev_init) { + IPAERR_RL("Nat table not initialized\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + IPADBG("\n"); + if (init->table_entries == 0) { + IPADBG("Table entries is zero\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + /* check for integer overflow */ + if (init->ipv4_rules_offset > + (UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1)))) { + IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + /* Check Table Entry offset is not + * beyond allocated size + */ + tmp = init->ipv4_rules_offset + + (TBL_ENTRY_SIZE * (init->table_entries + 1)); + if (tmp > ipa_ctx->nat_mem.size) { + IPAERR_RL("Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->ipv4_rules_offset, (init->table_entries + 1), + tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + /* check for integer overflow */ + if (init->expn_rules_offset > + UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) { + IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + /* Check Expn Table Entry offset is not + * beyond allocated size + */ + tmp = init->expn_rules_offset + + (TBL_ENTRY_SIZE * init->expn_table_entries); + if (tmp > ipa_ctx->nat_mem.size) { + IPAERR_RL("Expn Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->expn_rules_offset, init->expn_table_entries, + tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return 
-EPERM; + } + + /* check for integer overflow */ + if (init->index_offset > + UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) { + IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + /* Check Indx Table Entry offset is not + * beyond allocated size + */ + tmp = init->index_offset + + (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1)); + if (tmp > ipa_ctx->nat_mem.size) { + IPAERR_RL("Indx Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->index_offset, (init->table_entries + 1), + tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + /* check for integer overflow */ + if (init->index_expn_offset > + (UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries))) { + IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + /* Check Expn Table entry offset is not + * beyond allocated size + */ + tmp = init->index_expn_offset + + (INDX_TBL_ENTRY_SIZE * init->expn_table_entries); + if (tmp > ipa_ctx->nat_mem.size) { + IPAERR_RL("Indx Expn Table rules offset not valid\n"); + IPAERR_RL("offset:%d entries:%d size:%zu mem_size:%zu\n", + init->index_expn_offset, init->expn_table_entries, + tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); + return -EPERM; + } + + memset(&desc, 0, sizeof(desc)); + /* NO-OP IC for ensuring that IPA pipeline is empty */ + reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag); + if (!reg_write_nop) { + IPAERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + reg_write_nop->skip_pipeline_clear = 0; + reg_write_nop->value_mask = 0x0; + + desc[0].opcode = IPA_REGISTER_WRITE; + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].callback = NULL; + desc[0].user1 = NULL; + desc[0].user2 = 0; + desc[0].pyld = (void *)reg_write_nop; + desc[0].len = sizeof(*reg_write_nop); + + cmd = kmalloc(size, flag); + if (!cmd) { + IPAERR("Failed to alloc immediate 
command object\n"); + result = -ENOMEM; + goto free_nop; + } + if (ipa_ctx->nat_mem.vaddr) { + IPADBG("using system memory for nat table\n"); + cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY; + cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY; + cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY; + cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY; + + offset = UINT_MAX - ipa_ctx->nat_mem.dma_handle; + + if ((init->ipv4_rules_offset > offset) || + (init->expn_rules_offset > offset) || + (init->index_offset > offset) || + (init->index_expn_offset > offset)) { + IPAERR_RL("Failed due to integer overflow\n"); + IPAERR_RL("nat.mem.dma_handle: 0x%pa\n", + &ipa_ctx->nat_mem.dma_handle); + IPAERR_RL("ipv4_rules_offset: 0x%x\n", + init->ipv4_rules_offset); + IPAERR_RL("expn_rules_offset: 0x%x\n", + init->expn_rules_offset); + IPAERR_RL("index_offset: 0x%x\n", + init->index_offset); + IPAERR_RL("index_expn_offset: 0x%x\n", + init->index_expn_offset); + result = -EPERM; + goto free_mem; + } + cmd->ipv4_rules_addr = + ipa_ctx->nat_mem.dma_handle + init->ipv4_rules_offset; + IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset); + + cmd->ipv4_expansion_rules_addr = + ipa_ctx->nat_mem.dma_handle + init->expn_rules_offset; + IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset); + + cmd->index_table_addr = + ipa_ctx->nat_mem.dma_handle + init->index_offset; + IPADBG("index_offset:0x%x\n", init->index_offset); + + cmd->index_table_expansion_addr = + ipa_ctx->nat_mem.dma_handle + init->index_expn_offset; + IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset); + } else { + IPADBG("using shared(local) memory for nat table\n"); + cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY; + cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY; + cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY; + cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY; + + cmd->ipv4_rules_addr = init->ipv4_rules_offset + + IPA_RAM_NAT_OFST; + + 
cmd->ipv4_expansion_rules_addr = init->expn_rules_offset + + IPA_RAM_NAT_OFST; + + cmd->index_table_addr = init->index_offset + + IPA_RAM_NAT_OFST; + + cmd->index_table_expansion_addr = init->index_expn_offset + + IPA_RAM_NAT_OFST; + } + cmd->table_index = init->tbl_index; + IPADBG("Table index:0x%x\n", cmd->table_index); + cmd->size_base_tables = init->table_entries; + IPADBG("Base Table size:0x%x\n", cmd->size_base_tables); + cmd->size_expansion_tables = init->expn_table_entries; + IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables); + cmd->public_ip_addr = init->ip_addr; + IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr); + desc[1].opcode = IPA_IP_V4_NAT_INIT; + desc[1].type = IPA_IMM_CMD_DESC; + desc[1].callback = NULL; + desc[1].user1 = NULL; + desc[1].user2 = 0; + desc[1].pyld = (void *)cmd; + desc[1].len = size; + IPADBG("posting v4 init command\n"); + if (ipa_send_cmd(2, desc)) { + IPAERR_RL("Fail to send immediate command\n"); + result = -EPERM; + goto free_mem; + } + + ipa_ctx->nat_mem.public_ip_addr = init->ip_addr; + IPADBG("Table ip address:0x%x", ipa_ctx->nat_mem.public_ip_addr); + + ipa_ctx->nat_mem.ipv4_rules_addr = + (char *)ipa_ctx->nat_mem.nat_base_address + init->ipv4_rules_offset; + IPADBG("ipv4_rules_addr: 0x%p\n", + ipa_ctx->nat_mem.ipv4_rules_addr); + + ipa_ctx->nat_mem.ipv4_expansion_rules_addr = + (char *)ipa_ctx->nat_mem.nat_base_address + init->expn_rules_offset; + IPADBG("ipv4_expansion_rules_addr: 0x%p\n", + ipa_ctx->nat_mem.ipv4_expansion_rules_addr); + + ipa_ctx->nat_mem.index_table_addr = + (char *)ipa_ctx->nat_mem.nat_base_address + init->index_offset; + IPADBG("index_table_addr: 0x%p\n", + ipa_ctx->nat_mem.index_table_addr); + + ipa_ctx->nat_mem.index_table_expansion_addr = + (char *)ipa_ctx->nat_mem.nat_base_address + init->index_expn_offset; + IPADBG("index_table_expansion_addr: 0x%p\n", + ipa_ctx->nat_mem.index_table_expansion_addr); + + IPADBG("size_base_tables: %d\n", init->table_entries); + 
ipa_ctx->nat_mem.size_base_tables = init->table_entries; + + IPADBG("size_expansion_tables: %d\n", init->expn_table_entries); + ipa_ctx->nat_mem.size_expansion_tables = init->expn_table_entries; + + IPADBG("return\n"); + result = 0; +free_mem: + kfree(cmd); +free_nop: + kfree(reg_write_nop); +bail: + mutex_unlock(&ipa_ctx->nat_mem.lock); + return result; +} + +/** + * ipa2_nat_dma_cmd() - Post NAT_DMA command to IPA HW + * @dma: [in] initialization command attributes + * + * Called by NAT client driver to post NAT_DMA command to IPA HW + * + * Returns: 0 on success, negative on failure + */ +int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) +{ +#define NUM_OF_DESC 2 + + struct ipa_register_write *reg_write_nop = NULL; + struct ipa_nat_dma *cmd = NULL; + struct ipa_desc *desc = NULL; + u16 size = 0, cnt = 0; + int ret = 0; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + if (!ipa_ctx->nat_mem.is_dev_init) { + IPAERR_RL("Nat table not initialized\n"); + return -EPERM; + } + + IPADBG("\n"); + if (dma->entries <= 0) { + IPAERR_RL("Invalid number of commands %d\n", + dma->entries); + ret = -EPERM; + goto bail; + } + + for (cnt = 0; cnt < dma->entries; cnt++) { + if (dma->dma[cnt].table_index >= 1) { + IPAERR_RL("Invalid table index %d\n", + dma->dma[cnt].table_index); + ret = -EPERM; + goto bail; + } + + switch (dma->dma[cnt].base_addr) { + case IPA_NAT_BASE_TBL: + if (dma->dma[cnt].offset >= + (ipa_ctx->nat_mem.size_base_tables + 1) * + NAT_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + case IPA_NAT_EXPN_TBL: + if (dma->dma[cnt].offset >= + ipa_ctx->nat_mem.size_expansion_tables * + NAT_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + case IPA_NAT_INDX_TBL: + if (dma->dma[cnt].offset >= + (ipa_ctx->nat_mem.size_base_tables + 1) * + NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) { + 
IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + case IPA_NAT_INDEX_EXPN_TBL: + if (dma->dma[cnt].offset >= + ipa_ctx->nat_mem.size_expansion_tables * + NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) { + IPAERR_RL("Invalid offset %d\n", + dma->dma[cnt].offset); + ret = -EPERM; + goto bail; + } + + break; + + default: + IPAERR_RL("Invalid base_addr %d\n", + dma->dma[cnt].base_addr); + ret = -EPERM; + goto bail; + } + } + + size = sizeof(struct ipa_desc) * NUM_OF_DESC; + desc = kzalloc(size, GFP_KERNEL); + if (desc == NULL) { + IPAERR("Failed to alloc memory\n"); + ret = -ENOMEM; + goto bail; + } + + size = sizeof(struct ipa_nat_dma); + cmd = kzalloc(size, flag); + if (cmd == NULL) { + IPAERR("Failed to alloc memory\n"); + ret = -ENOMEM; + goto bail; + } + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag); + if (!reg_write_nop) { + IPAERR("Failed to alloc memory\n"); + ret = -ENOMEM; + goto bail; + } + + reg_write_nop->skip_pipeline_clear = 0; + reg_write_nop->value_mask = 0x0; + + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].opcode = IPA_REGISTER_WRITE; + desc[0].callback = NULL; + desc[0].user1 = NULL; + desc[0].user2 = 0; + desc[0].len = sizeof(*reg_write_nop); + desc[0].pyld = (void *)reg_write_nop; + + for (cnt = 0; cnt < dma->entries; cnt++) { + cmd->table_index = dma->dma[cnt].table_index; + cmd->base_addr = dma->dma[cnt].base_addr; + cmd->offset = dma->dma[cnt].offset; + cmd->data = dma->dma[cnt].data; + + desc[1].type = IPA_IMM_CMD_DESC; + desc[1].opcode = IPA_NAT_DMA; + desc[1].callback = NULL; + desc[1].user1 = NULL; + desc[1].user2 = 0; + desc[1].len = sizeof(struct ipa_nat_dma); + desc[1].pyld = (void *)cmd; + + ret = ipa_send_cmd(NUM_OF_DESC, desc); + if (ret == -EPERM) + IPAERR("Fail to send immediate command %d\n", cnt); + } + +bail: + kfree(cmd); + + kfree(desc); + + kfree(reg_write_nop); + + return ret; +} + +/** + * ipa_nat_free_mem_and_device() 
- free the NAT memory and remove the device + * @nat_ctx: [in] the IPA NAT memory to free + * + * Called by NAT client driver to free the NAT memory and remove the device + */ +void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx) +{ + IPADBG("\n"); + mutex_lock(&nat_ctx->lock); + + if (nat_ctx->is_sys_mem) { + IPADBG("freeing the dma memory\n"); + dma_free_coherent( + ipa_ctx->pdev, nat_ctx->size, + nat_ctx->vaddr, nat_ctx->dma_handle); + nat_ctx->size = 0; + nat_ctx->vaddr = NULL; + } + nat_ctx->is_mapped = false; + nat_ctx->is_sys_mem = false; + nat_ctx->is_dev_init = false; + + mutex_unlock(&nat_ctx->lock); + IPADBG("return\n"); +} + +/** + * ipa2_nat_del_cmd() - Delete a NAT table + * @del: [in] delete table table table parameters + * + * Called by NAT client driver to delete the nat table + * + * Returns: 0 on success, negative on failure + */ +int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) +{ + struct ipa_register_write *reg_write_nop; + struct ipa_desc desc[2]; + struct ipa_ip_v4_nat_init *cmd; + u16 size = sizeof(struct ipa_ip_v4_nat_init); + u8 mem_type = IPA_NAT_SHARED_MEMORY; + u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET; + int result; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + if (!ipa_ctx->nat_mem.is_dev_init) { + IPAERR_RL("Nat table not initialized\n"); + return -EPERM; + } + + if (!ipa_ctx->nat_mem.public_ip_addr) { + IPAERR_RL("Public IP addr not assigned and trying to delete\n"); + return -EPERM; + } + + IPADBG("\n"); + if (ipa_ctx->nat_mem.is_tmp_mem) { + IPAERR("using temp memory during nat del\n"); + mem_type = IPA_NAT_SYSTEM_MEMORY; + base_addr = ipa_ctx->nat_mem.tmp_dma_handle; + } + + memset(&desc, 0, sizeof(desc)); + /* NO-OP IC for ensuring that IPA pipeline is empty */ + reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag); + if (!reg_write_nop) { + IPAERR("no mem\n"); + result = -ENOMEM; + goto bail; + } + + reg_write_nop->skip_pipeline_clear = 0; + reg_write_nop->value_mask = 0x0; + + desc[0].opcode = IPA_REGISTER_WRITE; + desc[0].type = IPA_IMM_CMD_DESC; + desc[0].callback = NULL; + desc[0].user1 = NULL; + desc[0].user2 = 0; + desc[0].pyld = (void *)reg_write_nop; + desc[0].len = sizeof(*reg_write_nop); + + cmd = kmalloc(size, flag); + if (cmd == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + result = -ENOMEM; + goto free_nop; + } + cmd->table_index = del->table_index; + cmd->ipv4_rules_addr = base_addr; + cmd->ipv4_rules_addr_type = mem_type; + cmd->ipv4_expansion_rules_addr = base_addr; + cmd->ipv4_expansion_rules_addr_type = mem_type; + cmd->index_table_addr = base_addr; + cmd->index_table_addr_type = mem_type; + cmd->index_table_expansion_addr = base_addr; + cmd->index_table_expansion_addr_type = mem_type; + cmd->size_base_tables = 0; + cmd->size_expansion_tables = 0; + cmd->public_ip_addr = 0; + + desc[1].opcode = IPA_IP_V4_NAT_INIT; + desc[1].type = IPA_IMM_CMD_DESC; + desc[1].callback = NULL; + desc[1].user1 = NULL; + desc[1].user2 = 0; + desc[1].pyld = (void *)cmd; + desc[1].len = size; + if (ipa_send_cmd(2, desc)) { + IPAERR("Fail to send immediate command\n"); + result = -EPERM; + goto free_mem; + } + + ipa_ctx->nat_mem.size_base_tables = 0; + 
ipa_ctx->nat_mem.size_expansion_tables = 0; + ipa_ctx->nat_mem.public_ip_addr = 0; + ipa_ctx->nat_mem.ipv4_rules_addr = 0; + ipa_ctx->nat_mem.ipv4_expansion_rules_addr = 0; + ipa_ctx->nat_mem.index_table_addr = 0; + ipa_ctx->nat_mem.index_table_expansion_addr = 0; + + ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem); + IPADBG("return\n"); + result = 0; +free_mem: + kfree(cmd); +free_nop: + kfree(reg_write_nop); +bail: + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c new file mode 100644 index 000000000000..47325d7ef056 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c @@ -0,0 +1,1274 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ipa_qmi_service.h" +#include "ipa_ram_mmap.h" +#include "../ipa_common_i.h" + +#define IPA_Q6_SVC_VERS 1 +#define IPA_A5_SVC_VERS 1 +#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ) + +#define IPA_A5_SERVICE_SVC_ID 0x31 +#define IPA_A5_SERVICE_INS_ID 1 +#define IPA_Q6_SERVICE_SVC_ID 0x31 +#define IPA_Q6_SERVICE_INS_ID 2 + +#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000 +#define QMI_SEND_REQ_TIMEOUT_MS 60000 + +#define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000 + +static struct qmi_handle *ipa_svc_handle; +static void ipa_a5_svc_recv_msg(struct work_struct *work); +static DECLARE_DELAYED_WORK(work_recv_msg, ipa_a5_svc_recv_msg); +static struct workqueue_struct *ipa_svc_workqueue; +static struct workqueue_struct *ipa_clnt_req_workqueue; +static struct workqueue_struct *ipa_clnt_resp_workqueue; +static void *curr_conn; +static bool qmi_modem_init_fin, qmi_indication_fin; +static uint32_t ipa_wan_platform; +struct ipa_qmi_context *ipa_qmi_ctx; +static bool first_time_handshake; +static atomic_t workqueues_stopped; +static atomic_t 
ipa_qmi_initialized; +struct mutex ipa_qmi_lock; + +/* QMI A5 service */ + +static struct msg_desc ipa_indication_reg_req_desc = { + .max_msg_len = QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01, + .ei_array = ipa_indication_reg_req_msg_data_v01_ei, +}; +static struct msg_desc ipa_indication_reg_resp_desc = { + .max_msg_len = QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INDICATION_REGISTER_RESP_V01, + .ei_array = ipa_indication_reg_resp_msg_data_v01_ei, +}; +static struct msg_desc ipa_master_driver_complete_indication_desc = { + .max_msg_len = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, + .ei_array = ipa_master_driver_init_complt_ind_msg_data_v01_ei, +}; +static struct msg_desc ipa_install_fltr_rule_req_desc = { + .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, + .ei_array = ipa_install_fltr_rule_req_msg_data_v01_ei, +}; +static struct msg_desc ipa_install_fltr_rule_resp_desc = { + .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01, + .ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei, +}; +static struct msg_desc ipa_filter_installed_notif_req_desc = { + .max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, + .ei_array = ipa_fltr_installed_notif_req_msg_data_v01_ei, +}; +static struct msg_desc ipa_filter_installed_notif_resp_desc = { + .max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01, + .ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei, +}; +static struct msg_desc ipa_config_req_desc = { + .max_msg_len = QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_CONFIG_REQ_V01, + .ei_array = ipa_config_req_msg_data_v01_ei, +}; +static struct msg_desc 
ipa_config_resp_desc = { + .max_msg_len = QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01, + .msg_id = QMI_IPA_CONFIG_RESP_V01, + .ei_array = ipa_config_resp_msg_data_v01_ei, +}; + +static int handle_indication_req(void *req_h, void *req) +{ + struct ipa_indication_reg_req_msg_v01 *indication_req; + struct ipa_indication_reg_resp_msg_v01 resp; + struct ipa_master_driver_init_complt_ind_msg_v01 ind; + int rc; + + indication_req = (struct ipa_indication_reg_req_msg_v01 *)req; + IPAWANDBG("Received INDICATION Request\n"); + + memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h, + &ipa_indication_reg_resp_desc, &resp, sizeof(resp)); + qmi_indication_fin = true; + /* check if need sending indication to modem */ + if (qmi_modem_init_fin) { + IPAWANDBG("send indication to modem (%d)\n", + qmi_modem_init_fin); + memset(&ind, 0, sizeof(struct + ipa_master_driver_init_complt_ind_msg_v01)); + ind.master_driver_init_status.result = + IPA_QMI_RESULT_SUCCESS_V01; + rc = qmi_send_ind_from_cb(ipa_svc_handle, curr_conn, + &ipa_master_driver_complete_indication_desc, + &ind, + sizeof(ind)); + } else { + IPAWANERR("not send indication\n"); + } + return rc; +} + + +static int handle_install_filter_rule_req(void *req_h, void *req) +{ + struct ipa_install_fltr_rule_req_msg_v01 *rule_req; + struct ipa_install_fltr_rule_resp_msg_v01 resp; + uint32_t rule_hdl[MAX_NUM_Q6_RULE]; + int rc = 0, i; + + rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)req; + memset(rule_hdl, 0, sizeof(rule_hdl)); + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01)); + IPAWANDBG("Received install filter Request\n"); + + rc = copy_ul_filter_rule_to_ipa((struct + ipa_install_fltr_rule_req_msg_v01*)req, rule_hdl); + if (rc) + IPAWANERR("copy UL rules from modem is failed\n"); + + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + if (rule_req->filter_spec_list_valid == true) { + 
resp.filter_handle_list_valid = true; + if (rule_req->filter_spec_list_len > MAX_NUM_Q6_RULE) { + resp.filter_handle_list_len = MAX_NUM_Q6_RULE; + IPAWANERR("installed (%d) max Q6-UL rules ", + MAX_NUM_Q6_RULE); + IPAWANERR("but modem gives total (%u)\n", + rule_req->filter_spec_list_len); + } else { + resp.filter_handle_list_len = + rule_req->filter_spec_list_len; + } + } else { + resp.filter_handle_list_valid = false; + } + + /* construct UL filter rules response to Modem*/ + for (i = 0; i < resp.filter_handle_list_len; i++) { + resp.filter_handle_list[i].filter_spec_identifier = + rule_req->filter_spec_list[i].filter_spec_identifier; + resp.filter_handle_list[i].filter_handle = rule_hdl[i]; + } + + rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h, + &ipa_install_fltr_rule_resp_desc, &resp, sizeof(resp)); + + IPAWANDBG("Replied to install filter request\n"); + return rc; +} + +static int handle_filter_installed_notify_req(void *req_h, void *req) +{ + struct ipa_fltr_installed_notif_resp_msg_v01 resp; + int rc = 0; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + IPAWANDBG("Received filter_install_notify Request\n"); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + + rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h, + &ipa_filter_installed_notif_resp_desc, + &resp, sizeof(resp)); + + IPAWANDBG("Responsed filter_install_notify Request\n"); + return rc; +} + +static int handle_ipa_config_req(void *req_h, void *req) +{ + struct ipa_config_resp_msg_v01 resp; + int rc; + + memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01)); + resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; + IPAWANDBG("Received IPA CONFIG Request\n"); + rc = ipa_mhi_handle_ipa_config_req( + (struct ipa_config_req_msg_v01 *)req); + if (rc) { + IPAERR("ipa_mhi_handle_ipa_config_req failed %d\n", rc); + resp.resp.result = IPA_QMI_RESULT_FAILURE_V01; + } + rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h, + &ipa_config_resp_desc, + &resp, 
sizeof(resp)); + IPAWANDBG("Responsed IPA CONFIG Request\n"); + return rc; +} + +static int ipa_a5_svc_connect_cb(struct qmi_handle *handle, + void *conn_h) +{ + if (ipa_svc_handle != handle || !conn_h) + return -EINVAL; + + if (curr_conn) { + IPAWANERR("Service is busy\n"); + return -ECONNREFUSED; + } + curr_conn = conn_h; + return 0; +} + +static int ipa_a5_svc_disconnect_cb(struct qmi_handle *handle, + void *conn_h) +{ + if (ipa_svc_handle != handle || curr_conn != conn_h) + return -EINVAL; + + curr_conn = NULL; + return 0; +} + +static int ipa_a5_svc_req_desc_cb(unsigned int msg_id, + struct msg_desc **req_desc) +{ + int rc; + + switch (msg_id) { + case QMI_IPA_INDICATION_REGISTER_REQ_V01: + *req_desc = &ipa_indication_reg_req_desc; + rc = sizeof(struct ipa_indication_reg_req_msg_v01); + break; + + case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01: + *req_desc = &ipa_install_fltr_rule_req_desc; + rc = sizeof(struct ipa_install_fltr_rule_req_msg_v01); + break; + case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01: + *req_desc = &ipa_filter_installed_notif_req_desc; + rc = sizeof(struct ipa_fltr_installed_notif_req_msg_v01); + break; + case QMI_IPA_CONFIG_REQ_V01: + *req_desc = &ipa_config_req_desc; + rc = sizeof(struct ipa_config_req_msg_v01); + break; + default: + rc = -ENOTSUPP; + break; + } + return rc; +} + +static int ipa_a5_svc_req_cb(struct qmi_handle *handle, void *conn_h, + void *req_h, unsigned int msg_id, void *req) +{ + int rc; + + if (ipa_svc_handle != handle || curr_conn != conn_h) + return -EINVAL; + + switch (msg_id) { + case QMI_IPA_INDICATION_REGISTER_REQ_V01: + rc = handle_indication_req(req_h, req); + break; + case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01: + rc = handle_install_filter_rule_req(req_h, req); + rc = wwan_update_mux_channel_prop(); + break; + case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01: + rc = handle_filter_installed_notify_req(req_h, req); + break; + case QMI_IPA_CONFIG_REQ_V01: + rc = handle_ipa_config_req(req_h, req); + break; + default: + rc = 
-ENOTSUPP; + break; + } + return rc; +} + +static void ipa_a5_svc_recv_msg(struct work_struct *work) +{ + int rc; + + do { + IPAWANDBG_LOW("Notified about a Receive Event"); + rc = qmi_recv_msg(ipa_svc_handle); + } while (rc == 0); + if (rc != -ENOMSG) + IPAWANERR("Error receiving message\n"); +} + +static void qmi_ipa_a5_svc_ntfy(struct qmi_handle *handle, + enum qmi_event_type event, void *priv) +{ + switch (event) { + case QMI_RECV_MSG: + if (!atomic_read(&workqueues_stopped)) + queue_delayed_work(ipa_svc_workqueue, + &work_recv_msg, 0); + break; + default: + break; + } +} + +static struct qmi_svc_ops_options ipa_a5_svc_ops_options = { + .version = 1, + .service_id = IPA_A5_SERVICE_SVC_ID, + .service_vers = IPA_A5_SVC_VERS, + .service_ins = IPA_A5_SERVICE_INS_ID, + .connect_cb = ipa_a5_svc_connect_cb, + .disconnect_cb = ipa_a5_svc_disconnect_cb, + .req_desc_cb = ipa_a5_svc_req_desc_cb, + .req_cb = ipa_a5_svc_req_cb, +}; + + +/****************************************************/ +/* QMI A5 client ->Q6 */ +/****************************************************/ +static void ipa_q6_clnt_recv_msg(struct work_struct *work); +static DECLARE_DELAYED_WORK(work_recv_msg_client, ipa_q6_clnt_recv_msg); +static void ipa_q6_clnt_svc_arrive(struct work_struct *work); +static DECLARE_DELAYED_WORK(work_svc_arrive, ipa_q6_clnt_svc_arrive); +static void ipa_q6_clnt_svc_exit(struct work_struct *work); +static DECLARE_DELAYED_WORK(work_svc_exit, ipa_q6_clnt_svc_exit); +/* Test client port for IPC Router */ +static struct qmi_handle *ipa_q6_clnt; +static int ipa_q6_clnt_reset; + +static int ipa_check_qmi_response(int rc, + int req_id, + enum ipa_qmi_result_type_v01 result, + enum ipa_qmi_error_type_v01 error, + char *resp_type) +{ + if (rc < 0) { + if (rc == -ETIMEDOUT && ipa_rmnet_ctx.ipa_rmnet_ssr) { + IPAWANERR( + "Timeout for qmi request id %d\n", req_id); + return rc; + } + if ((rc == -ENETRESET) || (rc == -ENODEV)) { + IPAWANERR( + "SSR while waiting for qmi request id %d\n", 
req_id); + return rc; + } + IPAWANERR("Error sending qmi request id %d, rc = %d\n", + req_id, rc); + return rc; + } + if (result != IPA_QMI_RESULT_SUCCESS_V01 && + ipa_rmnet_ctx.ipa_rmnet_ssr) { + IPAWANERR( + "Got bad response %d from request id %d (error %d)\n", + req_id, result, error); + return result; + } + IPAWANDBG_LOW("Received %s successfully\n", resp_type); + return 0; +} + +static int qmi_init_modem_send_sync_msg(void) +{ + struct ipa_init_modem_driver_req_msg_v01 req; + struct ipa_init_modem_driver_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc; + u16 smem_restr_bytes = ipa2_get_smem_restr_bytes(); + + memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01)); + memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01)); + + req.platform_type_valid = true; + req.platform_type = ipa_wan_platform; + + req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0); + req.hdr_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes; + req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) + + smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1; + + req.v4_route_tbl_info_valid = true; + req.v4_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v4_rt_ofst) + + smem_restr_bytes; + req.v4_route_tbl_info.num_indices = IPA_MEM_PART(v4_modem_rt_index_hi); + req.v6_route_tbl_info_valid = true; + + req.v6_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v6_rt_ofst) + + smem_restr_bytes; + req.v6_route_tbl_info.num_indices = IPA_MEM_PART(v6_modem_rt_index_hi); + + req.v4_filter_tbl_start_addr_valid = true; + req.v4_filter_tbl_start_addr = + IPA_MEM_PART(v4_flt_ofst) + smem_restr_bytes; + + req.v6_filter_tbl_start_addr_valid = true; + req.v6_filter_tbl_start_addr = + IPA_MEM_PART(v6_flt_ofst) + smem_restr_bytes; + + req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0); + req.modem_mem_info.block_start_addr = + IPA_MEM_PART(modem_ofst) + smem_restr_bytes; + req.modem_mem_info.size = 
IPA_MEM_PART(modem_size); + + req.ctrl_comm_dest_end_pt_valid = true; + req.ctrl_comm_dest_end_pt = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + + req.hdr_proc_ctx_tbl_info_valid = + (IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0); + req.hdr_proc_ctx_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes; + req.hdr_proc_ctx_tbl_info.modem_offset_end = + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + + IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1; + + req.zip_tbl_info_valid = (IPA_MEM_PART(modem_comp_decomp_size) != 0); + req.zip_tbl_info.modem_offset_start = + IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes; + req.zip_tbl_info.modem_offset_end = + IPA_MEM_PART(modem_comp_decomp_ofst) + + IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1; + + if (!ipa_uc_loaded_check()) { /* First time boot */ + req.is_ssr_bootup_valid = false; + req.is_ssr_bootup = 0; + } else { /* After SSR boot */ + req.is_ssr_bootup_valid = true; + req.is_ssr_bootup = 1; + } + + IPAWANDBG("platform_type %d\n", req.platform_type); + IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n", + req.hdr_tbl_info.modem_offset_start); + IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n", + req.hdr_tbl_info.modem_offset_end); + IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n", + req.v4_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v4_route_tbl_info.num_indices %d\n", + req.v4_route_tbl_info.num_indices); + IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n", + req.v6_route_tbl_info.route_tbl_start_addr); + IPAWANDBG("v6_route_tbl_info.num_indices %d\n", + req.v6_route_tbl_info.num_indices); + IPAWANDBG("v4_filter_tbl_start_addr %d\n", + req.v4_filter_tbl_start_addr); + IPAWANDBG("v6_filter_tbl_start_addr %d\n", + req.v6_filter_tbl_start_addr); + IPAWANDBG("modem_mem_info.block_start_addr %d\n", + req.modem_mem_info.block_start_addr); + IPAWANDBG("modem_mem_info.size %d\n", + req.modem_mem_info.size); + IPAWANDBG("ctrl_comm_dest_end_pt 
%d\n", + req.ctrl_comm_dest_end_pt); + IPAWANDBG("is_ssr_bootup %d\n", + req.is_ssr_bootup); + + req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01; + req_desc.ei_array = ipa_init_modem_driver_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01; + resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei; + + pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_REQ_TIMEOUT_MS); + pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n"); + return ipa_check_qmi_response(rc, + QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_init_modem_driver_resp_msg_v01"); +} + +/* sending filter-install-request to modem*/ +int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) +{ + struct ipa_install_fltr_rule_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc; + int i; + + /* check if modem up */ + if (!qmi_indication_fin || + !qmi_modem_init_fin || + !ipa_q6_clnt) { + IPAWANDBG("modem QMI haven't up yet\n"); + return -EINVAL; + } + + /* check if the filter rules from IPACM is valid */ + if (req->filter_spec_list_len == 0) { + IPAWANDBG("IPACM pass zero rules to Q6\n"); + } else { + IPAWANDBG("IPACM pass %u rules to Q6\n", + req->filter_spec_list_len); + } + + if (req->filter_spec_list_len >= QMI_IPA_MAX_FILTERS_V01) { + IPAWANDBG( + "IPACM passes the number of filtering rules exceed limit\n"); + return -EINVAL; + } else if (req->source_pipe_index_valid != 0) { + IPAWANDBG( + "IPACM passes source_pipe_index_valid not zero 0 != %d\n", + req->source_pipe_index_valid); + return -EINVAL; + } else if (req->source_pipe_index >= ipa_ctx->ipa_num_pipes) { + IPAWANDBG( + "IPACM 
passes source pipe index not valid ID = %d\n", + req->source_pipe_index); + return -EINVAL; + } + for (i = 0; i < req->filter_spec_list_len; i++) { + if ((req->filter_spec_list[i].ip_type != + QMI_IPA_IP_TYPE_V4_V01) && + (req->filter_spec_list[i].ip_type != + QMI_IPA_IP_TYPE_V6_V01)) + return -EINVAL; + if (req->filter_spec_list[i].is_mux_id_valid == false) + return -EINVAL; + if (req->filter_spec_list[i].is_routing_table_index_valid + == false) + return -EINVAL; + if ((req->filter_spec_list[i].filter_action <= + QMI_IPA_FILTER_ACTION_INVALID_V01) && + (req->filter_spec_list[i].filter_action > + QMI_IPA_FILTER_ACTION_EXCEPTION_V01)) + return -EINVAL; + } + mutex_lock(&ipa_qmi_lock); + if (ipa_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[ + ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg]), + req, + sizeof(struct ipa_install_fltr_rule_req_msg_v01)); + ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg++; + ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg %= 10; + } + mutex_unlock(&ipa_qmi_lock); + + req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01; + req_desc.ei_array = ipa_install_fltr_rule_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01; + resp_desc.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, + req, + sizeof(struct ipa_install_fltr_rule_req_msg_v01), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_REQ_TIMEOUT_MS); + return ipa_check_qmi_response(rc, + QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_install_filter"); +} + + +int qmi_enable_force_clear_datapath_send( + struct 
ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + struct ipa_enable_force_clear_datapath_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc = 0; + + + if (!req || !req->source_pipe_bitmask) { + IPAWANERR("invalid params\n"); + return -EINVAL; + } + + req_desc.max_msg_len = + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01; + req_desc.ei_array = ipa_enable_force_clear_datapath_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01; + resp_desc.ei_array = + ipa_enable_force_clear_datapath_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + sizeof(*req), + &resp_desc, &resp, sizeof(resp), + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + if (rc < 0) { + IPAWANERR("send req failed %d\n", rc); + return rc; + } + if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR("filter_notify failed %d\n", + resp.resp.result); + return resp.resp.result; + } + IPAWANDBG("SUCCESS\n"); + return rc; +} + +int qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + struct ipa_disable_force_clear_datapath_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc = 0; + + + if (!req) { + IPAWANERR("invalid params\n"); + return -EINVAL; + } + + req_desc.max_msg_len = + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01; + req_desc.ei_array = + ipa_disable_force_clear_datapath_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = 
QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01; + resp_desc.ei_array = + ipa_disable_force_clear_datapath_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + sizeof(*req), + &resp_desc, &resp, sizeof(resp), + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + if (rc < 0) { + IPAWANERR("send req failed %d\n", rc); + return rc; + } + if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR("filter_notify failed %d\n", + resp.resp.result); + return resp.resp.result; + } + IPAWANDBG("SUCCESS\n"); + return rc; +} + +/* sending filter-installed-notify-request to modem*/ +int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req) +{ + struct ipa_fltr_installed_notif_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc = 0, i = 0; + + /* check if the filter rules from IPACM is valid */ + if (req->filter_index_list_len == 0) { + IPAWANDBG(" delete UL filter rule for pipe %d\n", + req->source_pipe_index); + } else if (req->filter_index_list_len > QMI_IPA_MAX_FILTERS_V01) { + IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n", + req->source_pipe_index, + req->filter_index_list_len); + return -EINVAL; + } else if (req->filter_index_list[0].filter_index == 0 && + req->source_pipe_index != + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD)) { + IPAWANERR(" get index wrong for pipe %d\n", + req->source_pipe_index); + for (i = 0; i < req->filter_index_list_len; i++) + IPAWANERR(" %d-st handle %d index %d\n", + i, + req->filter_index_list[i].filter_handle, + req->filter_index_list[i].filter_index); + return -EINVAL; + } + + if (req->install_status != IPA_QMI_RESULT_SUCCESS_V01) { + IPAWANERR(" UL filter rule for pipe %d install_status = %d\n", + req->source_pipe_index, req->install_status); + return -EINVAL; + } else if (req->source_pipe_index >= ipa_ctx->ipa_num_pipes) { + IPAWANERR("IPACM passes source pipe index not valid ID = %d\n", + 
req->source_pipe_index); + return -EINVAL; + } else if (((req->embedded_pipe_index_valid != true) || + (req->embedded_call_mux_id_valid != true)) && + ((req->embedded_pipe_index_valid != false) || + (req->embedded_call_mux_id_valid != false))) { + IPAWANERR( + "IPACM passes embedded pipe and mux valid not valid\n"); + return -EINVAL; + } else if (req->embedded_pipe_index >= ipa_ctx->ipa_num_pipes) { + IPAWANERR("IPACM passes source pipe index not valid ID = %d\n", + req->source_pipe_index); + return -EINVAL; + } + + mutex_lock(&ipa_qmi_lock); + if (ipa_qmi_ctx != NULL) { + /* cache the qmi_filter_request */ + memcpy(&(ipa_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[ + ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]), + req, + sizeof(struct ipa_fltr_installed_notif_req_msg_v01)); + ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++; + ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10; + } + mutex_unlock(&ipa_qmi_lock); + req_desc.max_msg_len = + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01; + req_desc.ei_array = ipa_fltr_installed_notif_req_msg_data_v01_ei; + + memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01)); + resp_desc.max_msg_len = + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01; + resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei; + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + sizeof(struct ipa_fltr_installed_notif_req_msg_v01), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_REQ_TIMEOUT_MS); + return ipa_check_qmi_response(rc, + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_fltr_installed_notif_resp"); +} + +static void ipa_q6_clnt_recv_msg(struct work_struct *work) +{ + int rc; + + do { + IPAWANDBG_LOW("Notified about a Receive Event"); + rc = qmi_recv_msg(ipa_q6_clnt); + } while 
(rc == 0); + if (rc != -ENOMSG) + IPAWANERR("Error receiving message\n"); +} + +static void ipa_q6_clnt_notify(struct qmi_handle *handle, + enum qmi_event_type event, void *notify_priv) +{ + switch (event) { + case QMI_RECV_MSG: + IPAWANDBG_LOW("client qmi recv message called"); + if (!atomic_read(&workqueues_stopped)) + queue_delayed_work(ipa_clnt_resp_workqueue, + &work_recv_msg_client, 0); + break; + default: + break; + } +} + +static void ipa_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id, + void *msg, unsigned int msg_len, + void *ind_cb_priv) +{ + struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind; + struct msg_desc qmi_ind_desc; + int rc = 0; + + if (handle != ipa_q6_clnt) { + IPAWANERR("Wrong client\n"); + return; + } + + if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) { + memset(&qmi_ind, 0, sizeof( + struct ipa_data_usage_quota_reached_ind_msg_v01)); + qmi_ind_desc.max_msg_len = + QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01; + qmi_ind_desc.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01; + qmi_ind_desc.ei_array = + ipa_data_usage_quota_reached_ind_msg_data_v01_ei; + + rc = qmi_kernel_decode(&qmi_ind_desc, &qmi_ind, msg, msg_len); + if (rc < 0) { + IPAWANERR("Error decoding msg_id %d\n", msg_id); + return; + } + IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n", + qmi_ind.apn.mux_id, + (unsigned long) qmi_ind.apn.num_Mbytes); + ipa_broadcast_quota_reach_ind(qmi_ind.apn.mux_id, + IPA_UPSTEAM_MODEM); + } +} + +static void ipa_q6_clnt_svc_arrive(struct work_struct *work) +{ + int rc; + struct ipa_master_driver_init_complt_ind_msg_v01 ind; + + /* Create a Local client port for QMI communication */ + ipa_q6_clnt = qmi_handle_create(ipa_q6_clnt_notify, NULL); + if (!ipa_q6_clnt) { + IPAWANERR("QMI client handle alloc failed\n"); + return; + } + + IPAWANDBG("Lookup server name, get client-hdl(%p)\n", + ipa_q6_clnt); + rc = qmi_connect_to_service(ipa_q6_clnt, + IPA_Q6_SERVICE_SVC_ID, + IPA_Q6_SVC_VERS, + 
IPA_Q6_SERVICE_INS_ID); + if (rc < 0) { + IPAWANERR("Server not found\n"); + ipa_q6_clnt_svc_exit(0); + return; + } + + rc = qmi_register_ind_cb(ipa_q6_clnt, ipa_q6_clnt_ind_cb, NULL); + if (rc < 0) + IPAWANERR("Unable to register for indications\n"); + + ipa_q6_clnt_reset = 0; + IPAWANDBG("Q6 QMI service available now\n"); + /* Initialize modem IPA-driver */ + IPAWANDBG("send qmi_init_modem_send_sync_msg to modem\n"); + rc = qmi_init_modem_send_sync_msg(); + if ((rc == -ENETRESET) || (rc == -ENODEV)) { + IPAWANERR("qmi_init_modem_send_sync_msg failed due to SSR!\n"); + /* Cleanup will take place when ipa_wwan_remove is called */ + return; + } + if (rc != 0) { + IPAWANERR("qmi_init_modem_send_sync_msg failed\n"); + /* + * This is a very unexpected scenario, which requires a kernel + * panic in order to force dumps for QMI/Q6 side analysis. + */ + ipa_assert(); + return; + } + qmi_modem_init_fin = true; + + /* In cold-bootup, first_time_handshake = false */ + ipa_q6_handshake_complete(first_time_handshake); + first_time_handshake = true; + + IPAWANDBG("complete, qmi_modem_init_fin : %d\n", + qmi_modem_init_fin); + + if (qmi_indication_fin) { + IPAWANDBG("send indication to modem (%d)\n", + qmi_indication_fin); + memset(&ind, 0, sizeof(struct + ipa_master_driver_init_complt_ind_msg_v01)); + ind.master_driver_init_status.result = + IPA_QMI_RESULT_SUCCESS_V01; + rc = qmi_send_ind(ipa_svc_handle, curr_conn, + &ipa_master_driver_complete_indication_desc, + &ind, + sizeof(ind)); + IPAWANDBG("ipa_qmi_service_client good\n"); + } else { + IPAWANERR("not send indication (%d)\n", + qmi_indication_fin); + } +} + + +static void ipa_q6_clnt_svc_exit(struct work_struct *work) +{ + mutex_lock(&ipa_qmi_lock); + + if (ipa_q6_clnt) + qmi_handle_destroy(ipa_q6_clnt); + ipa_q6_clnt_reset = 1; + ipa_q6_clnt = NULL; + + mutex_unlock(&ipa_qmi_lock); +} + + +static int ipa_q6_clnt_svc_event_notify(struct notifier_block *this, + unsigned long code, + void *_cmd) +{ + IPAWANDBG("event 
%ld\n", code); + switch (code) { + case QMI_SERVER_ARRIVE: + if (!atomic_read(&workqueues_stopped)) + queue_delayed_work(ipa_clnt_req_workqueue, + &work_svc_arrive, 0); + break; + default: + break; + } + return 0; +} + + +static struct notifier_block ipa_q6_clnt_nb = { + .notifier_call = ipa_q6_clnt_svc_event_notify, +}; + +static void ipa_qmi_service_init_worker(void) +{ + int rc; + + /* Initialize QMI-service*/ + IPAWANDBG("IPA A7 QMI init OK :>>>>\n"); + + /* start the QMI msg cache */ + ipa_qmi_ctx = vzalloc(sizeof(*ipa_qmi_ctx)); + if (!ipa_qmi_ctx) { + IPAWANERR(":kzalloc err.\n"); + return; + } + ipa_qmi_ctx->modem_cfg_emb_pipe_flt = + ipa2_get_modem_cfg_emb_pipe_flt(); + + ipa_svc_workqueue = create_singlethread_workqueue("ipa_A7_svc"); + if (!ipa_svc_workqueue) { + IPAWANERR("Creating ipa_A7_svc workqueue failed\n"); + vfree(ipa_qmi_ctx); + ipa_qmi_ctx = NULL; + return; + } + + ipa_svc_handle = qmi_handle_create(qmi_ipa_a5_svc_ntfy, NULL); + if (!ipa_svc_handle) { + IPAWANERR("Creating ipa_A7_svc qmi handle failed\n"); + goto destroy_ipa_A7_svc_wq; + } + + /* + * Setting the current connection to NULL, as due to a race between + * server and client clean-up in SSR, the disconnect_cb might not + * have necessarily been called + */ + curr_conn = NULL; + + rc = qmi_svc_register(ipa_svc_handle, &ipa_a5_svc_ops_options); + if (rc < 0) { + IPAWANERR("Registering ipa_a5 svc failed %d\n", + rc); + goto destroy_qmi_handle; + } + + /* Initialize QMI-client */ + + ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req"); + if (!ipa_clnt_req_workqueue) { + IPAWANERR("Creating clnt_req workqueue failed\n"); + goto deregister_qmi_srv; + } + + ipa_clnt_resp_workqueue = create_singlethread_workqueue("clnt_resp"); + if (!ipa_clnt_resp_workqueue) { + IPAWANERR("Creating clnt_resp workqueue failed\n"); + goto destroy_clnt_req_wq; + } + + rc = qmi_svc_event_notifier_register(IPA_Q6_SERVICE_SVC_ID, + IPA_Q6_SVC_VERS, + IPA_Q6_SERVICE_INS_ID, &ipa_q6_clnt_nb); + if 
(rc < 0) { + IPAWANERR("notifier register failed\n"); + goto destroy_clnt_resp_wq; + } + + atomic_set(&ipa_qmi_initialized, 1); + /* get Q6 service and start send modem-initial to Q6 */ + IPAWANDBG("wait service available\n"); + return; + +destroy_clnt_resp_wq: + destroy_workqueue(ipa_clnt_resp_workqueue); + ipa_clnt_resp_workqueue = NULL; +destroy_clnt_req_wq: + destroy_workqueue(ipa_clnt_req_workqueue); + ipa_clnt_req_workqueue = NULL; +deregister_qmi_srv: + qmi_svc_unregister(ipa_svc_handle); +destroy_qmi_handle: + qmi_handle_destroy(ipa_svc_handle); + ipa_svc_handle = 0; +destroy_ipa_A7_svc_wq: + destroy_workqueue(ipa_svc_workqueue); + ipa_svc_workqueue = NULL; + vfree(ipa_qmi_ctx); + ipa_qmi_ctx = NULL; +} + +int ipa_qmi_service_init(uint32_t wan_platform_type) +{ + ipa_wan_platform = wan_platform_type; + qmi_modem_init_fin = false; + qmi_indication_fin = false; + atomic_set(&workqueues_stopped, 0); + + if (atomic_read(&ipa_qmi_initialized) == 0) + ipa_qmi_service_init_worker(); + return 0; +} + +void ipa_qmi_service_exit(void) +{ + int ret = 0; + + atomic_set(&workqueues_stopped, 1); + + /* qmi-service */ + if (ipa_svc_handle) { + ret = qmi_svc_unregister(ipa_svc_handle); + if (ret < 0) + IPAWANERR("unregister qmi handle %p failed, ret=%d\n", + ipa_svc_handle, ret); + } + if (ipa_svc_workqueue) { + flush_workqueue(ipa_svc_workqueue); + destroy_workqueue(ipa_svc_workqueue); + ipa_svc_workqueue = NULL; + } + + if (ipa_svc_handle) { + ret = qmi_handle_destroy(ipa_svc_handle); + if (ret < 0) + IPAWANERR("Error destroying qmi handle %p, ret=%d\n", + ipa_svc_handle, ret); + } + ipa_svc_handle = 0; + + /* qmi-client */ + + /* Unregister from events */ + ret = qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID, + IPA_Q6_SVC_VERS, + IPA_Q6_SERVICE_INS_ID, &ipa_q6_clnt_nb); + if (ret < 0) + IPAWANERR( + "Error qmi_svc_event_notifier_unregister service %d, ret=%d\n", + IPA_Q6_SERVICE_SVC_ID, ret); + + /* Release client handle */ + ipa_q6_clnt_svc_exit(0); + + if 
(ipa_clnt_req_workqueue) { + destroy_workqueue(ipa_clnt_req_workqueue); + ipa_clnt_req_workqueue = NULL; + } + if (ipa_clnt_resp_workqueue) { + destroy_workqueue(ipa_clnt_resp_workqueue); + ipa_clnt_resp_workqueue = NULL; + } + + mutex_lock(&ipa_qmi_lock); + /* clean the QMI msg cache */ + if (ipa_qmi_ctx != NULL) { + vfree(ipa_qmi_ctx); + ipa_qmi_ctx = NULL; + } + mutex_unlock(&ipa_qmi_lock); + qmi_modem_init_fin = false; + qmi_indication_fin = false; + atomic_set(&ipa_qmi_initialized, 0); +} + +void ipa_qmi_stop_workqueues(void) +{ + IPAWANDBG("Stopping all QMI workqueues\n"); + + /* Stopping all workqueues so new work won't be scheduled */ + atomic_set(&workqueues_stopped, 1); + + /* Making sure that the current scheduled work won't be executed */ + cancel_delayed_work(&work_recv_msg); + cancel_delayed_work(&work_recv_msg_client); + cancel_delayed_work(&work_svc_arrive); + cancel_delayed_work(&work_svc_exit); +} + +/* voting for bus BW to ipa_rm*/ +int vote_for_bus_bw(uint32_t *bw_mbps) +{ + struct ipa_rm_perf_profile profile; + int ret; + + if (bw_mbps == NULL) { + IPAWANERR("Bus BW is invalid\n"); + return -EINVAL; + } + + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = *bw_mbps; + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + &profile); + if (ret) + IPAWANERR("Failed to set perf profile to BW %u\n", + profile.max_supported_bandwidth_mbps); + else + IPAWANDBG("Succeeded to set perf profile to BW %u\n", + profile.max_supported_bandwidth_mbps); + + return ret; +} + +int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp) +{ + struct msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01; + req_desc.ei_array = ipa_get_data_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = 
QMI_IPA_GET_DATA_STATS_RESP_V01; + resp_desc.ei_array = ipa_get_data_stats_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + sizeof(struct ipa_get_data_stats_req_msg_v01), + &resp_desc, resp, + sizeof(struct ipa_get_data_stats_resp_msg_v01), + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG_LOW("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n"); + + return ipa_check_qmi_response(rc, + QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa_get_data_stats_resp_msg_v01"); +} + +int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp) +{ + struct msg_desc req_desc, resp_desc; + int rc; + + req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01; + req_desc.ei_array = ipa_get_apn_data_stats_req_msg_data_v01_ei; + + resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01; + resp_desc.ei_array = ipa_get_apn_data_stats_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + sizeof(struct ipa_get_apn_data_stats_req_msg_v01), + &resp_desc, resp, + sizeof(struct ipa_get_apn_data_stats_resp_msg_v01), + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG_LOW("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n"); + + return ipa_check_qmi_response(rc, + QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result, + resp->resp.error, "ipa_get_apn_data_stats_req_msg_v01"); +} + +int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req) +{ + struct ipa_set_data_usage_quota_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc; + + memset(&resp, 0, sizeof(struct 
ipa_set_data_usage_quota_resp_msg_v01)); + + req_desc.max_msg_len = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01; + req_desc.ei_array = ipa_set_data_usage_quota_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01; + resp_desc.ei_array = ipa_set_data_usage_quota_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + sizeof(struct ipa_set_data_usage_quota_req_msg_v01), + &resp_desc, &resp, sizeof(resp), + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG_LOW("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n"); + + return ipa_check_qmi_response(rc, + QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_set_data_usage_quota_req_msg_v01"); +} + +int ipa_qmi_stop_data_qouta(void) +{ + struct ipa_stop_data_usage_quota_req_msg_v01 req; + struct ipa_stop_data_usage_quota_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int rc; + + memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01)); + memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01)); + + req_desc.max_msg_len = + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01; + req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01; + req_desc.ei_array = ipa_stop_data_usage_quota_req_msg_data_v01_ei; + + resp_desc.max_msg_len = + QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01; + resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01; + resp_desc.ei_array = ipa_stop_data_usage_quota_resp_msg_data_v01_ei; + + IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n"); + if (unlikely(!ipa_q6_clnt)) + return -ETIMEDOUT; + rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req), + &resp_desc, &resp, sizeof(resp), + 
QMI_SEND_STATS_REQ_TIMEOUT_MS); + + IPAWANDBG_LOW("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n"); + + return ipa_check_qmi_response(rc, + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result, + resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01"); +} + +void ipa_qmi_init(void) +{ + mutex_init(&ipa_qmi_lock); +} + +void ipa_qmi_cleanup(void) +{ + mutex_destroy(&ipa_qmi_lock); +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h new file mode 100644 index 000000000000..3ce809f8d317 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013-2018, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef IPA_QMI_SERVICE_H +#define IPA_QMI_SERVICE_H + +#include +#include +#include +#include +#include "ipa_i.h" +#include + +/** + * name of the DL wwan default routing tables for v4 and v6 + */ +#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr" +#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt" +#define MAX_NUM_Q6_RULE 35 +#define MAX_NUM_QMI_RULE_CACHE 10 +#define DEV_NAME "ipa-wan" +#define SUBSYS_MODEM "modem" + +#define IPAWANDBG(fmt, args...) \ + do { \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANDBG_LOW(fmt, args...) \ + do { \ + pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANERR(fmt, args...) 
\ + do { \ + pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANERR_RL(fmt, args...) \ + do { \ + pr_err_ratelimited_ipa(DEV_NAME " %s:%d " fmt, __func__,\ + __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + +#define IPAWANINFO(fmt, args...) \ + do { \ + pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \ + DEV_NAME " %s:%d " fmt, ## args); \ + } while (0) + + +extern struct ipa_qmi_context *ipa_qmi_ctx; +extern struct mutex ipa_qmi_lock; + +struct ipa_qmi_context { +struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE]; +u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE]; +int num_ipa_install_fltr_rule_req_msg; +struct ipa_install_fltr_rule_req_msg_v01 + ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; +int num_ipa_fltr_installed_notif_req_msg; +struct ipa_fltr_installed_notif_req_msg_v01 + ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; +bool modem_cfg_emb_pipe_flt; +}; + +struct rmnet_mux_val { + uint32_t mux_id; + int8_t vchannel_name[IFNAMSIZ]; + bool mux_channel_set; + bool ul_flt_reg; + bool mux_hdr_set; + uint32_t hdr_hdl; +}; + +extern struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[]; +extern struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_indication_reg_req_msg_data_v01_ei[]; +extern struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[]; +extern struct elem_info 
ipa_install_fltr_rule_req_msg_data_v01_ei[]; +extern struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[]; +extern struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_config_req_msg_data_v01_ei[]; +extern struct elem_info ipa_config_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[]; +extern struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[]; +extern struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[]; +extern struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[]; +extern struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[]; +extern struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[]; + +/** + * struct ipa_rmnet_context - IPA rmnet context + * @ipa_rmnet_ssr: support modem SSR + * @polling_interval: Requested interval for polling tethered statistics + * @metered_mux_id: The mux ID on which quota has been set + */ +struct ipa_rmnet_context { + bool ipa_rmnet_ssr; + u64 polling_interval; + u32 metered_mux_id; +}; + +extern struct ipa_rmnet_context ipa_rmnet_ctx; + +#ifdef CONFIG_RMNET_IPA + +int ipa_qmi_service_init(uint32_t wan_platform_type); + +void ipa_qmi_service_exit(void); + +/* sending filter-install-request to modem*/ +int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req); + +/* sending 
filter-installed-notify-request to modem*/ +int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req); + +/* voting for bus BW to ipa_rm*/ +int vote_for_bus_bw(uint32_t *bw_mbps); + +int qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req); + +int qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req); + +int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 + *rule_req, uint32_t *rule_hdl); + +int wwan_update_mux_channel_prop(void); + +int wan_ioctl_init(void); + +void wan_ioctl_stop_qmi_messages(void); + +void wan_ioctl_enable_qmi_messages(void); + +void wan_ioctl_deinit(void); + +void ipa_qmi_stop_workqueues(void); + +int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data); + +int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data); + +void ipa_broadcast_quota_reach_ind(uint32_t mux_id, + enum ipa_upstream_type upstream_type); + +int rmnet_ipa_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe + *data); + +int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, + bool reset); + +int rmnet_ipa_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data); + +int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data); + +int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp); + +int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, + struct ipa_get_apn_data_stats_resp_msg_v01 *resp); + +int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req); + +int ipa_qmi_stop_data_qouta(void); + +void ipa_q6_handshake_complete(bool ssr_bootup); + +void ipa_qmi_init(void); + +void ipa_qmi_cleanup(void); + +#else /* CONFIG_RMNET_IPA */ + +static inline int ipa_qmi_service_init(uint32_t wan_platform_type) +{ + return -EPERM; +} + +static inline 
void ipa_qmi_service_exit(void) { } + +/* sending filter-install-request to modem*/ +static inline int qmi_filter_request_send( + struct ipa_install_fltr_rule_req_msg_v01 *req) +{ + return -EPERM; +} + +/* sending filter-installed-notify-request to modem*/ +static inline int qmi_filter_notify_send( + struct ipa_fltr_installed_notif_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int qmi_enable_force_clear_datapath_send( + struct ipa_enable_force_clear_datapath_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int qmi_disable_force_clear_datapath_send( + struct ipa_disable_force_clear_datapath_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int copy_ul_filter_rule_to_ipa( + struct ipa_install_fltr_rule_req_msg_v01 *rule_req, uint32_t *rule_hdl) +{ + return -EPERM; +} + +static inline int wwan_update_mux_channel_prop(void) +{ + return -EPERM; +} + +static inline int wan_ioctl_init(void) +{ + return -EPERM; +} + +static inline void wan_ioctl_stop_qmi_messages(void) { } + +static inline void wan_ioctl_enable_qmi_messages(void) { } + +static inline void wan_ioctl_deinit(void) { } + +static inline void ipa_qmi_stop_workqueues(void) { } + +static inline int vote_for_bus_bw(uint32_t *bw_mbps) +{ + return -EPERM; +} + +static inline int rmnet_ipa_poll_tethering_stats( + struct wan_ioctl_poll_tethering_stats *data) +{ + return -EPERM; +} + +static inline int rmnet_ipa_set_data_quota( + struct wan_ioctl_set_data_quota *data) +{ + return -EPERM; +} + +static inline void ipa_broadcast_quota_reach_ind +( + uint32_t mux_id, + enum ipa_upstream_type upstream_type) +{ +} + +static inline int rmnet_ipa_reset_tethering_stats +( + struct wan_ioctl_reset_tether_stats *data +) +{ + return -EPERM; + +} + +static inline int ipa_qmi_get_data_stats( + struct ipa_get_data_stats_req_msg_v01 *req, + struct ipa_get_data_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa_qmi_get_network_stats( + struct ipa_get_apn_data_stats_req_msg_v01 *req, + 
struct ipa_get_apn_data_stats_resp_msg_v01 *resp) +{ + return -EPERM; +} + +static inline int ipa_qmi_set_data_quota( + struct ipa_set_data_usage_quota_req_msg_v01 *req) +{ + return -EPERM; +} + +static inline int ipa_qmi_stop_data_qouta(void) +{ + return -EPERM; +} + +static inline void ipa_q6_handshake_complete(bool ssr_bootup) { } + +static inline void ipa_qmi_init(void) +{ +} + +static inline void ipa_qmi_cleanup(void) +{ +} + +#endif /* CONFIG_RMNET_IPA */ + +#endif /* IPA_QMI_SERVICE_H */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c new file mode 100644 index 000000000000..8e09937002fc --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c @@ -0,0 +1,2418 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2017, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include + +#include + +/* Type Definitions */ +static struct elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_route_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_route_tbl_info_type_v01, + route_tbl_start_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = 
NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_route_tbl_info_type_v01, + num_indices), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_modem_mem_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_modem_mem_info_type_v01, + block_start_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_modem_mem_info_type_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01, + modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_zip_tbl_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_zip_tbl_info_type_v01, + modem_offset_start), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_zip_tbl_info_type_v01, 
+ modem_offset_end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + range_low), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_range_eq_16_type_v01, + range_high), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + mask), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_32_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + 
.is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_eq_16_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_16_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 16, + .elem_size = sizeof(uint8_t), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + mask), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 16, + .elem_size = sizeof(uint8_t), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ipfltr_mask_eq_128_type_v01, + value), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_filter_rule_type_data_v01_ei[] = { + { + .data_type = 
QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + rule_eq_bitmap), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + tos_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tos_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + protocol_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + protocol_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_ihl_offset_range_16), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01, + .elem_size = sizeof( + struct ipa_ipfltr_range_eq_16_type_v01), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_range_16), + .ei_array = ipa_ipfltr_range_eq_16_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_offset_meq_32), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 
QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + offset_meq_32), + .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tc_eq_present), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + tc_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + flow_eq_present), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + flow_eq), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_16_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_eq_16_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_16), + .ei_array = ipa_ipfltr_eq_16_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_32_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + 
.elem_size = sizeof(struct ipa_ipfltr_eq_32_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_eq_32), + .ei_array = ipa_ipfltr_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_ihl_offset_meq_32), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ihl_offset_meq_32), + .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + num_offset_meq_128), + }, + { + .data_type = QMI_STRUCT, + .elem_len = + QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01, + .elem_size = sizeof( + struct ipa_ipfltr_mask_eq_128_type_v01), + .is_array = STATIC_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_type_v01, + offset_meq_128), + .ei_array = ipa_ipfltr_mask_eq_128_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + metadata_meq32_present), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + metadata_meq32), + .ei_array = ipa_ipfltr_mask_eq_32_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + 
.elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_rule_type_v01, + ipv4_frag_eq_present), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_filter_spec_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_spec_identifier), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + ip_type), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_filter_rule_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_rule), + .ei_array = ipa_filter_rule_type_data_v01_ei, + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + filter_action), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + is_routing_table_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + route_table_index), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + 
is_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_filter_spec_type_v01, + mux_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info + ipa_filter_rule_identifier_to_handle_map_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_identifier_to_handle_map_v01, + filter_spec_identifier), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_rule_identifier_to_handle_map_v01, + filter_handle), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_handle_to_index_map_v01, + filter_handle), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_filter_handle_to_index_map_v01, + filter_index), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + platform_type_valid), + }, + { + .data_type = 
QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + platform_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_hdr_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_tbl_info), + .ei_array = ipa_hdr_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_route_tbl_info), + .ei_array = ipa_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_route_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_route_tbl_info), + .ei_array = ipa_route_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_filter_tbl_start_addr_valid), + }, + { + 
.data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v4_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_filter_tbl_start_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + v6_filter_tbl_start_addr), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + modem_mem_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_modem_mem_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + modem_mem_info), + .ei_array = ipa_modem_mem_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + ctrl_comm_dest_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + ctrl_comm_dest_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + is_ssr_bootup_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = 
NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + is_ssr_bootup), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_proc_ctx_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof( + struct ipa_hdr_proc_ctx_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + hdr_proc_ctx_tbl_info), + .ei_array = ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + zip_tbl_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_zip_tbl_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_init_modem_driver_req_msg_v01, + zip_tbl_info), + .ei_array = ipa_zip_tbl_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + ctrl_comm_dest_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + 
struct ipa_init_modem_driver_resp_msg_v01, + ctrl_comm_dest_end_pt), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + default_end_pt_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_init_modem_driver_resp_msg_v01, + default_end_pt), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + master_driver_init_complete_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + master_driver_init_complete), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + data_usage_quota_reached_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_indication_reg_req_msg_v01, + data_usage_quota_reached), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_indication_reg_resp_msg_v01, + resp), + 
.ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + ipa_master_driver_init_complt_ind_msg_v01, + master_driver_init_status), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_filter_spec_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + filter_spec_list), + .ei_array = ipa_filter_spec_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + source_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = 
NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_install_fltr_rule_req_msg_v01, + xlat_filter_indices_list), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + 
.elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof( + struct ipa_filter_rule_identifier_to_handle_map_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_install_fltr_rule_resp_msg_v01, + filter_handle_list), + .ei_array = + ipa_filter_rule_identifier_to_handle_map_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + source_pipe_index), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + install_status), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + filter_index_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof( + struct ipa_filter_handle_to_index_map_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + filter_index_list), + .ei_array = ipa_filter_handle_to_index_map_data_v01_ei, + }, + { + .data_type = 
QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_pipe_index_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_pipe_index), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + retain_header_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + retain_header), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_call_mux_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + embedded_call_mux_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv4_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv4_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct 
ipa_fltr_installed_notif_req_msg_v01, + num_ipv6_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + num_ipv6_filters), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv4_filter_idx_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv4_filter_idx), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv6_filter_idx_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + start_ipv6_filter_idx), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + 
.is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_fltr_installed_notif_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + source_pipe_bitmask), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + request_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + throttle_source_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + 
.elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_req_msg_v01, + throttle_source), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_force_clear_datapath_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_disable_force_clear_datapath_req_msg_v01, + request_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_disable_force_clear_datapath_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_config_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 
0x10, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_deaggr_supported_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_deaggr_supported), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_config_req_msg_v01, + max_aggr_frame_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_config_req_msg_v01, + max_aggr_frame_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ipa_ingress_pipe_mode_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ipa_ingress_pipe_mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_speed_info_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof( + struct ipa_config_req_msg_v01, + peripheral_speed_info), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct 
ipa_config_req_msg_v01, + dl_accumulation_time_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_time_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_pkt_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_pkt_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_byte_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_accumulation_byte_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_accumulation_time_limit_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_accumulation_time_limit), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof( + struct ipa_config_req_msg_v01, + hw_control_flags_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + 
.offset = offsetof( + struct ipa_config_req_msg_v01, + hw_control_flags), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_msi_event_threshold_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof( + struct ipa_config_req_msg_v01, + ul_msi_event_threshold), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_msi_event_threshold_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof( + struct ipa_config_req_msg_v01, + dl_msi_event_threshold), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_config_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_config_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + ipa_stats_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + reset_stats_valid), + }, + { + .data_type 
= QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_req_msg_v01, + reset_stats), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + pipe_index), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv4_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv4_bytes), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv6_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_pipe_stats_info_type_v01, + num_ipv6_bytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_stats_type_filter_rule_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_stats_type_filter_rule_v01, + filter_rule_index), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size 
= sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_stats_type_filter_rule_v01, + num_packets), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ipa_stats_type_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ipa_stats_type), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PIPES_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + ul_src_pipe_stats_list), + .ei_array = ipa_pipe_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct 
ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PIPES_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_dst_pipe_stats_list), + .ei_array = ipa_pipe_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof( + struct ipa_get_data_stats_resp_msg_v01, + dl_filter_rule_stats_list), + .ei_array = ipa_stats_type_filter_rule_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + 
ipa_apn_data_stats_info_type_v01, + num_ul_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_ul_bytes), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_dl_packets), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_apn_data_stats_info_type_v01, + num_dl_bytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_req_msg_v01, + mux_id_list), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + 
resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(struct + ipa_apn_data_stats_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_apn_data_stats_resp_msg_v01, + apn_data_stats_list), + .ei_array = ipa_apn_data_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_data_usage_quota_info_type_v01, + mux_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + ipa_data_usage_quota_info_type_v01, + num_Mbytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 
0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_APN_V01, + .elem_size = sizeof(struct + ipa_data_usage_quota_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_set_data_usage_quota_req_msg_v01, + apn_quota_list), + .ei_array = ipa_data_usage_quota_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_set_data_usage_quota_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct + ipa_data_usage_quota_info_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_data_usage_quota_reached_ind_msg_v01, + apn), + .ei_array = ipa_data_usage_quota_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[] = { + /* ipa_stop_data_usage_quota_req_msg is empty */ + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_stop_data_usage_quota_resp_msg_v01, + resp), + .ei_array = 
get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h new file mode 100644 index 000000000000..bbc0c1619bb2 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2015, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef _IPA_RAM_MMAP_H_ +#define _IPA_RAM_MMAP_H_ + +/* + * This header defines the memory map of the IPA RAM (not all SRAM is + * available for SW use) + * In case of restricted bytes the actual starting address will be + * advanced by the number of needed bytes + */ + +#define IPA_RAM_NAT_OFST 0 +#define IPA_RAM_NAT_SIZE 0 + +#define IPA_MEM_v1_RAM_HDR_OFST (IPA_RAM_NAT_OFST + IPA_RAM_NAT_SIZE) +#define IPA_MEM_v1_RAM_HDR_SIZE 1664 +#define IPA_MEM_v1_RAM_V4_FLT_OFST (IPA_MEM_v1_RAM_HDR_OFST +\ + IPA_MEM_v1_RAM_HDR_SIZE) +#define IPA_MEM_v1_RAM_V4_FLT_SIZE 2176 +#define IPA_MEM_v1_RAM_V4_RT_OFST (IPA_MEM_v1_RAM_V4_FLT_OFST +\ + IPA_MEM_v1_RAM_V4_FLT_SIZE) +#define IPA_MEM_v1_RAM_V4_RT_SIZE 512 +#define IPA_MEM_v1_RAM_V6_FLT_OFST (IPA_MEM_v1_RAM_V4_RT_OFST +\ + IPA_MEM_v1_RAM_V4_RT_SIZE) +#define IPA_MEM_v1_RAM_V6_FLT_SIZE 1792 +#define IPA_MEM_v1_RAM_V6_RT_OFST (IPA_MEM_v1_RAM_V6_FLT_OFST +\ + IPA_MEM_v1_RAM_V6_FLT_SIZE) +#define IPA_MEM_v1_RAM_V6_RT_SIZE 512 +#define IPA_MEM_v1_RAM_END_OFST (IPA_MEM_v1_RAM_V6_RT_OFST +\ + IPA_MEM_v1_RAM_V6_RT_SIZE) + +#define IPA_MEM_RAM_V6_RT_SIZE_DDR 16384 +#define IPA_MEM_RAM_V4_RT_SIZE_DDR 16384 +#define IPA_MEM_RAM_V6_FLT_SIZE_DDR 16384 +#define IPA_MEM_RAM_V4_FLT_SIZE_DDR 16384 +#define IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR 0 + +#define IPA_MEM_CANARY_SIZE 4 +#define IPA_MEM_CANARY_VAL 0xdeadbeef + +#define IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE 256 +/* + * IPA v2.0 and v2.1 SRAM memory layout: + * +-------------+ + * | 
V4 FLT HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | CANARY | + * +-------------+ + * | V6 FLT HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | CANARY | + * +-------------+ + * | V4 RT HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | V6 RT HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | MODEM HDR | + * +-------------+ + * | APPS HDR | + * +-------------+ + * | CANARY | + * +-------------+ + * | MODEM MEM | + * +-------------+ + * | CANARY | + * +-------------+ + * | APPS V4 FLT | + * +-------------+ + * | APPS V6 FLT | + * +-------------+ + * | CANARY | + * +-------------+ + * | UC INFO | + * +-------------+ + */ +#define IPA_MEM_v2_RAM_OFST_START 128 +#define IPA_MEM_v2_RAM_V4_FLT_OFST IPA_MEM_v2_RAM_OFST_START +#define IPA_MEM_v2_RAM_V4_FLT_SIZE 88 + +/* V4 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_V4_FLT_OFST & 7) +#error V4 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_V6_FLT_OFST (IPA_MEM_v2_RAM_V4_FLT_OFST + \ + IPA_MEM_v2_RAM_V4_FLT_SIZE + 2*IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_V6_FLT_SIZE 88 + +/* V6 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_V6_FLT_OFST & 7) +#error V6 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_V4_RT_OFST (IPA_MEM_v2_RAM_V6_FLT_OFST + \ + IPA_MEM_v2_RAM_V6_FLT_SIZE + 2*IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_V4_NUM_INDEX 11 +#define IPA_MEM_v2_V4_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_V4_MODEM_RT_INDEX_HI 3 +#define IPA_MEM_v2_V4_APPS_RT_INDEX_LO 4 +#define IPA_MEM_v2_V4_APPS_RT_INDEX_HI 10 +#define IPA_MEM_v2_RAM_V4_RT_SIZE (IPA_MEM_v2_RAM_V4_NUM_INDEX * 4) + +/* V4 routing header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_V4_RT_OFST & 7) +#error V4 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_V6_RT_OFST (IPA_MEM_v2_RAM_V4_RT_OFST + \ + IPA_MEM_v2_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_V6_NUM_INDEX 11 
+#define IPA_MEM_v2_V6_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_V6_MODEM_RT_INDEX_HI 3 +#define IPA_MEM_v2_V6_APPS_RT_INDEX_LO 4 +#define IPA_MEM_v2_V6_APPS_RT_INDEX_HI 10 +#define IPA_MEM_v2_RAM_V6_RT_SIZE (IPA_MEM_v2_RAM_V6_NUM_INDEX * 4) + +/* V6 routing header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_V6_RT_OFST & 7) +#error V6 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_MODEM_HDR_OFST (IPA_MEM_v2_RAM_V6_RT_OFST + \ + IPA_MEM_v2_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_MODEM_HDR_SIZE 320 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_MODEM_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_APPS_HDR_OFST (IPA_MEM_v2_RAM_MODEM_HDR_OFST + \ + IPA_MEM_v2_RAM_MODEM_HDR_SIZE) +#define IPA_MEM_v2_RAM_APPS_HDR_SIZE 72 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_RAM_APPS_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_RAM_MODEM_OFST (IPA_MEM_v2_RAM_APPS_HDR_OFST + \ + IPA_MEM_v2_RAM_APPS_HDR_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_MODEM_SIZE 3532 + +/* modem memory is 4B aligned */ +#if (IPA_MEM_v2_RAM_MODEM_OFST & 3) +#error modem memory is not 4B aligned +#endif + +#define IPA_MEM_v2_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_RAM_MODEM_OFST + \ + IPA_MEM_v2_RAM_MODEM_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE 1920 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_RAM_APPS_V4_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_RAM_APPS_V6_FLT_OFST (IPA_MEM_v2_RAM_APPS_V4_FLT_OFST + \ + IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE) +#define IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE 1372 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_RAM_APPS_V6_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_RAM_UC_INFO_OFST (IPA_MEM_v2_RAM_APPS_V6_FLT_OFST + \ + IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_RAM_UC_INFO_SIZE 292 + 
+/* uC info 4B aligned */ +#if (IPA_MEM_v2_RAM_UC_INFO_OFST & 3) +#error uC info is not 4B aligned +#endif + +#define IPA_MEM_v2_RAM_END_OFST (IPA_MEM_v2_RAM_UC_INFO_OFST + \ + IPA_MEM_v2_RAM_UC_INFO_SIZE) +#define IPA_MEM_v2_RAM_APPS_V4_RT_OFST IPA_MEM_v2_RAM_END_OFST +#define IPA_MEM_v2_RAM_APPS_V4_RT_SIZE 0 +#define IPA_MEM_v2_RAM_APPS_V6_RT_OFST IPA_MEM_v2_RAM_END_OFST +#define IPA_MEM_v2_RAM_APPS_V6_RT_SIZE 0 +#define IPA_MEM_v2_RAM_HDR_SIZE_DDR 4096 + +/* + * IPA v2.5/v2.6 SRAM memory layout: + * +----------------+ + * | UC INFO | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V4 FLT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V6 FLT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V4 RT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | V6 RT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM HDR | + * +----------------+ + * | APPS HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM PROC CTX | + * +----------------+ + * | APPS PROC CTX | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM MEM | + * +----------------+ + * | CANARY | + * +----------------+ + */ + +#define IPA_MEM_v2_5_RAM_UC_MEM_SIZE 128 +#define IPA_MEM_v2_5_RAM_UC_INFO_OFST IPA_MEM_v2_5_RAM_UC_MEM_SIZE +#define IPA_MEM_v2_5_RAM_UC_INFO_SIZE 512 + +/* uC info 4B aligned */ +#if (IPA_MEM_v2_5_RAM_UC_INFO_OFST & 3) +#error uC info is not 4B aligned +#endif + +#define IPA_MEM_v2_5_RAM_OFST_START (IPA_MEM_v2_5_RAM_UC_INFO_OFST + \ + IPA_MEM_v2_5_RAM_UC_INFO_SIZE) + +#define IPA_MEM_v2_5_RAM_V4_FLT_OFST (IPA_MEM_v2_5_RAM_OFST_START + \ + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_V4_FLT_SIZE 88 + +/* V4 filtering header table is 8B aligned */ +#if 
(IPA_MEM_v2_5_RAM_V4_FLT_OFST & 7) +#error V4 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_V6_FLT_OFST (IPA_MEM_v2_5_RAM_V4_FLT_OFST + \ + IPA_MEM_v2_5_RAM_V4_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_V6_FLT_SIZE 88 + +/* V6 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_V6_FLT_OFST & 7) +#error V6 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_V4_RT_OFST (IPA_MEM_v2_5_RAM_V6_FLT_OFST + \ + IPA_MEM_v2_5_RAM_V6_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_V4_NUM_INDEX 15 +#define IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI 6 +#define IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO \ + (IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI + 1) +#define IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI \ + (IPA_MEM_v2_5_RAM_V4_NUM_INDEX - 1) +#define IPA_MEM_v2_5_RAM_V4_RT_SIZE (IPA_MEM_v2_5_RAM_V4_NUM_INDEX * 4) + +/* V4 routing header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_V4_RT_OFST & 7) +#error V4 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_V6_RT_OFST (IPA_MEM_v2_5_RAM_V4_RT_OFST + \ + IPA_MEM_v2_5_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_V6_NUM_INDEX 15 +#define IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI 6 +#define IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO \ + (IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI + 1) +#define IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI \ + (IPA_MEM_v2_5_RAM_V6_NUM_INDEX - 1) +#define IPA_MEM_v2_5_RAM_V6_RT_SIZE (IPA_MEM_v2_5_RAM_V6_NUM_INDEX * 4) + +/* V6 routing header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_V6_RT_OFST & 7) +#error V6 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_MODEM_HDR_OFST (IPA_MEM_v2_5_RAM_V6_RT_OFST + \ + IPA_MEM_v2_5_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE 320 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_MODEM_HDR_OFST & 7) +#error header table is not 8B aligned 
+#endif + +#define IPA_MEM_v2_5_RAM_APPS_HDR_OFST (IPA_MEM_v2_5_RAM_MODEM_HDR_OFST + \ + IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_HDR_SIZE 0 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_APPS_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST \ + (IPA_MEM_v2_5_RAM_APPS_HDR_OFST + IPA_MEM_v2_5_RAM_APPS_HDR_SIZE + \ + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE 512 + +/* header processing context table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST & 7) +#error header processing context table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST \ + (IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST + \ + IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE 512 + +/* header processing context table is 8B aligned */ +#if (IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST & 7) +#error header processing context table is not 8B aligned +#endif + +#define IPA_MEM_v2_5_RAM_MODEM_OFST (IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST + \ + IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_MODEM_SIZE 5800 + +/* modem memory is 4B aligned */ +#if (IPA_MEM_v2_5_RAM_MODEM_OFST & 3) +#error modem memory is not 4B aligned +#endif + +#define IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_5_RAM_MODEM_OFST + \ + IPA_MEM_v2_5_RAM_MODEM_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE 0 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST (IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST + \ + IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE 0 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_5_RAM_END_OFST 
(IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST + \ + IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST IPA_MEM_v2_5_RAM_END_OFST +#define IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE 0 +#define IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST IPA_MEM_v2_5_RAM_END_OFST +#define IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE 0 +#define IPA_MEM_v2_5_RAM_HDR_SIZE_DDR 2048 + +/* + * IPA v2.6Lite SRAM memory layout: + * +----------------+ + * | UC INFO | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V4 FLT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V6 FLT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | V4 RT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | V6 RT HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM HDR | + * +----------------+ + * | CANARY | + * +----------------+ + * | CANARY | + * +----------------+ + * | COMP / DECOMP | + * +----------------+ + * | CANARY | + * +----------------+ + * | MODEM MEM | + * +----------------+ + * | CANARY | + * +----------------+ + */ + +#define IPA_MEM_v2_6L_RAM_UC_MEM_SIZE 128 +#define IPA_MEM_v2_6L_RAM_UC_INFO_OFST IPA_MEM_v2_6L_RAM_UC_MEM_SIZE +#define IPA_MEM_v2_6L_RAM_UC_INFO_SIZE 512 + +/* uC info 4B aligned */ +#if (IPA_MEM_v2_6L_RAM_UC_INFO_OFST & 3) +#error uC info is not 4B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_OFST_START (IPA_MEM_v2_6L_RAM_UC_INFO_OFST + \ + IPA_MEM_v2_6L_RAM_UC_INFO_SIZE) + +#define IPA_MEM_v2_6L_RAM_V4_FLT_OFST (IPA_MEM_v2_6L_RAM_OFST_START + \ + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_V4_FLT_SIZE 88 + +/* V4 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_V4_FLT_OFST & 7) +#error V4 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_V6_FLT_OFST (IPA_MEM_v2_6L_RAM_V4_FLT_OFST + \ + 
IPA_MEM_v2_6L_RAM_V4_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_V6_FLT_SIZE 88 + +/* V6 filtering header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_V6_FLT_OFST & 7) +#error V6 filtering header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_V4_RT_OFST (IPA_MEM_v2_6L_RAM_V6_FLT_OFST + \ + IPA_MEM_v2_6L_RAM_V6_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_V4_NUM_INDEX 15 +#define IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI 6 +#define IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO \ + (IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI + 1) +#define IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI \ + (IPA_MEM_v2_6L_RAM_V4_NUM_INDEX - 1) +#define IPA_MEM_v2_6L_RAM_V4_RT_SIZE (IPA_MEM_v2_6L_RAM_V4_NUM_INDEX * 4) + +/* V4 routing header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_V4_RT_OFST & 7) +#error V4 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_V6_RT_OFST (IPA_MEM_v2_6L_RAM_V4_RT_OFST + \ + IPA_MEM_v2_6L_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_V6_NUM_INDEX 15 +#define IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO 0 +#define IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI 6 +#define IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO \ + (IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI + 1) +#define IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI \ + (IPA_MEM_v2_6L_RAM_V6_NUM_INDEX - 1) +#define IPA_MEM_v2_6L_RAM_V6_RT_SIZE (IPA_MEM_v2_6L_RAM_V6_NUM_INDEX * 4) + +/* V6 routing header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_V6_RT_OFST & 7) +#error V6 routing header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST (IPA_MEM_v2_6L_RAM_V6_RT_OFST + \ + IPA_MEM_v2_6L_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE 320 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_APPS_HDR_OFST (IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST + \ + IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE) +#define 
IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE 0 + +/* header table is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_APPS_HDR_OFST & 7) +#error header table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST \ + (IPA_MEM_v2_6L_RAM_APPS_HDR_OFST + IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE + \ + 2 * IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE 512 + +/* comp/decomp memory region is 8B aligned */ +#if (IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST & 7) +#error header processing context table is not 8B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_MODEM_OFST \ + (IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST + \ + IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE + IPA_MEM_CANARY_SIZE) +#define IPA_MEM_v2_6L_RAM_MODEM_SIZE 6376 + +/* modem memory is 4B aligned */ +#if (IPA_MEM_v2_6L_RAM_MODEM_OFST & 3) +#error modem memory is not 4B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_6L_RAM_MODEM_OFST + \ + IPA_MEM_v2_6L_RAM_MODEM_SIZE) +#define IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE 0 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST \ + (IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST + \ + IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE) +#define IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE 0 + +/* filtering rule is 4B aligned */ +#if (IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST & 3) +#error filtering rule is not 4B aligned +#endif + +#define IPA_MEM_v2_6L_RAM_END_OFST (IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST + \ + IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE) + +#define IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST IPA_MEM_v2_6L_RAM_END_OFST +#define IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE 0 +#define IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST IPA_MEM_v2_6L_RAM_END_OFST +#define IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE 0 +#define IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR 2048 + +#endif /* _IPA_RAM_MMAP_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h 
b/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h new file mode 100644 index 000000000000..873d8e472547 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2012-2016, 2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __IPA_REG_H__ +#define __IPA_REG_H__ + +/* + * IPA's BAM specific registers + * Used for IPA HW 1.0 only + */ + +#define IPA_BAM_REG_BASE_OFST 0x00004000 +#define IPA_BAM_CNFG_BITS_OFST 0x7c +#define IPA_BAM_REMAP_SIZE (0x1000) + +#define IPA_FILTER_FILTER_EN_BMSK 0x1 +#define IPA_FILTER_FILTER_EN_SHFT 0x0 +#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094 +#define IPA_AGGREGATION_QCNCM_SIG0_SHFT 16 +#define IPA_AGGREGATION_QCNCM_SIG1_SHFT 8 + +#define IPA_AGGREGATION_SPARE_REG_1_OFST 0x00002090 +#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094 + +#define IPA_AGGREGATION_SINGLE_NDP_MSK 0x1 +#define IPA_AGGREGATION_SINGLE_NDP_BMSK 0xfffffffe + +#define IPA_AGGREGATION_MODE_MSK 0x1 +#define IPA_AGGREGATION_MODE_SHFT 31 +#define IPA_AGGREGATION_MODE_BMSK 0x7fffffff + +#define IPA_AGGREGATION_QCNCM_SIG_BMSK 0xff000000 + +#define IPA_FILTER_FILTER_EN_BMSK 0x1 +#define IPA_FILTER_FILTER_EN_SHFT 0x0 + +#define IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT 2 +#define IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK 0x4 + +#define IPA_HEAD_OF_LINE_BLOCK_EN_OFST 0x00000044 + +/* + * End of IPA 1.0 Registers + */ + + +/* + * IPA HW 2.0 Registers + */ +#define IPA_REG_BASE 0x0 + +#define IPA_IRQ_STTS_EE_n_ADDR(n) (IPA_REG_BASE + 0x00001008 + 0x1000 * (n)) +#define IPA_IRQ_STTS_EE_n_MAXn 3 + +#define IPA_IRQ_EN_EE_n_ADDR(n) (IPA_REG_BASE + 0x0000100c + 0x1000 * (n)) +#define IPA_IRQ_EN_EE_n_MAXn 3 + + +#define IPA_IRQ_CLR_EE_n_ADDR(n) (IPA_REG_BASE + 0x00001010 + 0x1000 * (n)) +#define IPA_IRQ_CLR_EE_n_MAXn 3 + +#define IPA_IRQ_SUSPEND_INFO_EE_n_ADDR(n) \ + (IPA_REG_BASE + 0x00001098 + 0x1000 * (n)) +#define IPA_IRQ_SUSPEND_INFO_EE_n_MAXn 3 +/* + * End of IPA 2.0 
Registers + */ + +/* + * IPA HW 2.5 Registers + */ +#define IPA_BCR_OFST 0x000005B0 +#define IPA_COUNTER_CFG_OFST 0x000005E8 +#define IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK 0xF +#define IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT 0x0 +#define IPA_COUNTER_CFG_AGGR_GRAN_BMSK 0x1F0 +#define IPA_COUNTER_CFG_AGGR_GRAN_SHFT 0x4 + /* + * End of IPA 2.5 Registers + */ + +/* + * IPA HW 2.6/2.6L Registers + */ +#define IPA_ENABLED_PIPES_OFST 0x000005DC +#define IPA_YELLOW_MARKER_SYS_CFG_OFST 0x00000728 +/* + * End of IPA 2.6/2.6L Registers + */ + +/* + * Common Registers + */ +#define IPA_REG_BASE_OFST_v2_0 0x00020000 +#define IPA_REG_BASE_OFST_v2_5 0x00040000 +#define IPA_REG_BASE_OFST_v2_6L IPA_REG_BASE_OFST_v2_5 +#define IPA_COMP_SW_RESET_OFST 0x0000003c + +#define IPA_VERSION_OFST 0x00000034 +#define IPA_COMP_HW_VERSION_OFST 0x00000030 + +#define IPA_SHARED_MEM_SIZE_OFST_v1_1 0x00000050 +#define IPA_SHARED_MEM_SIZE_OFST_v2_0 0x00000050 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0 0xffff0000 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0 0x10 +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0 0xffff +#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0 0x0 + +#define IPA_ENDP_INIT_AGGR_N_OFST_v1_1(n) (0x000001c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_AGGR_N_OFST_v2_0(n) (0x00000320 + 0x4 * (n)) + +#define IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(n) (0x00000220 + 0x4 * (n)) +#define IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(n) (0x00000370 + 0x4 * (n)) +#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK 0x1f +#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT 0x0 + +#define IPA_ROUTE_OFST_v1_1 0x00000044 + +#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0 +#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1 +#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1 +#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e +#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6 +#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7 +#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80 +#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000 +#define 
IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11 + +#define IPA_FILTER_OFST_v1_1 0x00000048 + +#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v1_1(n) (0x00004000 + 0x4 * (n)) +#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(n) (0x00005000 + 0x4 * (n)) +#define IPA_SRAM_DIRECT_ACCESS_N_OFST(n) (0x00004000 + 0x4 * (n)) +#define IPA_SRAM_SW_FIRST_v2_5 0x00005000 +#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40 +#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0 +#define IPA_COMP_CFG_OFST 0x00000038 + +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x1 +#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000 +#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf +#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK 0x7c00 +#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT 0xa +#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK 0x3e0 +#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT 0x5 +#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK 0x1c +#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT 0x2 +#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK 0x3 +#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT 0x0 + +#define IPA_ENDP_INIT_MODE_N_OFST_v1_1(n) (0x00000170 + 0x4 * (n)) +#define IPA_ENDP_INIT_MODE_N_OFST_v2_0(n) (0x000002c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_MODE_N_RMSK 0x7f +#define IPA_ENDP_INIT_MODE_N_MAX 19 +#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1 0x7c +#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1 0x2 +#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0 0x1f0 +#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0 0x4 +#define IPA_ENDP_INIT_MODE_N_MODE_BMSK 0x7 +#define IPA_ENDP_INIT_MODE_N_MODE_SHFT 0x0 + +#define IPA_ENDP_INIT_HDR_N_OFST_v1_1(n) (0x00000120 + 0x4 * (n)) +#define IPA_ENDP_INIT_HDR_N_OFST_v2_0(n) (0x00000170 + 0x4 * (n)) +#define IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK 0x3f +#define 
IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT 0x0 +#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000 +#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK 0x3f00000 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT 0x14 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13 +#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000 +#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2 0x1c +#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000 +#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b +#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK 0x4000000 +#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT 0x1a +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK 0x40 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT 0x6 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT 0x7 +#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK 0x1f80 + +#define IPA_ENDP_INIT_NAT_N_OFST_v1_1(n) (0x000000c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_NAT_N_OFST_v2_0(n) (0x00000120 + 0x4 * (n)) +#define IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK 0x3 +#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0 + + +#define IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(n) (0x000001c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4 +#define 
IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0 0x1c00 +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa +#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5 0x3c00 + + + +/* + * IPA HW 1.1 specific Registers + */ + +#define IPA_FILTER_FILTER_DIS_BMSK 0x1 +#define IPA_FILTER_FILTER_DIS_SHFT 0x0 +#define IPA_SINGLE_NDP_MODE_OFST 0x00000064 +#define IPA_QCNCM_OFST 0x00000060 + +#define IPA_ENDP_INIT_CTRL_N_OFST(n) (0x00000070 + 0x4 * (n)) +#define IPA_ENDP_INIT_CTRL_N_RMSK 0x1 +#define IPA_ENDP_INIT_CTRL_N_MAX 19 +#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK 0x1 +#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT 0x0 +#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK 0x2 +#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT 0x1 + +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(n) (0x00000270 + 0x4 * (n)) +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(n) (0x000003c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_RMSK 0x1 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_MAX 19 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK 0x1 +#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT 0x0 + +#define IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(n) (0x00000470 + 0x04 * (n)) +#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F +#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x40 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x6 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00 +#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8 +#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000 +#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10 + +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(n) (0x000002c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(n) (0x00000420 + 0x4 * (n)) +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_RMSK 0x1ff +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_MAX 19 +#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK 0x1ff +#define 
IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT 0x0 + +#define IPA_DEBUG_CNT_REG_N_OFST_v1_1(n) (0x00000340 + 0x4 * (n)) +#define IPA_DEBUG_CNT_REG_N_OFST_v2_0(n) (0x00000600 + 0x4 * (n)) +#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff +#define IPA_DEBUG_CNT_REG_N_MAX 15 +#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff +#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0 + +#define IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(n) (0x00000380 + 0x4 * (n)) +#define IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(n) (0x00000640 + 0x4 * (n)) +#define IPA_DEBUG_CNT_CTRL_N_RMSK 0x1ff1f171 +#define IPA_DEBUG_CNT_CTRL_N_MAX 15 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_BMSK 0x1ff00000 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_SHFT 0x14 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_SHFT 0xc +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_BMSK 0x100 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_SHFT 0x8 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_BMSK 0x70 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_SHFT 0x4 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_BMSK 0x1 +#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_SHFT 0x0 + +#define IPA_ENDP_STATUS_n_OFST(n) (0x000004c0 + 0x4 * (n)) +#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e +#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1 +#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1 +#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0 + +#define IPA_ENDP_INIT_CFG_n_OFST(n) (0x000000c0 + 0x4 * (n)) +#define IPA_ENDP_INIT_CFG_n_RMSK 0x7f +#define IPA_ENDP_INIT_CFG_n_MAXn 19 +#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78 +#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3 +#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6 +#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1 +#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1 +#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0 + +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(n) (0x00000220 + 0x4 * (n)) +#define 
IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK 0xffffffff +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_MAXn 19 +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff +#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0 + +#define IPA_ENDP_INIT_HDR_METADATA_n_OFST(n) (0x00000270 + 0x4 * (n)) +#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000 +#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10 + +#define IPA_IRQ_EE_UC_n_OFFS(n) (0x0000101c + 0x1000 * (n)) +#define IPA_IRQ_EE_UC_n_RMSK 0x1 +#define IPA_IRQ_EE_UC_n_MAXn 3 +#define IPA_IRQ_EE_UC_n_INT_BMSK 0x1 +#define IPA_IRQ_EE_UC_n_INT_SHFT 0x0 + +#define IPA_UC_MAILBOX_m_n_OFFS(m, n) (0x0001a000 + 0x80 * (m) + 0x4 * (n)) +#define IPA_UC_MAILBOX_m_n_OFFS_v2_5(m, n) (0x00022000 + 0x80 * (m) + 0x4 * (n)) + +#define IPA_SYS_PKT_PROC_CNTXT_BASE_OFST (0x000005d8) +#define IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST (0x000005e0) + +#endif diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c new file mode 100644 index 000000000000..fe9e5fa451a6 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -0,0 +1,1669 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipa_i.h" + +#define IPA_RT_TABLE_INDEX_NOT_FOUND (-1) +#define IPA_RT_TABLE_WORD_SIZE (4) +#define IPA_RT_INDEX_BITMAP_SIZE (32) +#define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127) +#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3) +#define IPA_RT_BIT_MASK (0x1) +#define IPA_RT_STATUS_OF_ADD_FAILED (-1) +#define IPA_RT_STATUS_OF_DEL_FAILED (-1) +#define IPA_RT_STATUS_OF_MDFY_FAILED (-1) + +/** + * __ipa_generate_rt_hw_rule_v2() - generates the routing hardware rule + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means + * caller wants to know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. 
+ * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf) +{ + struct ipa_rt_rule_hw_hdr *rule_hdr; + const struct ipa_rt_rule *rule = + (const struct ipa_rt_rule *)&entry->rule; + u16 en_rule = 0; + u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4]; + u8 *start; + int pipe_idx; + struct ipa_hdr_entry *hdr_entry; + + if (buf == NULL) { + memset(tmp, 0, (IPA_RT_FLT_HW_RULE_BUF_SIZE/4)); + buf = (u8 *)tmp; + } + + start = buf; + rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf; + pipe_idx = ipa2_get_ep_mapping(entry->rule.dst); + if (pipe_idx == -1) { + IPAERR("Wrong destination pipe specified in RT rule\n"); + WARN_ON(1); + return -EPERM; + } + if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) { + IPAERR("No RT rule on IPA_client_producer pipe.\n"); + IPAERR("pipe_idx: %d dst_pipe: %d\n", + pipe_idx, entry->rule.dst); + WARN_ON(1); + return -EPERM; + } + rule_hdr->u.hdr.pipe_dest_idx = pipe_idx; + rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl; + + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EPERM; + } + } + if (entry->hdr) { + if (entry->hdr->cookie == IPA_HDR_COOKIE) { + rule_hdr->u.hdr.hdr_offset = + entry->hdr->offset_entry->offset >> 2; + } else { + IPAERR("Entry hdr deleted by user = %d cookie = %u\n", + entry->hdr->user_deleted, entry->hdr->cookie); + WARN_ON(1); + rule_hdr->u.hdr.hdr_offset = 0; + } + } else { + rule_hdr->u.hdr.hdr_offset = 0; + } + buf += sizeof(struct ipa_rt_rule_hw_hdr); + + if 
(ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) { + IPAERR("fail to generate hw rule\n"); + return -EPERM; + } + + IPADBG_LOW("en_rule 0x%x\n", en_rule); + + rule_hdr->u.hdr.en_rule = en_rule; + ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr); + + if (entry->hw_len == 0) { + entry->hw_len = buf - start; + } else if (entry->hw_len != (buf - start)) { + IPAERR( + "hw_len differs b/w passes passed=0x%x calc=0x%zxtd\n", + entry->hw_len, + (buf - start)); + return -EPERM; + } + + return 0; +} + +/** + * __ipa_generate_rt_hw_rule_v2_5() - generates the routing hardware rule + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means + * caller wants to know the size of the rule as seen + * by HW so they did not pass a valid buffer, we will use a + * scratch buffer instead. + * With this scheme we are going to + * generate the rule twice, once to know size using scratch + * buffer and second to write the rule to the actual caller + * supplied buffer which is of required size + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf) +{ + struct ipa_rt_rule_hw_hdr *rule_hdr; + const struct ipa_rt_rule *rule = + (const struct ipa_rt_rule *)&entry->rule; + u16 en_rule = 0; + u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4]; + u8 *start; + int pipe_idx; + struct ipa_hdr_entry *hdr_entry; + struct ipa_hdr_proc_ctx_entry *hdr_proc_entry; + + if (buf == NULL) { + memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE); + buf = (u8 *)tmp; + } + + start = buf; + rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf; + pipe_idx = ipa2_get_ep_mapping(entry->rule.dst); + if (pipe_idx == -1) { + IPAERR("Wrong destination pipe specified in RT rule\n"); + WARN_ON(1); + return -EPERM; + } + if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) { + IPAERR("No RT rule on IPA_client_producer pipe.\n"); + 
IPAERR("pipe_idx: %d dst_pipe: %d\n", + pipe_idx, entry->rule.dst); + WARN_ON(1); + return -EPERM; + } + rule_hdr->u.hdr_v2_5.pipe_dest_idx = pipe_idx; + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EPERM; + } + } else if (entry->proc_ctx) { + hdr_proc_entry = ipa_id_find(entry->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { + IPAERR_RL("Proc header entry already deleted\n"); + return -EPERM; + } + } + if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) { + struct ipa_hdr_proc_ctx_entry *proc_ctx; + + proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx; + rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_proc_ctx_tbl_lcl; + ipa_assert_on(proc_ctx->offset_entry->offset & 31); + rule_hdr->u.hdr_v2_5.proc_ctx = 1; + rule_hdr->u.hdr_v2_5.hdr_offset = + (proc_ctx->offset_entry->offset + + ipa_ctx->hdr_proc_ctx_tbl.start_offset) >> 5; + } else if (entry->hdr) { + rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_tbl_lcl; + ipa_assert_on(entry->hdr->offset_entry->offset & 3); + rule_hdr->u.hdr_v2_5.proc_ctx = 0; + rule_hdr->u.hdr_v2_5.hdr_offset = + entry->hdr->offset_entry->offset >> 2; + } else { + rule_hdr->u.hdr_v2_5.proc_ctx = 0; + rule_hdr->u.hdr_v2_5.hdr_offset = 0; + } + buf += sizeof(struct ipa_rt_rule_hw_hdr); + + if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) { + IPAERR("fail to generate hw rule\n"); + return -EPERM; + } + + IPADBG("en_rule 0x%x\n", en_rule); + + rule_hdr->u.hdr_v2_5.en_rule = en_rule; + ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr); + + if (entry->hw_len == 0) { + entry->hw_len = buf - start; + } else if (entry->hw_len != (buf - start)) { + IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%zxtd\n", + entry->hw_len, (buf - start)); + return 
-EPERM; + } + + return 0; +} + +/** + * __ipa_generate_rt_hw_rule_v2_6L() - generates the routing hardware rule + * @ip: the ip address family type + * @entry: routing entry + * @buf: output buffer, buf == NULL means that the caller wants to know the size + * of the rule as seen by HW so they did not pass a valid buffer, we will + * use a scratch buffer instead. + * With this scheme we are going to generate the rule twice, once to know + * size using scratch buffer and second to write the rule to the actual + * caller supplied buffer which is of required size. + * + * Returns: 0 on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + */ +int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip, + struct ipa_rt_entry *entry, u8 *buf) +{ + /* Same implementation as IPAv2 */ + return __ipa_generate_rt_hw_rule_v2(ip, entry, buf); +} + +/** + * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table + * @ip: the ip address family type + * @hdr_sz: header size + * @max_rt_idx: maximal index + * + * Returns: size on success, negative on failure + * + * caller needs to hold any needed locks to ensure integrity + * + * the MSB set in rt_idx_bitmap indicates the size of hdr of routing tbl + */ +static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz, + int *max_rt_idx) +{ + struct ipa_rt_tbl_set *set; + struct ipa_rt_tbl *tbl; + struct ipa_rt_entry *entry; + u32 total_sz = 0; + u32 tbl_sz; + u32 bitmap = ipa_ctx->rt_idx_bitmap[ip]; + int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND; + int i; + int res; + + *hdr_sz = 0; + set = &ipa_ctx->rt_tbl_set[ip]; + + for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) { + if (bitmap & IPA_RT_BIT_MASK) + highest_bit_set = i; + bitmap >>= 1; + } + + *max_rt_idx = highest_bit_set; + if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) { + IPAERR("no rt tbls present\n"); + total_sz = IPA_RT_TABLE_WORD_SIZE; + *hdr_sz = IPA_RT_TABLE_WORD_SIZE; + return total_sz; + } + + *hdr_sz 
= (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE; + total_sz += *hdr_sz; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + tbl_sz = 0; + list_for_each_entry(entry, &tbl->head_rt_rule_list, link) { + res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule( + ip, + entry, + NULL); + if (res) { + IPAERR("failed to find HW RT rule size\n"); + return -EPERM; + } + tbl_sz += entry->hw_len; + } + + if (tbl_sz) + tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE; + + if (tbl->in_sys) + continue; + + if (tbl_sz) { + /* add the terminator */ + total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE); + /* every rule-set should start at word boundary */ + total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) & + ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT; + } + } + + IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip); + + return total_sz; +} + +static int ipa_generate_rt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, u8 *hdr, + u32 body_ofst, u32 apps_start_idx) +{ + struct ipa_rt_tbl *tbl; + struct ipa_rt_entry *entry; + struct ipa_rt_tbl_set *set; + u32 offset; + u8 *body; + struct ipa_mem_buffer rt_tbl_mem; + u8 *rt_tbl_mem_body; + int res; + + /* build the rt tbl in the DMA buffer to submit to IPA HW */ + body = base; + + set = &ipa_ctx->rt_tbl_set[ip]; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (!tbl->in_sys) { + offset = body - base + body_ofst; + if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) { + IPAERR("offset is not word multiple %d\n", + offset); + goto proc_err; + } + + /* convert offset to words from bytes */ + offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT; + /* rule is at an offset from base */ + offset |= IPA_RT_BIT_MASK; + + /* update the hdr at the right index */ + ipa_write_32(offset, hdr + + ((tbl->idx - apps_start_idx) * + IPA_RT_TABLE_WORD_SIZE)); + + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_rt_rule_list, + link) { + res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule( + ip, + entry, + body); + if (res) { + IPAERR("failed to gen HW RT 
rule\n"); + goto proc_err; + } + body += entry->hw_len; + } + + /* write the rule-set terminator */ + body = ipa_write_32(0, body); + if ((long)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) + /* advance body to next word boundary */ + body = body + (IPA_RT_TABLE_WORD_SIZE - + ((long)body & + IPA_RT_ENTRY_MEMORY_ALLIGNMENT)); + } else { + if (tbl->sz == 0) { + IPAERR("cannot generate 0 size table\n"); + goto proc_err; + } + + /* allocate memory for the RT tbl */ + rt_tbl_mem.size = tbl->sz; + rt_tbl_mem.base = + dma_alloc_coherent(ipa_ctx->pdev, rt_tbl_mem.size, + &rt_tbl_mem.phys_base, GFP_KERNEL); + if (!rt_tbl_mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", + rt_tbl_mem.size); + WARN_ON(1); + goto proc_err; + } + + WARN_ON(rt_tbl_mem.phys_base & + IPA_RT_ENTRY_MEMORY_ALLIGNMENT); + rt_tbl_mem_body = rt_tbl_mem.base; + memset(rt_tbl_mem.base, 0, rt_tbl_mem.size); + /* update the hdr at the right index */ + ipa_write_32(rt_tbl_mem.phys_base, + hdr + ((tbl->idx - apps_start_idx) * + IPA_RT_TABLE_WORD_SIZE)); + /* generate the rule-set */ + list_for_each_entry(entry, &tbl->head_rt_rule_list, + link) { + res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule( + ip, + entry, + rt_tbl_mem_body); + if (res) { + IPAERR("failed to gen HW RT rule\n"); + WARN_ON(1); + goto rt_table_mem_alloc_failed; + } + rt_tbl_mem_body += entry->hw_len; + } + + /* write the rule-set terminator */ + rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body); + + if (tbl->curr_mem.phys_base) { + WARN_ON(tbl->prev_mem.phys_base); + tbl->prev_mem = tbl->curr_mem; + } + tbl->curr_mem = rt_tbl_mem; + } + } + + return 0; + +rt_table_mem_alloc_failed: + dma_free_coherent(ipa_ctx->pdev, rt_tbl_mem.size, + rt_tbl_mem.base, rt_tbl_mem.phys_base); +proc_err: + return -EPERM; +} + + +/** + * ipa_generate_rt_hw_tbl() - generates the routing hardware table + * @ip: [in] the ip address family type + * @mem: [out] buffer to put the filtering table + * + * Returns: 0 on success, negative on failure + */ +static int 
ipa_generate_rt_hw_tbl_v1_1(enum ipa_ip_type ip, + struct ipa_mem_buffer *mem) +{ + u32 hdr_sz; + u8 *hdr; + u8 *body; + u8 *base; + int max_rt_idx; + int i; + int res; + + res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx); + if (res < 0) { + IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res); + goto error; + } + + mem->size = res; + mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) & + ~IPA_RT_TABLE_MEMORY_ALLIGNMENT; + + if (mem->size == 0) { + IPAERR("rt tbl empty ip=%d\n", ip); + goto error; + } + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem->size); + goto error; + } + + /* build the rt tbl in the DMA buffer to submit to IPA HW */ + base = hdr = (u8 *)mem->base; + body = base + hdr_sz; + + /* setup all indices to point to the empty sys rt tbl */ + for (i = 0; i <= max_rt_idx; i++) + ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base, + hdr + (i * IPA_RT_TABLE_WORD_SIZE)); + + if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, 0, 0)) { + IPAERR("fail to generate RT tbl\n"); + goto proc_err; + } + + return 0; + +proc_err: + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); + mem->base = NULL; +error: + return -EPERM; +} + +static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip) +{ + struct ipa_rt_tbl *tbl; + struct ipa_rt_tbl *next; + struct ipa_rt_tbl_set *set; + + set = &ipa_ctx->rt_tbl_set[ip]; + list_for_each_entry(tbl, &set->head_rt_tbl_list, link) { + if (tbl->prev_mem.phys_base) { + IPADBG_LOW("reaping rt"); + IPADBG_LOW("tbl name=%s ip=%d\n", + tbl->name, ip); + dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size, + tbl->prev_mem.base, + tbl->prev_mem.phys_base); + memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem)); + } + } + + set = &ipa_ctx->reap_rt_tbl_set[ip]; + list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) { + list_del(&tbl->link); + WARN_ON(tbl->prev_mem.phys_base != 0); + if 
(tbl->curr_mem.phys_base) { + IPADBG_LOW("reaping sys"); + IPADBG_LOW("rt tbl name=%s ip=%d\n", + tbl->name, ip); + dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size, + tbl->curr_mem.base, + tbl->curr_mem.phys_base); + kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl); + } + } +} + +int __ipa_commit_rt_v1_1(enum ipa_ip_type ip) +{ + struct ipa_desc desc = { 0 }; + struct ipa_mem_buffer *mem; + void *cmd; + struct ipa_ip_v4_routing_init *v4; + struct ipa_ip_v6_routing_init *v6; + u16 avail; + u16 size; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + + mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL); + if (!mem) { + IPAERR("failed to alloc memory object\n"); + goto fail_alloc_mem; + } + + if (ip == IPA_IP_v4) { + avail = ipa_ctx->ip4_rt_tbl_lcl ? IPA_MEM_v1_RAM_V4_RT_SIZE : + IPA_MEM_PART(v4_rt_size_ddr); + size = sizeof(struct ipa_ip_v4_routing_init); + } else { + avail = ipa_ctx->ip6_rt_tbl_lcl ? IPA_MEM_v1_RAM_V6_RT_SIZE : + IPA_MEM_PART(v6_rt_size_ddr); + size = sizeof(struct ipa_ip_v6_routing_init); + } + cmd = kmalloc(size, flag); + if (!cmd) { + IPAERR("failed to alloc immediate command object\n"); + goto fail_alloc_cmd; + } + + if (ipa_generate_rt_hw_tbl_v1_1(ip, mem)) { + IPAERR("fail to generate RT HW TBL ip %d\n", ip); + goto fail_hw_tbl_gen; + } + + if (mem->size > avail) { + IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail); + goto fail_send_cmd; + } + + if (ip == IPA_IP_v4) { + v4 = (struct ipa_ip_v4_routing_init *)cmd; + desc.opcode = IPA_IP_V4_ROUTING_INIT; + v4->ipv4_rules_addr = mem->phys_base; + v4->size_ipv4_rules = mem->size; + v4->ipv4_addr = IPA_MEM_v1_RAM_V4_RT_OFST; + IPADBG("putting Routing IPv4 rules to phys 0x%x", + v4->ipv4_addr); + } else { + v6 = (struct ipa_ip_v6_routing_init *)cmd; + desc.opcode = IPA_IP_V6_ROUTING_INIT; + v6->ipv6_rules_addr = mem->phys_base; + v6->size_ipv6_rules = mem->size; + v6->ipv6_addr = IPA_MEM_v1_RAM_V6_RT_OFST; + IPADBG("putting Routing IPv6 rules to phys 0x%x", + 
v6->ipv6_addr); + } + + desc.pyld = cmd; + desc.len = size; + desc.type = IPA_IMM_CMD_DESC; + IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size); + + if (ipa_send_cmd(1, &desc)) { + IPAERR("fail to send immediate command\n"); + goto fail_send_cmd; + } + + __ipa_reap_sys_rt_tbls(ip); + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base); + kfree(cmd); + kfree(mem); + + return 0; + +fail_send_cmd: + if (mem->base) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +fail_hw_tbl_gen: + kfree(cmd); +fail_alloc_cmd: + kfree(mem); +fail_alloc_mem: + return -EPERM; +} + +static int ipa_generate_rt_hw_tbl_v2(enum ipa_ip_type ip, + struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head) +{ + u32 hdr_sz; + u8 *hdr; + u8 *body; + u8 *base; + int max_rt_idx; + int i; + u32 *entr; + int num_index; + u32 body_start_offset; + u32 apps_start_idx; + int res; + + if (ip == IPA_IP_v4) { + num_index = IPA_MEM_PART(v4_apps_rt_index_hi) - + IPA_MEM_PART(v4_apps_rt_index_lo) + 1; + body_start_offset = IPA_MEM_PART(apps_v4_rt_ofst) - + IPA_MEM_PART(v4_rt_ofst); + apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo); + } else { + num_index = IPA_MEM_PART(v6_apps_rt_index_hi) - + IPA_MEM_PART(v6_apps_rt_index_lo) + 1; + body_start_offset = IPA_MEM_PART(apps_v6_rt_ofst) - + IPA_MEM_PART(v6_rt_ofst); + apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo); + } + + head->size = num_index * 4; + head->base = dma_alloc_coherent(ipa_ctx->pdev, head->size, + &head->phys_base, GFP_KERNEL); + if (!head->base) { + IPAERR("fail to alloc DMA buff of size %d\n", head->size); + goto err; + } + entr = (u32 *)head->base; + hdr = (u8 *)head->base; + for (i = 1; i <= num_index; i++) { + *entr = ipa_ctx->empty_rt_tbl_mem.phys_base; + entr++; + } + + res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx); + if (res < 0) { + IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res); + goto base_err; + } + + mem->size = res; + mem->size -= hdr_sz; + mem->size = (mem->size + 
IPA_RT_TABLE_MEMORY_ALLIGNMENT) & + ~IPA_RT_TABLE_MEMORY_ALLIGNMENT; + + if (mem->size > 0) { + mem->base = dma_zalloc_coherent(ipa_ctx->pdev, mem->size, + &mem->phys_base, GFP_KERNEL); + if (!mem->base) { + IPAERR("fail to alloc DMA buff of size %d\n", + mem->size); + goto base_err; + } + } + + /* build the rt tbl in the DMA buffer to submit to IPA HW */ + body = base = (u8 *)mem->base; + + if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, body_start_offset, + apps_start_idx)) { + IPAERR("fail to generate RT tbl\n"); + goto proc_err; + } + + return 0; + +proc_err: + if (mem->size) + dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, + mem->phys_base); +base_err: + dma_free_coherent(ipa_ctx->pdev, head->size, head->base, + head->phys_base); +err: + return -EPERM; +} + +int __ipa_commit_rt_v2(enum ipa_ip_type ip) +{ + struct ipa_desc desc[2]; + struct ipa_mem_buffer body; + struct ipa_mem_buffer head; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL; + struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); + u16 avail; + u32 num_modem_rt_index; + int rc = 0; + u32 local_addr1; + u32 local_addr2; + bool lcl; + + memset(desc, 0, 2 * sizeof(struct ipa_desc)); + + if (ip == IPA_IP_v4) { + avail = ipa_ctx->ip4_rt_tbl_lcl ? + IPA_MEM_PART(apps_v4_rt_size) : + IPA_MEM_PART(v4_rt_size_ddr); + num_modem_rt_index = + IPA_MEM_PART(v4_modem_rt_index_hi) - + IPA_MEM_PART(v4_modem_rt_index_lo) + 1; + local_addr1 = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v4_rt_ofst) + + num_modem_rt_index * 4; + local_addr2 = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v4_rt_ofst); + lcl = ipa_ctx->ip4_rt_tbl_lcl; + } else { + avail = ipa_ctx->ip6_rt_tbl_lcl ? 
+ IPA_MEM_PART(apps_v6_rt_size) : + IPA_MEM_PART(v6_rt_size_ddr); + num_modem_rt_index = + IPA_MEM_PART(v6_modem_rt_index_hi) - + IPA_MEM_PART(v6_modem_rt_index_lo) + 1; + local_addr1 = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(v6_rt_ofst) + + num_modem_rt_index * 4; + local_addr2 = ipa_ctx->smem_restricted_bytes + + IPA_MEM_PART(apps_v6_rt_ofst); + lcl = ipa_ctx->ip6_rt_tbl_lcl; + } + + if (ipa_generate_rt_hw_tbl_v2(ip, &body, &head)) { + IPAERR("fail to generate RT HW TBL ip %d\n", ip); + rc = -EFAULT; + goto fail_gen; + } + + if (body.size > avail) { + IPAERR("tbl too big, needed %d avail %d\n", body.size, avail); + rc = -EFAULT; + goto fail_send_cmd; + } + + cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), + flag); + if (cmd1 == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd; + } + + cmd1->size = head.size; + cmd1->system_addr = head.phys_base; + cmd1->local_addr = local_addr1; + desc[0].opcode = IPA_DMA_SHARED_MEM; + desc[0].pyld = (void *)cmd1; + desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[0].type = IPA_IMM_CMD_DESC; + + if (lcl) { + cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), + flag); + if (cmd2 == NULL) { + IPAERR("Failed to alloc immediate command object\n"); + rc = -ENOMEM; + goto fail_send_cmd1; + } + + cmd2->size = body.size; + cmd2->system_addr = body.phys_base; + cmd2->local_addr = local_addr2; + + desc[1].opcode = IPA_DMA_SHARED_MEM; + desc[1].pyld = (void *)cmd2; + desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem); + desc[1].type = IPA_IMM_CMD_DESC; + + if (ipa_send_cmd(2, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_send_cmd2; + } + } else { + if (ipa_send_cmd(1, desc)) { + IPAERR("fail to send immediate command\n"); + rc = -EFAULT; + goto fail_send_cmd1; + } + } + + IPADBG("HEAD\n"); + IPA_DUMP_BUFF(head.base, head.phys_base, head.size); + if (body.size) { + IPADBG("BODY\n"); + 
IPA_DUMP_BUFF(body.base, body.phys_base, body.size); + } + __ipa_reap_sys_rt_tbls(ip); + +fail_send_cmd2: + kfree(cmd2); +fail_send_cmd1: + kfree(cmd1); +fail_send_cmd: + dma_free_coherent(ipa_ctx->pdev, head.size, head.base, head.phys_base); + if (body.size) + dma_free_coherent(ipa_ctx->pdev, body.size, body.base, + body.phys_base); +fail_gen: + return rc; +} + +/** + * __ipa_find_rt_tbl() - find the routing table + * which name is given as parameter + * @ip: [in] the ip address family type of the wanted routing table + * @name: [in] the name of the wanted routing table + * + * Returns: the routing table which name is given as parameter, or NULL if it + * doesn't exist + */ +struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name) +{ + struct ipa_rt_tbl *entry; + struct ipa_rt_tbl_set *set; + + set = &ipa_ctx->rt_tbl_set[ip]; + list_for_each_entry(entry, &set->head_rt_tbl_list, link) { + if (!strcmp(name, entry->name)) + return entry; + } + + return NULL; +} + +/** + * ipa2_query_rt_index() - find the routing table index + * which name and ip type are given as parameters + * @in: [out] the index of the wanted routing table + * + * Returns: the routing table which name is given as parameter, or NULL if it + * doesn't exist + */ +int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in) +{ + struct ipa_rt_tbl *entry; + + if (in->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + /* check if this table exists */ + in->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_rt_tbl(in->ip, in->name); + if (!entry) { + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + in->idx = entry->idx; + mutex_unlock(&ipa_ctx->lock); + return 0; +} + +static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, + const char *name) +{ + struct ipa_rt_tbl *entry; + struct ipa_rt_tbl_set *set; + int i; + int id; + + if (ip >= IPA_IP_MAX || name == NULL) { + IPAERR("bad parm\n"); + goto error; + } + + 
set = &ipa_ctx->rt_tbl_set[ip]; + /* check if this table exists */ + entry = __ipa_find_rt_tbl(ip, name); + if (!entry) { + entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc RT tbl object\n"); + goto error; + } + /* find a routing tbl index */ + for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) { + if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) { + entry->idx = i; + set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]); + break; + } + } + if (i == IPA_RT_INDEX_BITMAP_SIZE) { + IPAERR("not free RT tbl indices left\n"); + goto fail_rt_idx_alloc; + } + + INIT_LIST_HEAD(&entry->head_rt_rule_list); + INIT_LIST_HEAD(&entry->link); + strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX); + entry->set = set; + entry->cookie = IPA_RT_TBL_COOKIE; + entry->in_sys = (ip == IPA_IP_v4) ? + !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl; + set->tbl_cnt++; + list_add(&entry->link, &set->head_rt_tbl_list); + + IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx, + set->tbl_cnt, ip); + + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to add to tree\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + entry->id = id; + } + + return entry; + +ipa_insert_failed: + set->tbl_cnt--; + list_del(&entry->link); +fail_rt_idx_alloc: + entry->cookie = 0; + kmem_cache_free(ipa_ctx->rt_tbl_cache, entry); +error: + return NULL; +} + +static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry) +{ + enum ipa_ip_type ip = IPA_IP_MAX; + u32 id; + + if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR_RL("bad parms\n"); + return -EINVAL; + } + id = entry->id; + if (ipa_id_find(id) == NULL) { + IPAERR_RL("lookup failed\n"); + return -EPERM; + } + + if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4]) + ip = IPA_IP_v4; + else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6]) + ip = IPA_IP_v6; + else { + WARN_ON(1); + return -EPERM; + } + + + if (!entry->in_sys) { + list_del(&entry->link); + clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]); 
+ entry->set->tbl_cnt--; + IPADBG_LOW("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx, + entry->set->tbl_cnt); + kmem_cache_free(ipa_ctx->rt_tbl_cache, entry); + } else { + list_move(&entry->link, + &ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list); + clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]); + entry->set->tbl_cnt--; + IPADBG_LOW("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx, + entry->set->tbl_cnt); + } + + /* remove the handle from the database */ + ipa_id_remove(id); + return 0; +} + +static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, + const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl, + bool user) +{ + struct ipa_rt_tbl *tbl; + struct ipa_rt_entry *entry; + struct ipa_hdr_entry *hdr = NULL; + struct ipa_hdr_proc_ctx_entry *proc_ctx = NULL; + int id; + + if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) { + IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n"); + goto error; + } + + if (rule->hdr_hdl) { + hdr = ipa_id_find(rule->hdr_hdl); + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { + IPAERR("rt rule does not point to valid hdr\n"); + goto error; + } + } else if (rule->hdr_proc_ctx_hdl) { + proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl); + if ((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { + IPAERR("rt rule does not point to valid proc ctx\n"); + goto error; + } + } + + + tbl = __ipa_add_rt_tbl(ip, name); + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { + IPAERR("bad params\n"); + goto error; + } + /* + * do not allow any rules to be added at end of the "default" routing + * tables + */ + if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) && + (tbl->rule_cnt > 0)) { + IPAERR_RL("cannot add rules to default rt table\n"); + goto error; + } + + entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL); + if (!entry) { + IPAERR("failed to alloc RT rule object\n"); + goto error; + } + INIT_LIST_HEAD(&entry->link); + entry->cookie = IPA_RT_RULE_COOKIE; + entry->rule = *rule; + entry->tbl = 
tbl; + entry->hdr = hdr; + entry->proc_ctx = proc_ctx; + if (at_rear) + list_add_tail(&entry->link, &tbl->head_rt_rule_list); + else + list_add(&entry->link, &tbl->head_rt_rule_list); + tbl->rule_cnt++; + if (entry->hdr) + entry->hdr->ref_cnt++; + else if (entry->proc_ctx) + entry->proc_ctx->ref_cnt++; + id = ipa_id_alloc(entry); + if (id < 0) { + IPAERR("failed to add to tree\n"); + WARN_ON(1); + goto ipa_insert_failed; + } + IPADBG_LOW("add rt rule tbl_idx=%d", tbl->idx); + IPADBG_LOW("rule_cnt=%d\n", tbl->rule_cnt); + *rule_hdl = id; + entry->id = id; + entry->ipacm_installed = user; + + return 0; + +ipa_insert_failed: + if (entry->hdr) + entry->hdr->ref_cnt--; + else if (entry->proc_ctx) + entry->proc_ctx->ref_cnt--; + list_del(&entry->link); + kmem_cache_free(ipa_ctx->rt_rule_cache, entry); +error: + return -EPERM; +} + +/** + * ipa2_add_rt_rule() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules) +{ + return ipa2_add_rt_rule_usr(rules, false); +} + +/** + * ipa2_add_rt_rule_usr() - Add the specified routing rules to SW and optionally + * commit to IPA HW + * @rules: [inout] set of routing rules to add + * @user_only: [in] indicate installed by userspace module + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_add_rt_rule_usr(struct ipa_ioc_add_rt_rule *rules, bool user_only) +{ + int i; + int ret; + + if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < rules->num_rules; i++) { + rules->rt_tbl_name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name, + &rules->rules[i].rule, + rules->rules[i].at_rear, 
+ &rules->rules[i].rt_rule_hdl, + user_only)) { + IPAERR_RL("failed to add rt rule %d\n", i); + rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED; + } else { + rules->rules[i].status = 0; + } + } + + if (rules->commit) + if (ipa_ctx->ctrl->ipa_commit_rt(rules->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return ret; +} + +int __ipa_del_rt_rule(u32 rule_hdl) +{ + struct ipa_rt_entry *entry; + int id; + struct ipa_hdr_entry *hdr_entry; + struct ipa_hdr_proc_ctx_entry *hdr_proc_entry; + + entry = ipa_id_find(rule_hdl); + + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + return -EINVAL; + } + + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("bad params\n"); + return -EINVAL; + } + + if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) { + IPADBG("Deleting rule from default rt table idx=%u\n", + entry->tbl->idx); + if (entry->tbl->rule_cnt == 1) { + IPAERR_RL("Default tbl last rule cannot be deleted\n"); + return -EINVAL; + } + } + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EINVAL; + } + } else if (entry->proc_ctx) { + hdr_proc_entry = ipa_id_find(entry->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != IPA_PROC_HDR_COOKIE) { + IPAERR_RL("Proc header entry already deleted\n"); + return -EINVAL; + } + } + + if (entry->hdr) + __ipa_release_hdr(entry->hdr->id); + else if (entry->proc_ctx) + __ipa_release_hdr_proc_ctx(entry->proc_ctx->id); + list_del(&entry->link); + entry->tbl->rule_cnt--; + IPADBG_LOW("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx, + entry->tbl->rule_cnt); + if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) { + if (__ipa_del_rt_tbl(entry->tbl)) + IPAERR_RL("fail to del RT tbl\n"); + } + entry->cookie = 0; + id = 
entry->id; + kmem_cache_free(ipa_ctx->rt_rule_cache, entry); + + /* remove the handle from the database */ + ipa_id_remove(id); + + return 0; +} + +/** + * ipa2_del_rt_rule() - Remove the specified routing rules to SW and optionally + * commit to IPA HW + * @hdls: [inout] set of routing rules to delete + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls) +{ + int i; + int ret; + + if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_hdls; i++) { + if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) { + IPAERR_RL("failed to del rt rule %i\n", i); + hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED; + } else { + hdls->hdl[i].status = 0; + } + } + + if (hdls->commit) + if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return ret; +} + +/** + * ipa2_commit_rt_rule() - Commit the current SW routing table of specified type + * to IPA HW + * @ip: The family of routing tables + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_commit_rt(enum ipa_ip_type ip) +{ + int ret; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + /* + * issue a commit on the filtering module of same IP type since + * filtering rules point to routing tables + */ + if (ipa2_commit_flt(ip)) + return -EPERM; + + mutex_lock(&ipa_ctx->lock); + if (ipa_ctx->ctrl->ipa_commit_rt(ip)) { + ret = -EPERM; + goto bail; + } + + ret = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + return ret; +} + +/** + * ipa2_reset_rt() - reset the current SW routing table of specified type + * (does not commit to HW) + * @ip: [in] The family of routing tables + * @user_only: [in] indicate delete rules installed by userspace + * 
+ * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_reset_rt(enum ipa_ip_type ip, bool user_only) +{ + struct ipa_rt_tbl *tbl; + struct ipa_rt_tbl *tbl_next; + struct ipa_rt_tbl_set *set; + struct ipa_rt_entry *rule; + struct ipa_rt_entry *rule_next; + struct ipa_rt_tbl_set *rset; + u32 apps_start_idx; + struct ipa_hdr_entry *hdr_entry; + struct ipa_hdr_proc_ctx_entry *hdr_proc_entry; + int id; + bool tbl_user = false; + + if (ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) { + if (ip == IPA_IP_v4) + apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo); + else + apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo); + } else { + apps_start_idx = 0; + } + + /* + * issue a reset on the filtering module of same IP type since + * filtering rules point to routing tables + */ + if (ipa2_reset_flt(ip, user_only)) + IPAERR_RL("fail to reset flt ip=%d\n", ip); + + set = &ipa_ctx->rt_tbl_set[ip]; + rset = &ipa_ctx->reap_rt_tbl_set[ip]; + mutex_lock(&ipa_ctx->lock); + IPADBG("reset rt ip=%d\n", ip); + list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) { + tbl_user = false; + list_for_each_entry_safe(rule, rule_next, + &tbl->head_rt_rule_list, link) { + if (ipa_id_find(rule->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + + /* indicate if tbl used for user-specified rules*/ + if (rule->ipacm_installed) { + IPADBG("tbl_user %d, tbl-index %d\n", + tbl_user, tbl->id); + tbl_user = true; + } + /* + * for the "default" routing tbl, remove all but the + * last rule + */ + if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1) + continue; + if (!user_only || + rule->ipacm_installed) { + list_del(&rule->link); + if (rule->hdr) { + hdr_entry = ipa_id_find( + rule->rule.hdr_hdl); + if (!hdr_entry || + hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL( + "Header already deleted\n"); + 
mutex_unlock(&ipa_ctx->lock); + return -EINVAL; + } + } else if (rule->proc_ctx) { + hdr_proc_entry = + ipa_id_find( + rule->rule.hdr_proc_ctx_hdl); + if (!hdr_proc_entry || + hdr_proc_entry->cookie != + IPA_PROC_HDR_COOKIE) { + IPAERR_RL( + "Proc entry already deleted\n"); + mutex_unlock(&ipa_ctx->lock); + return -EINVAL; + } + } + tbl->rule_cnt--; + if (rule->hdr) + __ipa_release_hdr(rule->hdr->id); + else if (rule->proc_ctx) + __ipa_release_hdr_proc_ctx( + rule->proc_ctx->id); + rule->cookie = 0; + id = rule->id; + kmem_cache_free(ipa_ctx->rt_rule_cache, rule); + + /* remove the handle from the database */ + ipa_id_remove(id); + } + } + + if (ipa_id_find(tbl->id) == NULL) { + WARN_ON(1); + mutex_unlock(&ipa_ctx->lock); + return -EFAULT; + } + id = tbl->id; + + /* do not remove the "default" routing tbl which has index 0 */ + if (tbl->idx != apps_start_idx) { + if (!user_only || tbl_user) { + if (!tbl->in_sys) { + list_del(&tbl->link); + set->tbl_cnt--; + clear_bit(tbl->idx, + &ipa_ctx->rt_idx_bitmap[ip]); + IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n", + tbl->idx, set->tbl_cnt); + kmem_cache_free(ipa_ctx->rt_tbl_cache, + tbl); + } else { + list_move(&tbl->link, + &rset->head_rt_tbl_list); + clear_bit(tbl->idx, + &ipa_ctx->rt_idx_bitmap[ip]); + set->tbl_cnt--; + IPADBG("rst tbl_idx=%d cnt=%d\n", + tbl->idx, set->tbl_cnt); + } + /* remove the handle from the database */ + ipa_id_remove(id); + } + } + } + + /* commit the change to IPA-HW */ + if (ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v4) || + ipa_ctx->ctrl->ipa_commit_rt(IPA_IP_v6)) { + IPAERR("fail to commit rt-rule\n"); + WARN_ON_RATELIMIT_IPA(1); + mutex_unlock(&ipa_ctx->lock); + return -EPERM; + } + mutex_unlock(&ipa_ctx->lock); + + return 0; +} + +/** + * ipa2_get_rt_tbl() - lookup the specified routing table and return handle if + * it exists, if lookup succeeds the routing table ref cnt is increased + * @lookup: [inout] routing table to lookup and its handle + * + * Returns: 0 on success, negative on failure + * + 
* Note: Should not be called from atomic context + * Caller should call ipa_put_rt_tbl later if this function succeeds + */ +int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) +{ + struct ipa_rt_tbl *entry; + int result = -EFAULT; + + if (lookup == NULL || lookup->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + mutex_lock(&ipa_ctx->lock); + lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0'; + entry = __ipa_find_rt_tbl(lookup->ip, lookup->name); + if (entry && entry->cookie == IPA_RT_TBL_COOKIE) { + if (entry->ref_cnt == U32_MAX) { + IPAERR("fail: ref count crossed limit\n"); + goto ret; + } + entry->ref_cnt++; + lookup->hdl = entry->id; + + /* commit for get */ + if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip)) + IPAERR_RL("fail to commit RT tbl\n"); + + result = 0; + } + +ret: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + +/** + * ipa2_put_rt_tbl() - Release the specified routing table handle + * @rt_tbl_hdl: [in] the routing table handle to release + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_put_rt_tbl(u32 rt_tbl_hdl) +{ + struct ipa_rt_tbl *entry; + enum ipa_ip_type ip = IPA_IP_MAX; + int result = 0; + + mutex_lock(&ipa_ctx->lock); + entry = ipa_id_find(rt_tbl_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + result = -EINVAL; + goto ret; + } + + if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) { + IPAERR_RL("bad parms\n"); + result = -EINVAL; + goto ret; + } + + if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4]) + ip = IPA_IP_v4; + else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6]) + ip = IPA_IP_v6; + else { + WARN_ON(1); + result = -EINVAL; + goto ret; + } + + entry->ref_cnt--; + if (entry->ref_cnt == 0 && entry->rule_cnt == 0) { + if (__ipa_del_rt_tbl(entry)) + IPAERR_RL("fail to del RT tbl\n"); + /* commit for put */ + if (ipa_ctx->ctrl->ipa_commit_rt(ip)) + IPAERR_RL("fail to commit RT tbl\n"); + } + + result = 0; + 
+ret: + mutex_unlock(&ipa_ctx->lock); + + return result; +} + + +static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) +{ + struct ipa_rt_entry *entry; + struct ipa_hdr_entry *hdr = NULL; + struct ipa_hdr_entry *hdr_entry; + + if (rtrule->rule.hdr_hdl) { + hdr = ipa_id_find(rtrule->rule.hdr_hdl); + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { + IPAERR_RL("rt rule does not point to valid hdr\n"); + goto error; + } + } + + entry = ipa_id_find(rtrule->rt_rule_hdl); + if (entry == NULL) { + IPAERR_RL("lookup failed\n"); + goto error; + } + + if (entry->cookie != IPA_RT_RULE_COOKIE) { + IPAERR_RL("bad params\n"); + goto error; + } + + if (!strcmp(entry->tbl->name, IPA_DFLT_RT_TBL_NAME)) { + IPAERR_RL("Default tbl rule cannot be modified\n"); + return -EINVAL; + } + + /* Adding check to confirm still + * header entry present in header table or not + */ + + if (entry->hdr) { + hdr_entry = ipa_id_find(entry->rule.hdr_hdl); + if (!hdr_entry || hdr_entry->cookie != IPA_HDR_COOKIE) { + IPAERR_RL("Header entry already deleted\n"); + return -EPERM; + } + } + if (entry->hdr) + entry->hdr->ref_cnt--; + + entry->rule = rtrule->rule; + entry->hdr = hdr; + + if (entry->hdr) + entry->hdr->ref_cnt++; + + return 0; + +error: + return -EPERM; +} + +/** + * ipa2_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally + * commit to IPA HW + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls) +{ + int i; + int result; + + if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) { + IPAERR_RL("bad parm\n"); + return -EINVAL; + } + + mutex_lock(&ipa_ctx->lock); + for (i = 0; i < hdls->num_rules; i++) { + if (__ipa_mdfy_rt_rule(&hdls->rules[i])) { + IPAERR_RL("failed to mdfy rt rule %i\n", i); + hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED; + } else { + hdls->rules[i].status = 0; + } + } + + if (hdls->commit) + if 
(ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) { + result = -EPERM; + goto bail; + } + result = 0; +bail: + mutex_unlock(&ipa_ctx->lock); + + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h new file mode 100644 index 000000000000..d07bbaad0e09 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015-2016, 2020, The Linux Foundation. All rights reserved. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ipa +#define TRACE_INCLUDE_FILE ipa_trace + +#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _IPA_TRACE_H + +#include + +TRACE_EVENT( + intr_to_poll, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + poll_to_intr, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + idle_sleep_enter, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + idle_sleep_exit, + + TP_PROTO(unsigned long client), + + TP_ARGS(client), + + TP_STRUCT__entry( + __field(unsigned long, client) + ), + + TP_fast_assign( + __entry->client = client; + ), + + TP_printk("client=%lu", __entry->client) +); + +TRACE_EVENT( + rmnet_ipa_netifni, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", 
__entry->rx_pkt_cnt) +); + +TRACE_EVENT( + rmnet_ipa_netifrx, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); + +TRACE_EVENT( + rmnet_ipa_netif_rcv_skb, + + TP_PROTO(unsigned long rx_pkt_cnt), + + TP_ARGS(rx_pkt_cnt), + + TP_STRUCT__entry( + __field(unsigned long, rx_pkt_cnt) + ), + + TP_fast_assign( + __entry->rx_pkt_cnt = rx_pkt_cnt; + ), + + TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt) +); +#endif /* _IPA_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c new file mode 100644 index 000000000000..74b4fe632475 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2017, 2020, The Linux Foundation. All rights reserved. + */ +#include "ipa_i.h" +#include + +#define IPA_RAM_UC_SMEM_SIZE 128 +#define IPA_HW_INTERFACE_VERSION 0x0111 +#define IPA_PKT_FLUSH_TO_US 100 +#define IPA_UC_POLL_SLEEP_USEC 100 +#define IPA_UC_POLL_MAX_RETRY 10000 +#define HOLB_WORKQUEUE_NAME "ipa_holb_wq" + +static struct workqueue_struct *ipa_holb_wq; +static void ipa_start_monitor_holb(struct work_struct *work); +static DECLARE_WORK(ipa_holb_work, ipa_start_monitor_holb); + +/** + * enum ipa_cpu_2_hw_commands - Values that represent the commands from the CPU + * IPA_CPU_2_HW_CMD_NO_OP : No operation is required. + * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior + * of HW. + * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW. + * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information. + * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal + * handling. 
+ * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to goto Clock Gated state. + * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to goto Clock Ungated state. + * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB. + * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug. + */ +enum ipa_cpu_2_hw_commands { + IPA_CPU_2_HW_CMD_NO_OP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_CPU_2_HW_CMD_UPDATE_FLAGS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_CPU_2_HW_CMD_DEBUG_GET_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), + IPA_CPU_2_HW_CMD_ERR_FATAL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4), + IPA_CPU_2_HW_CMD_CLK_GATE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5), + IPA_CPU_2_HW_CMD_CLK_UNGATE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6), + IPA_CPU_2_HW_CMD_MEMCPY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7), + IPA_CPU_2_HW_CMD_RESET_PIPE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8), + IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9), +}; + +/** + * enum ipa_hw_2_cpu_responses - Values that represent common HW responses + * to CPU commands. + * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once + * boot sequence is completed and HW is ready to serve commands from CPU + * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands + */ +enum ipa_hw_2_cpu_responses { + IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), +}; + +/** + * enum ipa_hw_2_cpu_events - Values that represent HW event to be sent to CPU. 
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the + * device + * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information + */ +enum ipa_hw_2_cpu_events { + IPA_HW_2_CPU_EVENT_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_2_CPU_EVENT_LOG_INFO = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), +}; + +/** + * enum ipa_hw_errors - Common error types. + * @IPA_HW_ERROR_NONE : No error persists + * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell + * @IPA_HW_DMA_ERROR : Unexpected DMA error + * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset. + * @IPA_HW_INVALID_OPCODE : Invalid opcode sent + * @IPA_HW_ZIP_ENGINE_ERROR : ZIP engine error + */ +enum ipa_hw_errors { + IPA_HW_ERROR_NONE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0), + IPA_HW_INVALID_DOORBELL_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1), + IPA_HW_DMA_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2), + IPA_HW_FATAL_SYSTEM_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3), + IPA_HW_INVALID_OPCODE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4), + IPA_HW_ZIP_ENGINE_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5) +}; + +/** + * struct IpaHwResetPipeCmdData_t - Structure holding the parameters + * for IPA_CPU_2_HW_CMD_MEMCPY command. + * + * The parameters are passed as immediate params in the shared memory + */ +struct IpaHwMemCopyData_t { + u32 destination_addr; + u32 source_addr; + u32 dest_buffer_size; + u32 source_buffer_size; +}; + +/** + * union IpaHwResetPipeCmdData_t - Structure holding the parameters + * for IPA_CPU_2_HW_CMD_RESET_PIPE command. 
+ * @pipeNum : Pipe number to be reset + * @direction : 1 - IPA Producer, 0 - IPA Consumer + * @reserved_02_03 : Reserved + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwResetPipeCmdData_t { + struct IpaHwResetPipeCmdParams_t { + u8 pipeNum; + u8 direction; + u32 reserved_02_03; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwmonitorHolbCmdData_t - Structure holding the parameters + * for IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING command. + * @monitorPipe : Indication whether to monitor the pipe. 0 – Do not Monitor + * Pipe, 1 – Monitor Pipe + * @pipeNum : Pipe to be monitored/not monitored + * @reserved_02_03 : Reserved + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwmonitorHolbCmdData_t { + struct IpaHwmonitorHolbCmdParams_t { + u8 monitorPipe; + u8 pipeNum; + u32 reserved_02_03:16; + } __packed params; + u32 raw32b; +} __packed; + + +/** + * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters + * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response. + * @originalCmdOp : The original command opcode + * @status : 0 for success indication, otherwise failure + * @reserved : Reserved + * + * Parameters are sent as 32b immediate parameters. + */ +union IpaHwCpuCmdCompletedResponseData_t { + struct IpaHwCpuCmdCompletedResponseParams_t { + u32 originalCmdOp:8; + u32 status:8; + u32 reserved:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwErrorEventData_t - HW->CPU Common Events + * @errorType : Entered when a system error is detected by the HW. 
Type of + * error is specified by IPA_HW_ERRORS + * @reserved : Reserved + */ +union IpaHwErrorEventData_t { + struct IpaHwErrorEventParams_t { + u32 errorType:8; + u32 reserved:24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command + * @newFlags: SW flags defined the behavior of HW. + * This field is expected to be used as bitmask for enum ipa_hw_flags + */ +union IpaHwUpdateFlagsCmdData_t { + struct IpaHwUpdateFlagsCmdParams_t { + u32 newFlags; + } params; + u32 raw32b; +}; + +struct ipa_uc_hdlrs uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } }; + +static inline const char *ipa_hw_error_str(enum ipa_hw_errors err_type) +{ + const char *str; + + switch (err_type) { + case IPA_HW_ERROR_NONE: + str = "IPA_HW_ERROR_NONE"; + break; + case IPA_HW_INVALID_DOORBELL_ERROR: + str = "IPA_HW_INVALID_DOORBELL_ERROR"; + break; + case IPA_HW_FATAL_SYSTEM_ERROR: + str = "IPA_HW_FATAL_SYSTEM_ERROR"; + break; + case IPA_HW_INVALID_OPCODE: + str = "IPA_HW_INVALID_OPCODE"; + break; + case IPA_HW_ZIP_ENGINE_ERROR: + str = "IPA_HW_ZIP_ENGINE_ERROR"; + break; + default: + str = "INVALID ipa_hw_errors type"; + } + + return str; +} + +static void ipa_log_evt_hdlr(void) +{ + int i; + + if (!ipa_ctx->uc_ctx.uc_event_top_ofst) { + ipa_ctx->uc_ctx.uc_event_top_ofst = + ipa_ctx->uc_ctx.uc_sram_mmio->eventParams; + if (ipa_ctx->uc_ctx.uc_event_top_ofst + + sizeof(struct IpaHwEventLogInfoData_t) >= + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) + + ipa_ctx->smem_sz) { + IPAERR("uc_top 0x%x outside SRAM\n", + ipa_ctx->uc_ctx.uc_event_top_ofst); + goto bad_uc_top_ofst; + } + + ipa_ctx->uc_ctx.uc_event_top_mmio = ioremap( + ipa_ctx->ipa_wrapper_base + + ipa_ctx->uc_ctx.uc_event_top_ofst, + sizeof(struct IpaHwEventLogInfoData_t)); + if (!ipa_ctx->uc_ctx.uc_event_top_mmio) { + IPAERR("fail to ioremap uc top\n"); + goto bad_uc_top_ofst; + } + + for (i = 0; i 
< IPA_HW_NUM_FEATURES; i++) { + if (uc_hdlrs[i].ipa_uc_event_log_info_hdlr) + uc_hdlrs[i].ipa_uc_event_log_info_hdlr + (ipa_ctx->uc_ctx.uc_event_top_mmio); + } + } else { + + if (ipa_ctx->uc_ctx.uc_sram_mmio->eventParams != + ipa_ctx->uc_ctx.uc_event_top_ofst) { + IPAERR("uc top ofst changed new=%u cur=%u\n", + ipa_ctx->uc_ctx.uc_sram_mmio->eventParams, + ipa_ctx->uc_ctx.uc_event_top_ofst); + } + } + + return; + +bad_uc_top_ofst: + ipa_ctx->uc_ctx.uc_event_top_ofst = 0; +} + +/** + * ipa2_uc_state_check() - Check the status of the uC interface + * + * Return value: 0 if the uC is loaded, interface is initialized + * and there was no recent failure in one of the commands. + * A negative value is returned otherwise. + */ +int ipa2_uc_state_check(void) +{ + if (!ipa_ctx->uc_ctx.uc_inited) { + IPAERR("uC interface not initialized\n"); + return -EFAULT; + } + + if (!ipa_ctx->uc_ctx.uc_loaded) { + IPAERR("uC is not loaded\n"); + return -EFAULT; + } + + if (ipa_ctx->uc_ctx.uc_failed) { + IPAERR("uC has failed its last command\n"); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(ipa2_uc_state_check); + +/** + * ipa_uc_loaded_check() - Check the uC has been loaded + * + * Return value: 1 if the uC is loaded, 0 otherwise + */ +int ipa_uc_loaded_check(void) +{ + return ipa_ctx->uc_ctx.uc_loaded; +} +EXPORT_SYMBOL(ipa_uc_loaded_check); + +static void ipa_uc_event_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + union IpaHwErrorEventData_t evt; + u8 feature; + + WARN_ON(private_data != ipa_ctx); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + IPADBG("uC evt opcode=%u\n", + ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + + + feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + + if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { + IPAERR("Invalid feature %u for event %u\n", + feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + /* Feature specific handling */ + if 
(uc_hdlrs[feature].ipa_uc_event_hdlr) + uc_hdlrs[feature].ipa_uc_event_hdlr + (ipa_ctx->uc_ctx.uc_sram_mmio); + + /* General handling */ + if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_ERROR) { + evt.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->eventParams; + IPAERR("uC Error, evt errorType = %s\n", + ipa_hw_error_str(evt.params.errorType)); + ipa_ctx->uc_ctx.uc_failed = true; + ipa_ctx->uc_ctx.uc_error_type = evt.params.errorType; + if (evt.params.errorType == IPA_HW_ZIP_ENGINE_ERROR) { + IPAERR("IPA has encountered a ZIP engine error\n"); + ipa_ctx->uc_ctx.uc_zip_error = true; + } + ipa_assert(); + } else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_LOG_INFO) { + IPADBG("uC evt log info ofst=0x%x\n", + ipa_ctx->uc_ctx.uc_sram_mmio->eventParams); + ipa_log_evt_hdlr(); + } else { + IPADBG("unsupported uC evt opcode=%u\n", + ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + +} + +static int ipa_uc_panic_notifier(struct notifier_block *this, + unsigned long event, void *ptr) +{ + int result = 0; + struct ipa_active_client_logging_info log_info; + + IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr); + + result = ipa2_uc_state_check(); + if (result) + goto fail; + IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); + if (ipa2_inc_client_enable_clks_no_block(&log_info)) + goto fail; + + ipa_ctx->uc_ctx.uc_sram_mmio->cmdOp = + IPA_CPU_2_HW_CMD_ERR_FATAL; + /* ensure write to shared memory is done before triggering uc */ + wmb(); + ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1); + /* give uc enough time to save state */ + udelay(IPA_PKT_FLUSH_TO_US); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + IPADBG("err_fatal issued\n"); + +fail: + return NOTIFY_DONE; +} + +static struct notifier_block ipa_uc_panic_blk = { + .notifier_call = ipa_uc_panic_notifier, +}; + +void ipa_register_panic_hdlr(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &ipa_uc_panic_blk); +} + +static void 
ipa_uc_response_hdlr(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data) +{ + union IpaHwCpuCmdCompletedResponseData_t uc_rsp; + u8 feature; + int res; + int i; + + WARN_ON(private_data != ipa_ctx); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + IPADBG("uC rsp opcode=%u\n", + ipa_ctx->uc_ctx.uc_sram_mmio->responseOp); + + feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->responseOp); + + if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { + IPAERR("Invalid feature %u for event %u\n", + feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + + /* Feature specific handling */ + if (uc_hdlrs[feature].ipa_uc_response_hdlr) { + res = uc_hdlrs[feature].ipa_uc_response_hdlr( + ipa_ctx->uc_ctx.uc_sram_mmio, + &ipa_ctx->uc_ctx.uc_status); + if (res == 0) { + IPADBG("feature %d specific response handler\n", + feature); + complete_all(&ipa_ctx->uc_ctx.uc_completion); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return; + } + } + + /* General handling */ + if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) { + ipa_ctx->uc_ctx.uc_loaded = true; + IPAERR("IPA uC loaded\n"); + /* + * The proxy vote is held until uC is loaded to ensure that + * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received. + */ + ipa2_proxy_clk_unvote(); + for (i = 0; i < IPA_HW_NUM_FEATURES; i++) { + if (uc_hdlrs[i].ipa_uc_loaded_hdlr) + uc_hdlrs[i].ipa_uc_loaded_hdlr(); + } + /* Queue the work to enable holb monitoring on IPA-USB Producer + * pipe if valid. 
+ */ + if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) + queue_work(ipa_holb_wq, &ipa_holb_work); + } else if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) { + uc_rsp.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->responseParams; + IPADBG("uC cmd response opcode=%u status=%u\n", + uc_rsp.params.originalCmdOp, + uc_rsp.params.status); + if (uc_rsp.params.originalCmdOp == + ipa_ctx->uc_ctx.pending_cmd) { + ipa_ctx->uc_ctx.uc_status = uc_rsp.params.status; + complete_all(&ipa_ctx->uc_ctx.uc_completion); + } else { + IPAERR("Expected cmd=%u rcvd cmd=%u\n", + ipa_ctx->uc_ctx.pending_cmd, + uc_rsp.params.originalCmdOp); + } + } else { + IPAERR("Unsupported uC rsp opcode = %u\n", + ipa_ctx->uc_ctx.uc_sram_mmio->responseOp); + } + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + +/** + * ipa_uc_interface_init() - Initialize the interface with the uC + * + * Return value: 0 on success, negative value otherwise + */ +int ipa_uc_interface_init(void) +{ + int result; + unsigned long phys_addr; + + if (ipa_ctx->uc_ctx.uc_inited) { + IPADBG("uC interface already initialized\n"); + return 0; + } + + ipa_holb_wq = create_singlethread_workqueue( + HOLB_WORKQUEUE_NAME); + if (!ipa_holb_wq) { + IPAERR("HOLB workqueue creation failed\n"); + return -ENOMEM; + } + + mutex_init(&ipa_ctx->uc_ctx.uc_lock); + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_SW_FIRST_v2_5; + } else { + phys_addr = ipa_ctx->ipa_wrapper_base + + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0( + ipa_ctx->smem_restricted_bytes / 4); + } + + ipa_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr, + IPA_RAM_UC_SMEM_SIZE); + if (!ipa_ctx->uc_ctx.uc_sram_mmio) { + IPAERR("Fail to ioremap IPA uC SRAM\n"); + result = -ENOMEM; + goto remap_fail; + } + + result = ipa2_add_interrupt_handler(IPA_UC_IRQ_0, + ipa_uc_event_handler, true, + ipa_ctx); + if (result) { + IPAERR("Fail to register for UC_IRQ0 rsp 
interrupt\n"); + result = -EFAULT; + goto irq_fail0; + } + + result = ipa2_add_interrupt_handler(IPA_UC_IRQ_1, + ipa_uc_response_hdlr, true, + ipa_ctx); + if (result) { + IPAERR("fail to register for UC_IRQ1 rsp interrupt\n"); + result = -EFAULT; + goto irq_fail1; + } + + ipa_ctx->uc_ctx.uc_inited = true; + + IPADBG("IPA uC interface is initialized\n"); + return 0; + +irq_fail1: + ipa2_remove_interrupt_handler(IPA_UC_IRQ_0); +irq_fail0: + iounmap(ipa_ctx->uc_ctx.uc_sram_mmio); +remap_fail: + return result; +} +EXPORT_SYMBOL(ipa_uc_interface_init); + +/** + * ipa_uc_send_cmd() - Send a command to the uC + * + * Note: In case the operation times out (No response from the uC) or + * polling maximal amount of retries has reached, the logic + * considers it as an invalid state of the uC/IPA, and + * issues a kernel panic. + * + * Returns: 0 on success. + * -EINVAL in case of invalid input. + * -EBADF in case uC interface is not initialized / + * or the uC has failed previously. + * -EFAULT in case the received status doesn't match + * the expected. 
+ */ +int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status, + bool polling_mode, unsigned long timeout_jiffies) +{ + int index; + union IpaHwCpuCmdCompletedResponseData_t uc_rsp; + int retries = 0; + + mutex_lock(&ipa_ctx->uc_ctx.uc_lock); + + if (ipa2_uc_state_check()) { + IPADBG("uC send command aborted\n"); + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + return -EBADF; + } + +send_cmd: + init_completion(&ipa_ctx->uc_ctx.uc_completion); + + ipa_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd; + ipa_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode; + ipa_ctx->uc_ctx.pending_cmd = opcode; + + ipa_ctx->uc_ctx.uc_sram_mmio->responseOp = 0; + ipa_ctx->uc_ctx.uc_sram_mmio->responseParams = 0; + + ipa_ctx->uc_ctx.uc_status = 0; + + /* ensure write to shared memory is done before triggering uc */ + wmb(); + + ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1); + + if (polling_mode) { + for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) { + if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp == + IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) { + uc_rsp.raw32b = + ipa_ctx->uc_ctx.uc_sram_mmio->responseParams; + if (uc_rsp.params.originalCmdOp == + ipa_ctx->uc_ctx.pending_cmd) { + ipa_ctx->uc_ctx.pending_cmd = -1; + break; + } + } + usleep_range(IPA_UC_POLL_SLEEP_USEC, + IPA_UC_POLL_SLEEP_USEC); + } + + if (index == IPA_UC_POLL_MAX_RETRY) { + IPAERR("uC max polling retries reached\n"); + if (ipa_ctx->uc_ctx.uc_failed) { + IPAERR("uC reported on Error, errorType = %s\n", + ipa_hw_error_str( + ipa_ctx->uc_ctx.uc_error_type)); + } + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + ipa_assert(); + return -EFAULT; + } + } else { + if (wait_for_completion_timeout(&ipa_ctx->uc_ctx.uc_completion, + timeout_jiffies) == 0) { + IPAERR("uC timed out\n"); + if (ipa_ctx->uc_ctx.uc_failed) { + IPAERR("uC reported on Error, errorType = %s\n", ipa_hw_error_str( + ipa_ctx->uc_ctx.uc_error_type)); + } + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + ipa_assert(); + return -EFAULT; + } + } + + if
(ipa_ctx->uc_ctx.uc_status != expected_status) { + if (IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR == + ipa_ctx->uc_ctx.uc_status) { + retries++; + if (retries == IPA_BAM_STOP_MAX_RETRY) { + IPAERR("Failed after %d tries\n", retries); + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + /* + * Max retry reached, + * assert to check why cmd send failed. + */ + ipa_assert(); + } else { + /* sleep for short period to flush IPA */ + usleep_range(IPA_UC_WAIT_MIN_SLEEP, + IPA_UC_WAII_MAX_SLEEP); + goto send_cmd; + } + } + + IPAERR("Recevied status %u, Expected status %u\n", + ipa_ctx->uc_ctx.uc_status, expected_status); + ipa_ctx->uc_ctx.pending_cmd = -1; + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + return -EFAULT; + } + + ipa_ctx->uc_ctx.pending_cmd = -1; + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + + IPADBG("uC cmd %u send succeeded\n", opcode); + + return 0; +} +EXPORT_SYMBOL(ipa_uc_send_cmd); + +/** + * ipa_uc_register_handlers() - Registers event, response and log event + * handlers for a specific feature.Please note + * that currently only one handler can be + * registered per feature. + * + * Return value: None + */ +void ipa_uc_register_handlers(enum ipa_hw_features feature, + struct ipa_uc_hdlrs *hdlrs) +{ + + if (0 > feature || IPA_HW_FEATURE_MAX <= feature) { + IPAERR("Feature %u is invalid, not registering hdlrs\n", + feature); + return; + } + + mutex_lock(&ipa_ctx->uc_ctx.uc_lock); + uc_hdlrs[feature] = *hdlrs; + mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); + + IPADBG("uC handlers registered for feature %u\n", feature); +} +EXPORT_SYMBOL(ipa_uc_register_handlers); + +/** + * ipa_uc_reset_pipe() - reset a BAM pipe using the uC interface + * @ipa_client: [in] ipa client handle representing the pipe + * + * The function uses the uC interface in order to issue a BAM + * PIPE reset request. The uC makes sure there's no traffic in + * the TX command queue before issuing the reset. 
+ * + * Returns: 0 on success, negative on failure + */ +int ipa_uc_reset_pipe(enum ipa_client_type ipa_client) +{ + union IpaHwResetPipeCmdData_t cmd; + int ep_idx; + int ret; + + ep_idx = ipa2_get_ep_mapping(ipa_client); + if (ep_idx == -1) { + IPAERR("Invalid IPA client\n"); + return 0; + } + + /* + * If the uC interface has not been initialized yet, + * continue with the sequence without resetting the + * pipe. + */ + if (ipa2_uc_state_check()) { + IPADBG("uC interface will not be used to reset %s pipe %d\n", + IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", + ep_idx); + return 0; + } + + /* + * IPA consumer = 0, IPA producer = 1. + * IPA driver concept of PROD/CONS is the opposite of the + * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER, + * and vice-versa. + */ + cmd.params.direction = (u8)(IPA_CLIENT_IS_PROD(ipa_client) ? 0 : 1); + cmd.params.pipeNum = (u8)ep_idx; + + IPADBG("uC pipe reset on IPA %s pipe %d\n", + IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx); + + ret = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0, + false, 10*HZ); + + return ret; +} +EXPORT_SYMBOL(ipa_uc_reset_pipe); + +/** + * ipa_uc_monitor_holb() - Enable/Disable holb monitoring of a producer pipe. + * @ipa_client: [in] ipa client handle representing the pipe + * + * The function uses the uC interface in order to disable/enable holb + * monitoring. + * + * Returns: 0 on success, negative on failure + */ +int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable) +{ + union IpaHwmonitorHolbCmdData_t cmd; + int ep_idx; + int ret; + + /* + * HOLB monitoring is applicable to 2.6L. + * And also could be enabled from dtsi node. 
+ */ + if (ipa_ctx->ipa_hw_type != IPA_HW_v2_6L || + !ipa_ctx->ipa_uc_monitor_holb) { + IPADBG("Not applicable on this target\n"); + return 0; + } + + ep_idx = ipa2_get_ep_mapping(ipa_client); + if (ep_idx == -1) { + IPAERR("Invalid IPA client\n"); + return 0; + } + + /* + * If the uC interface has not been initialized yet, + * continue with the sequence without resetting the + * pipe. + */ + if (ipa2_uc_state_check()) { + IPADBG("uC interface will not be used to reset %s pipe %d\n", + IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", + ep_idx); + return 0; + } + + /* + * IPA consumer = 0, IPA producer = 1. + * IPA driver concept of PROD/CONS is the opposite of the + * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER, + * and vice-versa. + */ + cmd.params.monitorPipe = (u8)(enable ? 1 : 0); + cmd.params.pipeNum = (u8)ep_idx; + + IPADBG("uC holb monitoring on IPA pipe %d, Enable: %d\n", + ep_idx, enable); + + ret = ipa_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING, 0, + false, 10*HZ); + + return ret; +} +EXPORT_SYMBOL(ipa_uc_monitor_holb); + +/** + * ipa_start_monitor_holb() - Send HOLB command to monitor IPA-USB + * producer pipe. + * + * This function is called after uc is loaded to start monitoring + * IPA pipe towrds USB in case if USB is already connected. + * + * Return codes: + * None + */ +static void ipa_start_monitor_holb(struct work_struct *work) +{ + IPADBG("starting holb monitoring on IPA_CLIENT_USB_CONS\n"); + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa_uc_monitor_holb(IPA_CLIENT_USB_CONS, true); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); +} + + +/** + * ipa_uc_notify_clk_state() - notify to uC of clock enable / disable + * @enabled: true if clock are enabled + * + * The function uses the uC interface in order to notify uC before IPA clocks + * are disabled to make sure uC is not in the middle of operation. + * Also after clocks are enabled ned to notify uC to start processing. 
+ * + * Returns: 0 on success, negative on failure + */ +int ipa_uc_notify_clk_state(bool enabled) +{ + u32 opcode; + + /* + * If the uC interface has not been initialized yet, + * don't notify the uC on the enable/disable + */ + if (ipa2_uc_state_check()) { + IPADBG("uC interface will not notify the UC on clock state\n"); + return 0; + } + + IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE"); + + opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE : + IPA_CPU_2_HW_CMD_CLK_GATE; + + return ipa_uc_send_cmd(0, opcode, 0, true, 0); +} +EXPORT_SYMBOL(ipa_uc_notify_clk_state); + +/** + * ipa_uc_update_hw_flags() - send uC the HW flags to be used + * @flags: This field is expected to be used as bitmask for enum ipa_hw_flags + * + * Returns: 0 on success, negative on failure + */ +int ipa_uc_update_hw_flags(u32 flags) +{ + union IpaHwUpdateFlagsCmdData_t cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.newFlags = flags; + return ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0, + false, HZ); +} +EXPORT_SYMBOL(ipa_uc_update_hw_flags); + +/** + * ipa_uc_memcpy() - Perform a memcpy action using IPA uC + * @dest: physical address to store the copied data. + * @src: physical address of the source data to copy. + * @len: number of bytes to copy. 
+ * + * Returns: 0 on success, negative on failure + */ +int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len) +{ + int res; + struct ipa_mem_buffer mem; + struct IpaHwMemCopyData_t *cmd; + + IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len); + mem.size = sizeof(cmd); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + return -ENOMEM; + } + cmd = (struct IpaHwMemCopyData_t *)mem.base; + memset(cmd, 0, sizeof(*cmd)); + cmd->destination_addr = dest; + cmd->dest_buffer_size = len; + cmd->source_addr = src; + cmd->source_buffer_size = len; + res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0, + true, 10 * HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto free_coherent; + } + + res = 0; +free_coherent: + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + return res; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c new file mode 100644 index 000000000000..c77694d12e9b --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c @@ -0,0 +1,959 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2015-2016, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include "ipa_i.h" + +/* MHI uC interface definitions */ +#define IPA_HW_INTERFACE_MHI_VERSION 0x0004 + +#define IPA_HW_MAX_NUMBER_OF_CHANNELS 2 +#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS 2 +#define IPA_HW_MAX_CHANNEL_HANDLE (IPA_HW_MAX_NUMBER_OF_CHANNELS-1) + +/** + * Values that represent the MHI commands from CPU to IPA HW. + * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing. + * Once operation was completed HW shall respond with + * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. + * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready + * to serve MHI transfers. 
Once initialization was completed HW shall + * respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. + * IPA_HW_MHI_CHANNEL_STATE_ENABLE + * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data. + * Once operation was completed HW shall respond with + * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED. + * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel + * processing state following host request. Once operation was completed + * HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE. + * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL syncronization. + * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing. + */ +enum ipa_cpu_2_hw_mhi_commands { + IPA_CPU_2_HW_CMD_MHI_INIT + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), + IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2), + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3), + IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4), + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5) +}; + +/** + * Values that represent MHI related HW responses to CPU commands. + * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to + * IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or + * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands. + */ +enum ipa_hw_2_cpu_mhi_responses { + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), +}; + +/** + * Values that represent MHI related HW event to be sent to CPU. 
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specify the device detected an + * error in an element from the transfer ring associated with the channel + * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specify a bam + * interrupt was asserted when MHI engine is suspended + */ +enum ipa_hw_2_cpu_mhi_events { + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), +}; + +/** + * Channel error types. + * @IPA_HW_CHANNEL_ERROR_NONE: No error persists. + * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected + */ +enum ipa_hw_channel_errors { + IPA_HW_CHANNEL_ERROR_NONE, + IPA_HW_CHANNEL_INVALID_RE_ERROR +}; + +/** + * MHI error types. + * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space + * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array + * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array + * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on + * secondary event ring + * @IPA_HW_LINK_ERROR: Link error + */ +enum ipa_hw_mhi_errors { + IPA_HW_INVALID_MMIO_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0), + IPA_HW_INVALID_CHANNEL_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1), + IPA_HW_INVALID_EVENT_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2), + IPA_HW_NO_ED_IN_RING_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4), + IPA_HW_LINK_ERROR + = FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5), +}; + + +/** + * Structure referring to the common and MHI section of 128B shared memory + * located in offset zero of SW Partition in IPA SRAM. + * The shared memory is used for communication between IPA HW and CPU. + * @common: common section in IPA SRAM + * @interfaceVersionMhi: The MHI interface version as reported by HW + * @mhiState: Overall MHI state + * @reserved_2B: reserved + * @mhiCnl0State: State of MHI channel 0. 
+ * The state carries information regarding the error type. + * See IPA_HW_MHI_CHANNEL_STATES. + * @mhiCnl0State: State of MHI channel 1. + * @mhiCnl0State: State of MHI channel 2. + * @mhiCnl0State: State of MHI channel 3 + * @mhiCnl0State: State of MHI channel 4. + * @mhiCnl0State: State of MHI channel 5. + * @mhiCnl0State: State of MHI channel 6. + * @mhiCnl0State: State of MHI channel 7. + * @reserved_37_34: reserved + * @reserved_3B_38: reserved + * @reserved_3F_3C: reserved + */ +struct IpaHwSharedMemMhiMapping_t { + struct IpaHwSharedMemCommonMapping_t common; + u16 interfaceVersionMhi; + u8 mhiState; + u8 reserved_2B; + u8 mhiCnl0State; + u8 mhiCnl1State; + u8 mhiCnl2State; + u8 mhiCnl3State; + u8 mhiCnl4State; + u8 mhiCnl5State; + u8 mhiCnl6State; + u8 mhiCnl7State; + u32 reserved_37_34; + u32 reserved_3B_38; + u32 reserved_3F_3C; +}; + + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command. + * Parameters are sent as pointer thus should be reside in address accessible + * to HW. + * @msiAddress: The MSI base (in device space) used for asserting the interrupt + * (MSI) associated with the event ring + * mmioBaseAddress: The address (in device space) of MMIO structure in + * host space + * deviceMhiCtrlBaseAddress: Base address of the memory region in the device + * address space where the MHI control data structures are allocated by + * the host, including channel context array, event context array, + * and rings. This value is used for host/device address translation. + * deviceMhiDataBaseAddress: Base address of the memory region in the device + * address space where the MHI data buffers are allocated by the host. + * This value is used for host/device address translation. + * firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel + * firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this + * event ring. 
+ */ +struct IpaHwMhiInitCmdData_t { + u32 msiAddress; + u32 mmioBaseAddress; + u32 deviceMhiCtrlBaseAddress; + u32 deviceMhiDataBaseAddress; + u32 firstChannelIndex; + u32 firstEventRingIndex; +}; + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL + * command. Parameters are sent as 32b immediate parameters. + * @hannelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is + * used as an index in channel context array structures. + * @bamPipeId: The BAM pipe number for pipe dedicated for this channel + * @channelDirection: The direction of the channel as defined in the channel + * type field (CHTYPE) in the channel context data structure. + * @reserved: reserved. + */ +union IpaHwMhiInitChannelCmdData_t { + struct IpaHwMhiInitChannelCmdParams_t { + u32 channelHandle:8; + u32 contexArrayIndex:8; + u32 bamPipeId:6; + u32 channelDirection:2; + u32 reserved:8; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command. + * @msiAddress_low: The MSI lower base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiMask: Mask indicating number of messages assigned by the host to device + * @msiData: Data Pattern to use when generating the MSI + */ +struct IpaHwMhiMsiCmdData_t { + u32 msiAddress_low; + u32 msiAddress_hi; + u32 msiMask; + u32 msiData; +}; + +/** + * Structure holding the parameters for + * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command. + * Parameters are sent as 32b immediate parameters. + * @requestedState: The requested channel state as was indicated from Host. 
+ * Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @LPTransitionRejected: Indication that low power state transition was + * rejected + * @reserved: reserved + */ +union IpaHwMhiChangeChannelStateCmdData_t { + struct IpaHwMhiChangeChannelStateCmdParams_t { + u32 requestedState:8; + u32 channelHandle:8; + u32 LPTransitionRejected:8; + u32 reserved:8; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command. + * Parameters are sent as 32b immediate parameters. + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @reserved: reserved + */ +union IpaHwMhiStopEventUpdateData_t { + struct IpaHwMhiStopEventUpdateDataParams_t { + u32 channelHandle:8; + u32 reserved:24; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response. + * Parameters are sent as 32b immediate parameters. + * @state: The new channel state. In case state is not as requested this is + * error indication for the last command + * @channelHandle: The channel identifier + * @additonalParams: For stop: the number of pending bam descriptors currently + * queued + */ +union IpaHwMhiChangeChannelStateResponseData_t { + struct IpaHwMhiChangeChannelStateResponseParams_t { + u32 state:8; + u32 channelHandle:8; + u32 additonalParams:16; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event. + * Parameters are sent as 32b immediate parameters. + * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS + * @channelHandle: The channel identifier as allocated by driver. 
+ * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @reserved: reserved + */ +union IpaHwMhiChannelErrorEventData_t { + struct IpaHwMhiChannelErrorEventParams_t { + u32 errorType:8; + u32 channelHandle:8; + u32 reserved:16; + } params; + u32 raw32b; +}; + +/** + * Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event. + * Parameters are sent as 32b immediate parameters. + * @channelHandle: The channel identifier as allocated by driver. + * value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE + * @reserved: reserved + */ +union IpaHwMhiChannelWakeupEventData_t { + struct IpaHwMhiChannelWakeupEventParams_t { + u32 channelHandle:8; + u32 reserved:24; + } params; + u32 raw32b; +}; + +/** + * Structure holding the MHI Common statistics + * @numULDLSync: Number of times UL activity trigged due to DL activity + * @numULTimerExpired: Number of times UL Accm Timer expired + */ +struct IpaHwStatsMhiCmnInfoData_t { + u32 numULDLSync; + u32 numULTimerExpired; + u32 numChEvCtxWpRead; + u32 reserved; +}; + +/** + * Structure holding the MHI Channel statistics + * @doorbellInt: The number of doorbell int + * @reProccesed: The number of ring elements processed + * @bamFifoFull: Number of times Bam Fifo got full + * @bamFifoEmpty: Number of times Bam Fifo got empty + * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75% + * @bamFifoUsageLow: Number of times Bam fifo usage went below 25% + * @bamInt: Number of BAM Interrupts + * @ringFull: Number of times Transfer Ring got full + * @ringEmpty: umber of times Transfer Ring got empty + * @ringUsageHigh: Number of times Transfer Ring usage went above 75% + * @ringUsageLow: Number of times Transfer Ring usage went below 25% + * @delayedMsi: Number of times device triggered MSI to host after + * Interrupt Moderation Timer expiry + * @immediateMsi: Number of times device triggered MSI to host immediately + * @thresholdMsi: Number of times device triggered MSI due to max 
pending + * events threshold reached + * @numSuspend: Number of times channel was suspended + * @numResume: Number of times channel was suspended + * @num_OOB: Number of times we indicated that we are OOB + * @num_OOB_timer_expiry: Number of times we indicated that we are OOB + * after timer expiry + * @num_OOB_moderation_timer_start: Number of times we started timer after + * sending OOB and hitting OOB again before we processed threshold + * number of packets + * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode + */ +struct IpaHwStatsMhiCnlInfoData_t { + u32 doorbellInt; + u32 reProccesed; + u32 bamFifoFull; + u32 bamFifoEmpty; + u32 bamFifoUsageHigh; + u32 bamFifoUsageLow; + u32 bamInt; + u32 ringFull; + u32 ringEmpty; + u32 ringUsageHigh; + u32 ringUsageLow; + u32 delayedMsi; + u32 immediateMsi; + u32 thresholdMsi; + u32 numSuspend; + u32 numResume; + u32 num_OOB; + u32 num_OOB_timer_expiry; + u32 num_OOB_moderation_timer_start; + u32 num_db_mode_evt; +}; + +/** + * Structure holding the MHI statistics + * @mhiCmnStats: Stats pertaining to MHI + * @mhiCnlStats: Stats pertaining to each channel + */ +struct IpaHwStatsMhiInfoData_t { + struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats; + struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[ + IPA_HW_MAX_NUMBER_OF_CHANNELS]; +}; + +/** + * Structure holding the MHI Common Config info + * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled + * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is + * enabled + * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events + * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events + */ +struct IpaHwConfigMhiCmnInfoData_t { + u8 isDlUlSyncEnabled; + u8 UlAccmVal; + u8 ulMsiEventThreshold; + u8 dlMsiEventThreshold; +}; + +/** + * Structure holding the parameters for MSI info data + * @msiAddress_low: The MSI lower base addr (in device space) used for asserting + * the interrupt 
(MSI) associated with the event ring. + * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting + * the interrupt (MSI) associated with the event ring. + * @msiMask: Mask indicating number of messages assigned by the host to device + * @msiData: Data Pattern to use when generating the MSI + */ +struct IpaHwConfigMhiMsiInfoData_t { + u32 msiAddress_low; + u32 msiAddress_hi; + u32 msiMask; + u32 msiData; +}; + +/** + * Structure holding the MHI Channel Config info + * @transferRingSize: The Transfer Ring size in terms of Ring Elements + * @transferRingIndex: The Transfer Ring channel number as defined by host + * @eventRingIndex: The Event Ring Index associated with this Transfer Ring + * @bamPipeIndex: The BAM Pipe associated with this channel + * @isOutChannel: Indication for the direction of channel + * @reserved_0: Reserved byte for maintaining 4byte alignment + * @reserved_1: Reserved byte for maintaining 4byte alignment + */ +struct IpaHwConfigMhiCnlInfoData_t { + u16 transferRingSize; + u8 transferRingIndex; + u8 eventRingIndex; + u8 bamPipeIndex; + u8 isOutChannel; + u8 reserved_0; + u8 reserved_1; +}; + +/** + * Structure holding the MHI Event Config info + * @msiVec: msi vector to invoke MSI interrupt + * @intmodtValue: Interrupt moderation timer (in milliseconds) + * @eventRingSize: The Event Ring size in terms of Ring Elements + * @eventRingIndex: The Event Ring number as defined by host + * @reserved_0: Reserved byte for maintaining 4byte alignment + * @reserved_1: Reserved byte for maintaining 4byte alignment + * @reserved_2: Reserved byte for maintaining 4byte alignment + */ +struct IpaHwConfigMhiEventInfoData_t { + u32 msiVec; + u16 intmodtValue; + u16 eventRingSize; + u8 eventRingIndex; + u8 reserved_0; + u8 reserved_1; + u8 reserved_2; +}; + +/** + * Structure holding the MHI Config info + * @mhiCmnCfg: Common Config pertaining to MHI + * @mhiMsiCfg: Config pertaining to MSI config + * @mhiCnlCfg: Config pertaining to each 
channel + * @mhiEvtCfg: Config pertaining to each event Ring + */ +struct IpaHwConfigMhiInfoData_t { + struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg; + struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg; + struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[ + IPA_HW_MAX_NUMBER_OF_CHANNELS]; + struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[ + IPA_HW_MAX_NUMBER_OF_EVENTRINGS]; +}; + + +struct ipa_uc_mhi_ctx { + u8 expected_responseOp; + u32 expected_responseParams; + void (*ready_cb)(void); + void (*wakeup_request_cb)(void); + u32 mhi_uc_stats_ofst; + struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio; +}; + +#define PRINT_COMMON_STATS(x) \ + (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \ + #x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x)) + +#define PRINT_CHANNEL_STATS(ch, x) \ + (nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \ + #x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x)) + +struct ipa_uc_mhi_ctx *ipa_uc_mhi_ctx; + +static int ipa_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio, u32 *uc_status) +{ + IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp); + if (uc_sram_mmio->responseOp == ipa_uc_mhi_ctx->expected_responseOp && + uc_sram_mmio->responseParams == + ipa_uc_mhi_ctx->expected_responseParams) { + *uc_status = 0; + return 0; + } + return -EINVAL; +} + +static void ipa_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio) +{ + if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) { + union IpaHwMhiChannelErrorEventData_t evt; + + IPAERR("Channel error\n"); + evt.raw32b = uc_sram_mmio->eventParams; + IPAERR("errorType=%d channelHandle=%d reserved=%d\n", + evt.params.errorType, evt.params.channelHandle, + evt.params.reserved); + } else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) { + union IpaHwMhiChannelWakeupEventData_t evt; + + IPADBG("WakeUp channel request\n"); + evt.raw32b = 
uc_sram_mmio->eventParams; + IPADBG("channelHandle=%d reserved=%d\n", + evt.params.channelHandle, evt.params.reserved); + ipa_uc_mhi_ctx->wakeup_request_cb(); + } +} + +static void ipa_uc_mhi_event_log_info_hdlr( + struct IpaHwEventLogInfoData_t *uc_event_top_mmio) + +{ + if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) { + IPAERR("MHI feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + +if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].params.size != + sizeof(struct IpaHwStatsMhiInfoData_t)) { + IPAERR("mhi stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsMhiInfoData_t), + uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].params.size + ); + return; +} + +ipa_uc_mhi_ctx->mhi_uc_stats_ofst = +uc_event_top_mmio->statsInfo.baseAddrOffset + +uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].params.offset; +IPAERR("MHI stats ofst=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_ofst); + + if (ipa_uc_mhi_ctx->mhi_uc_stats_ofst + + sizeof(struct IpaHwStatsMhiInfoData_t) >= + ipa_ctx->ctrl->ipa_reg_base_ofst + + IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) + + ipa_ctx->smem_sz) { + IPAERR("uc_mhi_stats 0x%x outside SRAM\n", + ipa_uc_mhi_ctx->mhi_uc_stats_ofst); + return; + } + + ipa_uc_mhi_ctx->mhi_uc_stats_mmio = + ioremap(ipa_ctx->ipa_wrapper_base + + ipa_uc_mhi_ctx->mhi_uc_stats_ofst, + sizeof(struct IpaHwStatsMhiInfoData_t)); + if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) { + IPAERR("fail to ioremap uc mhi stats\n"); + return; + } +} + +int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void)) +{ + struct ipa_uc_hdlrs hdlrs; + + if (ipa_uc_mhi_ctx) { + IPAERR("Already initialized\n"); + return -EFAULT; + } + + ipa_uc_mhi_ctx = kzalloc(sizeof(*ipa_uc_mhi_ctx), GFP_KERNEL); + if (!ipa_uc_mhi_ctx) { + IPAERR("no mem\n"); + return -ENOMEM; + } + + ipa_uc_mhi_ctx->ready_cb = ready_cb; + ipa_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb; + + memset(&hdlrs, 0, sizeof(hdlrs)); + 
hdlrs.ipa_uc_loaded_hdlr = ipa_uc_mhi_ctx->ready_cb; + hdlrs.ipa_uc_response_hdlr = ipa_uc_mhi_response_hdlr; + hdlrs.ipa_uc_event_hdlr = ipa_uc_mhi_event_hdlr; + hdlrs.ipa_uc_event_log_info_hdlr = ipa_uc_mhi_event_log_info_hdlr; + ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs); + + IPADBG("Done\n"); + return 0; +} + +void ipa2_uc_mhi_cleanup(void) +{ + struct ipa_uc_hdlrs null_hdlrs = { 0 }; + + IPADBG("Enter\n"); + + if (!ipa_uc_mhi_ctx) { + IPAERR("ipa3_uc_mhi_ctx is not initialized\n"); + return; + } + ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs); + kfree(ipa_uc_mhi_ctx); + ipa_uc_mhi_ctx = NULL; + + IPADBG("Done\n"); +} + +int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr, + u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx, + u32 first_evt_idx) +{ + int res; + struct ipa_mem_buffer mem; + struct IpaHwMhiInitCmdData_t *init_cmd_data; + struct IpaHwMhiMsiCmdData_t *msi_cmd; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + res = ipa_uc_update_hw_flags(0); + if (res) { + IPAERR("ipa_uc_update_hw_flags failed %d\n", res); + goto disable_clks; + } + + mem.size = sizeof(*init_cmd_data); + mem.base = dma_zalloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + res = -ENOMEM; + goto disable_clks; + } + init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base; + init_cmd_data->msiAddress = msi->addr_low; + init_cmd_data->mmioBaseAddress = mmio_addr; + init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr; + init_cmd_data->deviceMhiDataBaseAddress = host_data_addr; + init_cmd_data->firstChannelIndex = first_ch_idx; + init_cmd_data->firstEventRingIndex = first_evt_idx; + res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0, + false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + dma_free_coherent(ipa_ctx->pdev, mem.size, 
mem.base, + mem.phys_base); + goto disable_clks; + } + + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + mem.size = sizeof(*msi_cmd); + mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, + GFP_KERNEL); + if (!mem.base) { + IPAERR("fail to alloc DMA buff of size %d\n", mem.size); + res = -ENOMEM; + goto disable_clks; + } + + msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base; + msi_cmd->msiAddress_hi = msi->addr_hi; + msi_cmd->msiAddress_low = msi->addr_low; + msi_cmd->msiData = msi->data; + msi_cmd->msiMask = msi->mask; + res = ipa_uc_send_cmd((u32)mem.phys_base, + IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, + mem.phys_base); + goto disable_clks; + } + + dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base); + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; + +} + +int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle, + int contexArrayIndex, int channelDirection) + +{ + int res; + union IpaHwMhiInitChannelCmdData_t init_cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + if (ipa_ep_idx < 0 || ipa_ep_idx >= ipa_ctx->ipa_num_pipes) { + IPAERR("Invalid ipa_ep_idx.\n"); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; + uc_rsp.params.channelHandle = channelHandle; + ipa_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&init_cmd, 0, sizeof(init_cmd)); + init_cmd.params.channelHandle = channelHandle; + init_cmd.params.contexArrayIndex = contexArrayIndex; + init_cmd.params.bamPipeId = ipa_ep_idx; + init_cmd.params.channelDirection = channelDirection; + + res = 
ipa_uc_send_cmd(init_cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + + +int ipa2_uc_mhi_reset_channel(int channelHandle) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE; + uc_rsp.params.channelHandle = channelHandle; + ipa_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE; + cmd.params.channelHandle = channelHandle; + res = ipa_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa2_uc_mhi_suspend_channel(int channelHandle) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + uc_rsp.params.channelHandle = channelHandle; + ipa_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND; + cmd.params.channelHandle = channelHandle; + res = 
ipa_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected) +{ + union IpaHwMhiChangeChannelStateCmdData_t cmd; + union IpaHwMhiChangeChannelStateResponseData_t uc_rsp; + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&uc_rsp, 0, sizeof(uc_rsp)); + uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN; + uc_rsp.params.channelHandle = channelHandle; + ipa_uc_mhi_ctx->expected_responseOp = + IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE; + ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b; + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN; + cmd.params.channelHandle = channelHandle; + cmd.params.LPTransitionRejected = LPTransitionRejected; + res = ipa_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; + +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa2_uc_mhi_stop_event_update_channel(int channelHandle) +{ + union IpaHwMhiStopEventUpdateData_t cmd; + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + memset(&cmd, 0, sizeof(cmd)); + cmd.params.channelHandle = channelHandle; + + ipa_uc_mhi_ctx->expected_responseOp = + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE; + ipa_uc_mhi_ctx->expected_responseParams = cmd.raw32b; + + res = ipa_uc_send_cmd(cmd.raw32b, + IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; +disable_clks: + 
IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd) +{ + int res; + + if (!ipa_uc_mhi_ctx) { + IPAERR("Not initialized\n"); + return -EFAULT; + } + + IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n", + cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal); + IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n", + cmd->params.ulMsiEventThreshold, + cmd->params.dlMsiEventThreshold); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + res = ipa_uc_send_cmd(cmd->raw32b, + IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ); + if (res) { + IPAERR("ipa_uc_send_cmd failed %d\n", res); + goto disable_clks; + } + + res = 0; +disable_clks: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return res; +} + +int ipa2_uc_mhi_print_stats(char *dbg_buff, int size) +{ + int nBytes = 0; + int i; + + if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) { + IPAERR("MHI uc stats is not valid\n"); + return 0; + } + + nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, + "Common Stats:\n"); + PRINT_COMMON_STATS(numULDLSync); + PRINT_COMMON_STATS(numULTimerExpired); + PRINT_COMMON_STATS(numChEvCtxWpRead); + + for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) { + nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, + "Channel %d Stats:\n", i); + PRINT_CHANNEL_STATS(i, doorbellInt); + PRINT_CHANNEL_STATS(i, reProccesed); + PRINT_CHANNEL_STATS(i, bamFifoFull); + PRINT_CHANNEL_STATS(i, bamFifoEmpty); + PRINT_CHANNEL_STATS(i, bamFifoUsageHigh); + PRINT_CHANNEL_STATS(i, bamFifoUsageLow); + PRINT_CHANNEL_STATS(i, bamInt); + PRINT_CHANNEL_STATS(i, ringFull); + PRINT_CHANNEL_STATS(i, ringEmpty); + PRINT_CHANNEL_STATS(i, ringUsageHigh); + PRINT_CHANNEL_STATS(i, ringUsageLow); + PRINT_CHANNEL_STATS(i, delayedMsi); + PRINT_CHANNEL_STATS(i, immediateMsi); + PRINT_CHANNEL_STATS(i, thresholdMsi); + PRINT_CHANNEL_STATS(i, numSuspend); + PRINT_CHANNEL_STATS(i, numResume); + PRINT_CHANNEL_STATS(i, num_OOB); + PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry); 
+ PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start); + PRINT_CHANNEL_STATS(i, num_db_mode_evt); + } + + return nBytes; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c new file mode 100644 index 000000000000..bf4c8c57d5e9 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. + */ +#include "ipa_i.h" + +#define IPA_UC_NTN_DB_PA_TX 0x79620DC +#define IPA_UC_NTN_DB_PA_RX 0x79620D8 + +static void ipa_uc_ntn_event_handler( + struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio) +{ + union IpaHwNTNErrorEventData_t ntn_evt; + + if (uc_sram_mmio->eventOp == IPA_HW_2_CPU_EVENT_NTN_ERROR) { + ntn_evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n", + ntn_evt.params.ntn_error_type, + ntn_evt.params.ipa_pipe_number, + ntn_evt.params.ntn_ch_err_type); + } +} + +static void ipa_uc_ntn_event_log_info_handler( + struct IpaHwEventLogInfoData_t *uc_event_top_mmio) +{ + if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) { + IPAERR("NTN feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + +if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].params.size != + sizeof(struct IpaHwStatsNTNInfoData_t)) { + IPAERR("NTN stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsNTNInfoData_t), + uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].params.size + ); + return; +} + +ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = +uc_event_top_mmio->statsInfo.baseAddrOffset + +uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].params.offset; +IPAERR("NTN stats ofst=0x%x\n", ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst); + + if (ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst + + sizeof(struct IpaHwStatsNTNInfoData_t) >= + ipa_ctx->ctrl->ipa_reg_base_ofst + + 
IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) + + ipa_ctx->smem_sz) { + IPAERR("uc_ntn_stats 0x%x outside SRAM\n", + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst); + return; + } + + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio = + ioremap(ipa_ctx->ipa_wrapper_base + + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst, + sizeof(struct IpaHwStatsNTNInfoData_t)); + if (!ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) { + IPAERR("fail to ioremap uc ntn stats\n"); + return; + } +} + +/** + * ipa2_get_wdi_stats() - Query WDI statistics from uc + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats) +{ +#define TX_STATS(y) stats->tx_ch_stats[0].y = \ + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y +#define RX_STATS(y) stats->rx_ch_stats[0].y = \ + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (!stats || !ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) { + IPAERR("bad parms stats=%p ntn_stats=%p\n", + stats, + ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + TX_STATS(num_pkts_processed); + TX_STATS(tail_ptr_val); + TX_STATS(num_db_fired); + TX_STATS(tx_comp_ring_stats.ringFull); + TX_STATS(tx_comp_ring_stats.ringEmpty); + TX_STATS(tx_comp_ring_stats.ringUsageHigh); + TX_STATS(tx_comp_ring_stats.ringUsageLow); + TX_STATS(tx_comp_ring_stats.RingUtilCount); + TX_STATS(bam_stats.bamFifoFull); + TX_STATS(bam_stats.bamFifoEmpty); + TX_STATS(bam_stats.bamFifoUsageHigh); + TX_STATS(bam_stats.bamFifoUsageLow); + TX_STATS(bam_stats.bamUtilCount); + TX_STATS(num_db); + TX_STATS(num_unexpected_db); + TX_STATS(num_bam_int_handled); + TX_STATS(num_bam_int_in_non_running_state); + TX_STATS(num_qmb_int_handled); + TX_STATS(num_bam_int_handled_while_wait_for_bam); + 
TX_STATS(num_bam_int_handled_while_not_in_bam); + + RX_STATS(max_outstanding_pkts); + RX_STATS(num_pkts_processed); + RX_STATS(rx_ring_rp_value); + RX_STATS(rx_ind_ring_stats.ringFull); + RX_STATS(rx_ind_ring_stats.ringEmpty); + RX_STATS(rx_ind_ring_stats.ringUsageHigh); + RX_STATS(rx_ind_ring_stats.ringUsageLow); + RX_STATS(rx_ind_ring_stats.RingUtilCount); + RX_STATS(bam_stats.bamFifoFull); + RX_STATS(bam_stats.bamFifoEmpty); + RX_STATS(bam_stats.bamFifoUsageHigh); + RX_STATS(bam_stats.bamFifoUsageLow); + RX_STATS(bam_stats.bamUtilCount); + RX_STATS(num_bam_int_handled); + RX_STATS(num_db); + RX_STATS(num_unexpected_db); + RX_STATS(num_pkts_in_dis_uninit_state); + RX_STATS(num_bam_int_handled_while_not_in_bam); + RX_STATS(num_bam_int_handled_while_in_bam_state); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data) +{ + int ret; + + if (!ipa_ctx) { + IPAERR("IPA ctx is null\n"); + return -ENXIO; + } + + ret = ipa2_uc_state_check(); + if (ret) { + ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb; + ipa_ctx->uc_ntn_ctx.priv = user_data; + return 0; + } + + return -EEXIST; +} + +int ipa2_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *), void *priv) +{ + return ipa2_register_ipa_ready_cb(ipauc_ready_cb, priv); +} + +void ipa2_ntn_uc_dereg_rdyCB(void) +{ + ipa_ctx->uc_ntn_ctx.uc_ready_cb = NULL; + ipa_ctx->uc_ntn_ctx.priv = NULL; +} + +static void ipa_uc_ntn_loaded_handler(void) +{ + if (!ipa_ctx) { + IPAERR("IPA ctx is null\n"); + return; + } + + if (ipa_ctx->uc_ntn_ctx.uc_ready_cb) { + ipa_ctx->uc_ntn_ctx.uc_ready_cb( + ipa_ctx->uc_ntn_ctx.priv); + + ipa_ctx->uc_ntn_ctx.uc_ready_cb = + NULL; + ipa_ctx->uc_ntn_ctx.priv = NULL; + } +} + +int ipa_ntn_init(void) +{ + struct ipa_uc_hdlrs uc_ntn_cbs = { 0 }; + + uc_ntn_cbs.ipa_uc_event_hdlr = ipa_uc_ntn_event_handler; + uc_ntn_cbs.ipa_uc_event_log_info_hdlr = + ipa_uc_ntn_event_log_info_handler; + uc_ntn_cbs.ipa_uc_loaded_hdlr = + 
ipa_uc_ntn_loaded_handler; + + ipa_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs); + + return 0; +} + +static int ipa2_uc_send_ntn_setup_pipe_cmd( + struct ipa_ntn_setup_info *ntn_info, u8 dir) +{ + int ipa_ep_idx; + int result = 0; + struct ipa_mem_buffer cmd; + struct IpaHwNtnSetUpCmdData_t *Ntn_params; + struct IpaHwOffloadSetUpCmdData_t *cmd_data; + + if (ntn_info == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx.\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx); + + IPADBG("ring_base_pa = 0x%pa\n", + &ntn_info->ring_base_pa); + IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size); + IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa); + IPADBG("num_buffers = %d\n", ntn_info->num_buffers); + IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size); + IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa); + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_NTN; + + Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params; + Ntn_params->ring_base_pa = ntn_info->ring_base_pa; + Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa; + Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size; + Ntn_params->num_buffers = ntn_info->num_buffers; + Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa; + Ntn_params->data_buff_size = ntn_info->data_buff_size; + Ntn_params->ipa_pipe_number = ipa_ep_idx; + Ntn_params->dir = dir; + + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) + result = 
-EFAULT; + + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + return result; +} + +/** + * ipa2_setup_uc_ntn_pipes() - setup uc offload pipes + */ +int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in, + ipa_notify_cb notify, void *priv, u8 hdr_len, + struct ipa_ntn_conn_out_params *outp) +{ + int ipa_ep_idx_ul, ipa_ep_idx_dl; + struct ipa_ep_context *ep_ul, *ep_dl; + int result = 0; + + if (in == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client); + ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client); + if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) { + IPAERR("fail to alloc EP.\n"); + return -EFAULT; + } + + ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul]; + ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl]; + + if (ep_ul->valid || ep_dl->valid) { + IPAERR("EP already allocated ul:%d dl:%d\n", + ep_ul->valid, ep_dl->valid); + return -EFAULT; + } + + memset(ep_ul, 0, offsetof(struct ipa_ep_context, sys)); + memset(ep_dl, 0, offsetof(struct ipa_ep_context, sys)); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + /* setup ul ep cfg */ + ep_ul->valid = 1; + ep_ul->client = in->ul.client; + ep_ul->client_notify = notify; + ep_ul->priv = priv; + + memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg)); + ep_ul->cfg.nat.nat_en = IPA_SRC_NAT; + ep_ul->cfg.hdr.hdr_len = hdr_len; + ep_ul->cfg.mode.mode = IPA_BASIC; + + if (ipa2_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) { + IPAERR("fail to setup ul pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) { + IPAERR("fail to send cmd to uc for ul pipe\n"); + result = -EFAULT; + goto fail; + } + ipa_install_dflt_flt_rules(ipa_ep_idx_ul); + outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX; + ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED; + IPAERR("client %d (ep: %d) connected\n", in->ul.client, + ipa_ep_idx_ul); + + /* setup dl ep cfg */ + ep_dl->valid = 1; + ep_dl->client = in->dl.client; + memset(&ep_dl->cfg, 0, 
sizeof(ep_ul->cfg)); + ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT; + ep_dl->cfg.hdr.hdr_len = hdr_len; + ep_dl->cfg.mode.mode = IPA_BASIC; + + if (ipa2_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) { + IPAERR("fail to setup dl pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) { + IPAERR("fail to send cmd to uc for dl pipe\n"); + result = -EFAULT; + goto fail; + } + outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX; + ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED; + + result = ipa_enable_data_path(ipa_ep_idx_dl); + if (result) { + IPAERR("Enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_dl); + result = -EFAULT; + goto fail; + } + IPAERR("client %d (ep: %d) connected\n", in->dl.client, + ipa_ep_idx_dl); + +fail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +/** + * ipa2_tear_down_uc_offload_pipes() - tear down uc offload pipes + */ + +int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, + int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params) +{ + struct ipa_mem_buffer cmd; + struct ipa_ep_context *ep_ul, *ep_dl; + struct IpaHwOffloadCommonChCmdData_t *cmd_data; + union IpaHwNtnCommonChCmdData_t *tear; + int result = 0; + + IPADBG("ep_ul = %d\n", ipa_ep_idx_ul); + IPADBG("ep_dl = %d\n", ipa_ep_idx_dl); + + ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul]; + ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl]; + + if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED || + ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) { + IPAERR("channel bad state: ul %d dl %d\n", + ep_ul->uc_offload_state, ep_dl->uc_offload_state); + return -EFAULT; + } + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_NTN; + tear = 
&cmd_data->CommonCh_params.NtnCommonCh_params; + + /* teardown the DL pipe */ + ipa_disable_data_path(ipa_ep_idx_dl); + /* + * Reset ep before sending cmd otherwise disconnect + * during data transfer will result into + * enormous suspend interrupts + */ + memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context)); + IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl); + tear->params.ipa_pipe_number = ipa_ep_idx_dl; + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("fail to tear down dl pipe\n"); + result = -EFAULT; + goto fail; + } + + /* teardown the UL pipe */ + tear->params.ipa_pipe_number = ipa_ep_idx_ul; + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("fail to tear down ul pipe\n"); + result = -EFAULT; + goto fail; + } + + ipa_delete_dflt_flt_rules(ipa_ep_idx_ul); + memset(&ipa_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa_ep_context)); + IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul); + +fail: + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h new file mode 100644 index 000000000000..9a96ad919ee8 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef _IPA_UC_OFFLOAD_I_H_ +#define _IPA_UC_OFFLOAD_I_H_ + +#include +#include "ipa_i.h" + +/* + * Neutrino protocol related data structures + */ + +#define IPA_UC_MAX_NTN_TX_CHANNELS 1 +#define IPA_UC_MAX_NTN_RX_CHANNELS 1 + +#define IPA_NTN_TX_DIR 1 +#define IPA_NTN_RX_DIR 2 + +#define IPA_WDI3_TX_DIR 1 +#define IPA_WDI3_RX_DIR 2 + +/** + * @brief Enum value determined based on the feature it + * corresponds to + * +----------------+----------------+ + * | 3 bits | 5 bits | + * +----------------+----------------+ + * | HW_FEATURE | OPCODE | + * +----------------+----------------+ + * + */ +#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode) +#define EXTRACT_UC_FEATURE(value) (value >> 5) + +#define IPA_HW_NUM_FEATURES 0x8 + +/** + * enum ipa_hw_features - Values that represent the features supported in IPA HW + * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW + * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW + * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse + * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW + * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW + * @IPA_HW_FEATURE_OFFLOAD : Feature related to NTN operation in IPA HW + * @IPA_HW_FEATURE_WDI3 : Feature related to WDI operation in IPA HW + */ +enum ipa_hw_features { + IPA_HW_FEATURE_COMMON = 0x0, + IPA_HW_FEATURE_MHI = 0x1, + IPA_HW_FEATURE_POWER_COLLAPSE = 0x2, + IPA_HW_FEATURE_WDI = 0x3, + IPA_HW_FEATURE_NTN = 0x4, + IPA_HW_FEATURE_OFFLOAD = 0x5, + IPA_HW_FEATURE_WDI3 = 0x6, + IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES +}; + +/** + * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common + * section in 128B shared memory located in offset zero of SW Partition in IPA + * SRAM. + * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS + * @cmdParams : CPU->HW command parameter. 
The parameter filed can hold 32 bits + * of parameters (immediate parameters) and point on structure in + * system memory (in such case the address must be accessible + * for HW) + * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES + * @responseParams : HW->CPU response parameter. The parameter filed can hold + * 32 bits of parameters (immediate parameters) and point + * on structure in system memory + * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS + * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits + * of parameters (immediate parameters) and point on + * structure in system memory + * @firstErrorAddress : Contains the address of first error-source on SNOC + * @hwState : State of HW. The state carries information regarding the error + * type. + * @warningCounter : The warnings counter. The counter carries information + * regarding non fatal errors in HW + * @interfaceVersionCommon : The Common interface version as reported by HW + * + * The shared memory is used for communication between IPA HW and CPU. 
+ */ +struct IpaHwSharedMemCommonMapping_t { + u8 cmdOp; + u8 reserved_01; + u16 reserved_03_02; + u32 cmdParams; + u8 responseOp; + u8 reserved_09; + u16 reserved_0B_0A; + u32 responseParams; + u8 eventOp; + u8 reserved_11; + u16 reserved_13_12; + u32 eventParams; + u32 reserved_1B_18; + u32 firstErrorAddress; + u8 hwState; + u8 warningCounter; + u16 reserved_23_22; + u16 interfaceVersionCommon; + u16 reserved_27_26; +} __packed; + +/** + * union IpaHwFeatureInfoData_t - parameters for stats/config blob + * + * @offset : Location of a feature within the EventInfoData + * @size : Size of the feature + */ +union IpaHwFeatureInfoData_t { + struct IpaHwFeatureInfoParams_t { + u32 offset:16; + u32 size:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * struct IpaHwEventInfoData_t - Structure holding the parameters for + * statistics and config info + * + * @baseAddrOffset : Base Address Offset of the statistics or config + * structure from IPA_WRAPPER_BASE + * @IpaHwFeatureInfoData_t : Location and size of each feature within + * the statistics or config structure + * + * @note Information about each feature in the featureInfo[] + * array is populated at predefined indices per the IPA_HW_FEATURES + * enum definition + */ +struct IpaHwEventInfoData_t { + u32 baseAddrOffset; + union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES]; +} __packed; + +/** + * struct IpaHwEventLogInfoData_t - Structure holding the parameters for + * IPA_HW_2_CPU_EVENT_LOG_INFO Event + * + * @featureMask : Mask indicating the features enabled in HW. 
+ * Refer IPA_HW_FEATURE_MASK + * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event + * Log Buffer structure + * @statsInfo : Statistics related information + * @configInfo : Configuration related information + * + * @note The offset location of this structure from IPA_WRAPPER_BASE + * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO + * Event + */ +struct IpaHwEventLogInfoData_t { + u32 featureMask; + u32 circBuffBaseAddrOffset; + struct IpaHwEventInfoData_t statsInfo; + struct IpaHwEventInfoData_t configInfo; + +} __packed; + +/** + * struct ipa_uc_ntn_ctx + * @ntn_uc_stats_ofst: Neutrino stats offset + * @ntn_uc_stats_mmio: Neutrino stats + * @priv: private data of client + * @uc_ready_cb: uc Ready cb + */ +struct ipa_uc_ntn_ctx { + u32 ntn_uc_stats_ofst; + struct IpaHwStatsNTNInfoData_t *ntn_uc_stats_mmio; + void *priv; + ipa_uc_ready_cb uc_ready_cb; +}; + +/** + * enum ipa_hw_2_cpu_ntn_events - Values that represent HW event + * to be sent to CPU + * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW + * detected an error in NTN + * + */ +enum ipa_hw_2_cpu_ntn_events { + IPA_HW_2_CPU_EVENT_NTN_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0), +}; + + +/** + * enum ipa_hw_ntn_errors - NTN specific error types. + * @IPA_HW_NTN_ERROR_NONE : No error persists + * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel + */ +enum ipa_hw_ntn_errors { + IPA_HW_NTN_ERROR_NONE = 0, + IPA_HW_NTN_CHANNEL_ERROR = 1 +}; + +/** + * enum ipa_hw_ntn_channel_states - Values that represent NTN + * channel state machine. + * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is + * initialized but disabled + * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running. + * Entered after SET_UP_COMMAND is processed successfully + * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. 
Shall not + * be in use in operational scenario + * + * These states apply to both Tx and Rx paths. These do not reflect the + * sub-state the state machine may be in. + */ +enum ipa_hw_ntn_channel_states { + IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2, + IPA_HW_NTN_CHANNEL_STATE_ERROR = 3, + IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF +}; + +/** + * enum ipa_hw_ntn_channel_errors - List of NTN Channel error + * types. This is present in the event param + * @IPA_HW_NTN_CH_ERR_NONE: No error persists + * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine + * transition + * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating + * num RE to bring + * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update + * failed in Rx ring + * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine + * transition + * @IPA_HW_NTN_RX_CACHE_NON_EMPTY: + * @IPA_HW_NTN_CH_ERR_RESERVED: + * + * These states apply to both Tx and Rx paths. These do not + * reflect the sub-state the state machine may be in. 
+ */ +enum ipa_hw_ntn_channel_errors { + IPA_HW_NTN_CH_ERR_NONE = 0, + IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1, + IPA_HW_NTN_TX_FSM_ERROR = 2, + IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3, + IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4, + IPA_HW_NTN_RX_FSM_ERROR = 5, + IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6, + IPA_HW_NTN_CH_ERR_RESERVED = 0xFF +}; + + +/** + * struct IpaHwNtnSetUpCmdData_t - Ntn setup command data + * @ring_base_pa: physical address of the base of the Tx/Rx NTN + * ring + * @buff_pool_base_pa: physical address of the base of the Tx/Rx + * buffer pool + * @ntn_ring_size: size of the Tx/Rx NTN ring + * @num_buffers: Rx/tx buffer pool size + * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN + * Ring's tail pointer + * @ipa_pipe_number: IPA pipe number that has to be used for the + * Tx/Rx path + * @dir: Tx/Rx Direction + * @data_buff_size: size of the each data buffer allocated in + * DDR + */ +struct IpaHwNtnSetUpCmdData_t { + u32 ring_base_pa; + u32 buff_pool_base_pa; + u16 ntn_ring_size; + u16 num_buffers; + u32 ntn_reg_base_ptr_pa; + u8 ipa_pipe_number; + u8 dir; + u16 data_buff_size; + +} __packed; + +struct IpaHwWdi3SetUpCmdData_t { + u32 transfer_ring_base_pa; + u32 transfer_ring_base_pa_hi; + + u32 transfer_ring_size; + + u32 transfer_ring_doorbell_pa; + u32 transfer_ring_doorbell_pa_hi; + + u32 event_ring_base_pa; + u32 event_ring_base_pa_hi; + + u32 event_ring_size; + + u32 event_ring_doorbell_pa; + u32 event_ring_doorbell_pa_hi; + + u16 num_pkt_buffers; + u8 ipa_pipe_number; + u8 dir; + + u16 pkt_offset; + u16 reserved0; + + u32 desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]; +} __packed; + +/** + * struct IpaHwNtnCommonChCmdData_t - Structure holding the + * parameters for Ntn Tear down command data params + * + *@ipa_pipe_number: IPA pipe number. 
This could be Tx or an Rx pipe + */ +union IpaHwNtnCommonChCmdData_t { + struct IpaHwNtnCommonChCmdParams_t { + u32 ipa_pipe_number :8; + u32 reserved :24; + } __packed params; + uint32_t raw32b; +} __packed; + +union IpaHwWdi3CommonChCmdData_t { + struct IpaHwWdi3CommonChCmdParams_t { + u32 ipa_pipe_number :8; + u32 reserved :24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * struct IpaHwNTNErrorEventData_t - Structure holding the + * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed + * as immediate params in the shared memory + * + *@ntn_error_type: type of NTN error (IPA_HW_NTN_ERRORS) + *@ipa_pipe_number: IPA pipe number on which error has happened + * Applicable only if error type indicates channel error + *@ntn_ch_err_type: Information about the channel error (if + * available) + */ +union IpaHwNTNErrorEventData_t { + struct IpaHwNTNErrorEventParams_t { + u32 ntn_error_type :8; + u32 reserved :8; + u32 ipa_pipe_number :8; + u32 ntn_ch_err_type :8; + } __packed params; + uint32_t raw32b; +} __packed; + +/** + * struct NTNRxInfoData_t - NTN Structure holding the + * Rx pipe information + * + *@max_outstanding_pkts: Number of outstanding packets in Rx + * Ring + *@num_pkts_processed: Number of packets processed - cumulative + *@rx_ring_rp_value: Read pointer last advertized to the WLAN FW + * + *@ntn_ch_err_type: Information about the channel error (if + * available) + *@rx_ind_ring_stats: + *@bam_stats: + *@num_bam_int_handled: Number of Bam Interrupts handled by FW + *@num_db: Number of times the doorbell was rung + *@num_unexpected_db: Number of unexpected doorbells + *@num_pkts_in_dis_uninit_state: + *@num_bam_int_handled_while_not_in_bam: Number of Bam + * Interrupts handled by FW + *@num_bam_int_handled_while_in_bam_state: Number of Bam + * Interrupts handled by FW + */ +struct NTNRxInfoData_t { + u32 max_outstanding_pkts; + u32 num_pkts_processed; + u32 rx_ring_rp_value; + struct IpaHwRingStats_t rx_ind_ring_stats; + struct 
IpaHwBamStats_t bam_stats; + u32 num_bam_int_handled; + u32 num_db; + u32 num_unexpected_db; + u32 num_pkts_in_dis_uninit_state; + u32 num_bam_int_handled_while_not_in_bam; + u32 num_bam_int_handled_while_in_bam_state; +} __packed; + + +/** + * struct NTNTxInfoData_t - Structure holding the NTN Tx channel + * Ensure that this is always word aligned + * + *@num_pkts_processed: Number of packets processed - cumulative + *@tail_ptr_val: Latest value of doorbell written to copy engine + *@num_db_fired: Number of DB from uC FW to Copy engine + * + *@tx_comp_ring_stats: + *@bam_stats: + *@num_db: Number of times the doorbell was rung + *@num_unexpected_db: Number of unexpected doorbells + *@num_bam_int_handled: Number of Bam Interrupts handled by FW + *@num_bam_int_in_non_running_state: Number of Bam interrupts + * while not in Running state + *@num_qmb_int_handled: Number of QMB interrupts handled + *@num_bam_int_handled_while_wait_for_bam: Number of times the + * Imm Cmd is injected due to fw_desc change + */ +struct NTNTxInfoData_t { + u32 num_pkts_processed; + u32 tail_ptr_val; + u32 num_db_fired; + struct IpaHwRingStats_t tx_comp_ring_stats; + struct IpaHwBamStats_t bam_stats; + u32 num_db; + u32 num_unexpected_db; + u32 num_bam_int_handled; + u32 num_bam_int_in_non_running_state; + u32 num_qmb_int_handled; + u32 num_bam_int_handled_while_wait_for_bam; + u32 num_bam_int_handled_while_not_in_bam; +} __packed; + + +/** + * struct IpaHwStatsNTNInfoData_t - Structure holding the NTN Tx + * channel Ensure that this is always word aligned + * + */ +struct IpaHwStatsNTNInfoData_t { + struct NTNRxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS]; + struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS]; +} __packed; + + +/* + * uC offload related data structures + */ +#define IPA_UC_OFFLOAD_CONNECTED BIT(0) +#define IPA_UC_OFFLOAD_ENABLED BIT(1) +#define IPA_UC_OFFLOAD_RESUMED BIT(2) + +/** + * enum ipa_cpu_2_hw_offload_commands - Values that represent + * the 
offload commands from CPU + * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down + * Offload protocol's Tx/ Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE : Command to enable + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE : Command to disable + * Offload protocol's Tx/ Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND : Command to suspend + * Offload protocol's Tx/Rx Path + * @IPA_CPU_2_HW_CMD_OFFLOAD_RESUME : Command to resume + * Offload protocol's Tx/ Rx Path + */ +enum ipa_cpu_2_hw_offload_commands { + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1), + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2), + IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3), + IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4), + IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5), + IPA_CPU_2_HW_CMD_OFFLOAD_RESUME = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6), +}; + + +/** + * enum ipa_hw_offload_channel_states - Values that represent + * offload channel state machine. + * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is initialized + * but disabled + * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running. Entered after + * SET_UP_COMMAND is processed successfully + * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use + * in operational scenario + * + * These states apply to both Tx and Rx paths. 
These do not + * reflect the sub-state the state machine may be in + */ +enum ipa_hw_offload_channel_states { + IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2, + IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3, + IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF +}; + + +/** + * enum ipa_hw_2_cpu_offload_cmd_resp_status - Values that represent + * offload related command response status to be sent to CPU. + */ +enum ipa_hw_2_cpu_offload_cmd_resp_status { + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0), + IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1), + IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2), + IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3), + IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4), + IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5), + IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6), + IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7), + IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8), + IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9), + IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10), + IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11), + IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12), + IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13), + IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14), +}; + +/** + * enum ipa_hw_2_cpu_cmd_resp_status - Values that represent WDI 
related + * command response status to be sent to CPU. + */ +enum ipa_hw_2_cpu_cmd_resp_status { + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), + IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1), + IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2), + IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3), + IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4), + IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5), + IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6), + IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7), + IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8), + IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9), + IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10), + IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11), + IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12), + IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13), + IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14), + IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15), + IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16), +}; + +/** + * struct IpaHwSetUpCmd - + * + * + */ +union IpaHwSetUpCmd { + struct IpaHwNtnSetUpCmdData_t NtnSetupCh_params; + struct IpaHwWdi3SetUpCmdData_t Wdi3SetupCh_params; +} __packed; + +/** + * struct IpaHwOffloadSetUpCmdData_t - + * + * + */ +struct IpaHwOffloadSetUpCmdData_t { + u8 protocol; + union IpaHwSetUpCmd SetupCh_params; +} __packed; + +/** + * struct IpaHwCommonChCmd - Structure holding the 
parameters + * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN + * + * + */ +union IpaHwCommonChCmd { + union IpaHwNtnCommonChCmdData_t NtnCommonCh_params; + union IpaHwWdi3CommonChCmdData_t Wdi3CommonCh_params; +} __packed; + +struct IpaHwOffloadCommonChCmdData_t { + u8 protocol; + union IpaHwCommonChCmd CommonCh_params; +} __packed; + +#endif /* _IPA_UC_OFFLOAD_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c new file mode 100644 index 000000000000..e76657e5ac20 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c @@ -0,0 +1,1892 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. + */ +#include "ipa_i.h" +#include +#include +#include "ipa_qmi_service.h" + +#define IPA_HOLB_TMR_DIS 0x0 + +#define IPA_HW_INTERFACE_WDI_VERSION 0x0001 +#define IPA_HW_WDI_RX_MBOX_START_INDEX 48 +#define IPA_HW_WDI_TX_MBOX_START_INDEX 50 +#define IPA_WDI_RING_ALIGNMENT 8 + +#define IPA_WDI_CONNECTED BIT(0) +#define IPA_WDI_ENABLED BIT(1) +#define IPA_WDI_RESUMED BIT(2) +#define IPA_UC_POLL_SLEEP_USEC 100 + +struct ipa_wdi_res { + struct ipa_wdi_buffer_info *res; + unsigned int nents; + bool valid; +}; + +static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES]; + +static void ipa_uc_wdi_loaded_handler(void); + +/** + * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to + * CPU. + * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error + * in WDI + */ +enum ipa_hw_2_cpu_wdi_events { + IPA_HW_2_CPU_EVENT_WDI_ERROR = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), +}; + +/** + * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state + * machine. + * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but + * disabled + * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in + * suspended state + * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. 
Entered after + * SET_UP_COMMAND is processed successfully + * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state + * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in + * operational scenario + * + * These states apply to both Tx and Rx paths. These do not reflect the + * sub-state the state machine may be in. + */ +enum ipa_hw_wdi_channel_states { + IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1, + IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2, + IPA_HW_WDI_CHANNEL_STATE_RUNNING = 3, + IPA_HW_WDI_CHANNEL_STATE_ERROR = 4, + IPA_HW_WDI_CHANNEL_STATE_INVALID = 0xFF +}; + +/** + * enum ipa_cpu_2_hw_commands - Values that represent the WDI commands from CPU + * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path + * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path + * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path + * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel + * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel + * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel + * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel + * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/ Rx Path + */ +enum ipa_cpu_2_hw_wdi_commands { + IPA_CPU_2_HW_CMD_WDI_TX_SET_UP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0), + IPA_CPU_2_HW_CMD_WDI_RX_SET_UP = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1), + IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2), + IPA_CPU_2_HW_CMD_WDI_CH_ENABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3), + IPA_CPU_2_HW_CMD_WDI_CH_DISABLE = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4), + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5), + IPA_CPU_2_HW_CMD_WDI_CH_RESUME = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6), + IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN = + FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7), +}; + +/** + * enum ipa_hw_wdi_errors - WDI specific error types. 
+ * @IPA_HW_WDI_ERROR_NONE : No error persists + * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel + */ +enum ipa_hw_wdi_errors { + IPA_HW_WDI_ERROR_NONE = 0, + IPA_HW_WDI_CHANNEL_ERROR = 1 +}; + +/** + * enum ipa_hw_wdi_ch_errors = List of WDI Channel error types. This is present + * in the event param. + * @IPA_HW_WDI_CH_ERR_NONE : No error persists + * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx + * Completion ring + * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition + * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating num RE to bring + * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use + */ +enum ipa_hw_wdi_ch_errors { + IPA_HW_WDI_CH_ERR_NONE = 0, + IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1, + IPA_HW_WDI_TX_FSM_ERROR = 2, + IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL = 3, + IPA_HW_WDI_CH_ERR_RESERVED = 0xFF +}; + +/** + * struct IpaHwSharedMemWdiMapping_t - Structure referring to the common and + * WDI section of 128B shared memory located in offset zero of SW Partition in + * IPA SRAM. + * + * The shared memory is used for communication between IPA HW and CPU. + */ +struct IpaHwSharedMemWdiMapping_t { + struct IpaHwSharedMemCommonMapping_t common; + u32 reserved_2B_28; + u32 reserved_2F_2C; + u32 reserved_33_30; + u32 reserved_37_34; + u32 reserved_3B_38; + u32 reserved_3F_3C; + u16 interfaceVersionWdi; + u16 reserved_43_42; + u8 wdi_tx_ch_0_state; + u8 wdi_rx_ch_0_state; + u16 reserved_47_46; +} __packed; + +/** + * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command. 
+ * @comp_ring_base_pa : This is the physical address of the base of the Tx + * completion ring + * @comp_ring_size : This is the size of the Tx completion ring + * @reserved_comp_ring : Reserved field for expansion of Completion ring params + * @ce_ring_base_pa : This is the physical address of the base of the Copy + * Engine Source Ring + * @ce_ring_size : Copy Engine Ring size + * @reserved_ce_ring : Reserved field for expansion of CE ring params + * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the + * IPA uC has to write into to trigger the copy engine + * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring + * and the Tx completion ring has to be atleast ( num_tx_buffers + 1) + * @ipa_pipe_number : This is the IPA pipe number that has to be used for the + * Tx path + * @reserved : Reserved field + * + * Parameters are sent as pointer thus should be reside in address accessible + * to HW + */ +struct IpaHwWdiTxSetUpCmdData_t { + u32 comp_ring_base_pa; + u16 comp_ring_size; + u16 reserved_comp_ring; + u32 ce_ring_base_pa; + u16 ce_ring_size; + u16 reserved_ce_ring; + u32 ce_ring_doorbell_pa; + u16 num_tx_buffers; + u8 ipa_pipe_number; + u8 reserved; +} __packed; + +struct IpaHwWdi2TxSetUpCmdData_t { + u32 comp_ring_base_pa; + u32 comp_ring_base_pa_hi; + u16 comp_ring_size; + u16 reserved_comp_ring; + u32 ce_ring_base_pa; + u32 ce_ring_base_pa_hi; + u16 ce_ring_size; + u16 reserved_ce_ring; + u32 ce_ring_doorbell_pa; + u32 ce_ring_doorbell_pa_hi; + u16 num_tx_buffers; + u8 ipa_pipe_number; + u8 reserved; +} __packed; +/** + * struct IpaHwWdiRxSetUpCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command. 
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring + * (containing Rx buffers) + * @rx_ring_size : This is the size of the Rx ring + * @rx_ring_rp_pa : This is the physical address of the location through which + * IPA uc is expected to communicate about the Read pointer into the Rx Ring + * @ipa_pipe_number : This is the IPA pipe number that has to be used for the + * Rx path + * + * Parameters are sent as pointer thus should be reside in address accessible + * to HW + */ +struct IpaHwWdiRxSetUpCmdData_t { + u32 rx_ring_base_pa; + u32 rx_ring_size; + u32 rx_ring_rp_pa; + u8 ipa_pipe_number; +} __packed; + +struct IpaHwWdi2RxSetUpCmdData_t { + u32 rx_ring_base_pa; + u32 rx_ring_base_pa_hi; + u32 rx_ring_size; + u32 rx_ring_rp_pa; + u32 rx_ring_rp_pa_hi; + u32 rx_comp_ring_base_pa; + u32 rx_comp_ring_base_pa_hi; + u32 rx_comp_ring_size; + u32 rx_comp_ring_wp_pa; + u32 rx_comp_ring_wp_pa_hi; + u8 ipa_pipe_number; +} __packed; +/** + * union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command. + * @ipa_pipe_number : The IPA pipe number for which this config is passed + * @qmap_id : QMAP ID to be set in the metadata register + * @reserved : Reserved + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwWdiRxExtCfgCmdData_t { + struct IpaHwWdiRxExtCfgCmdParams_t { + u32 ipa_pipe_number:8; + u32 qmap_id:8; + u32 reserved:16; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwWdiCommonChCmdData_t - Structure holding the parameters for + * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN, + * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE, + * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE, + * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command. + * @ipa_pipe_number : The IPA pipe number. 
This could be Tx or an Rx pipe + * @reserved : Reserved + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwWdiCommonChCmdData_t { + struct IpaHwWdiCommonChCmdParams_t { + u32 ipa_pipe_number:8; + u32 reserved:24; + } __packed params; + u32 raw32b; +} __packed; + +/** + * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR + * event. + * @wdi_error_type : The IPA pipe number to be torn down. This could be Tx or + * an Rx pipe + * @reserved : Reserved + * @ipa_pipe_number : IPA pipe number on which error has happened. Applicable + * only if error type indicates channel error + * @wdi_ch_err_type : Information about the channel error (if available) + * + * The parameters are passed as immediate params in the shared memory + */ +union IpaHwWdiErrorEventData_t { + struct IpaHwWdiErrorEventParams_t { + u32 wdi_error_type:8; + u32 reserved:8; + u32 ipa_pipe_number:8; + u32 wdi_ch_err_type:8; + } __packed params; + u32 raw32b; +} __packed; + +static void ipa_uc_wdi_event_log_info_handler( +struct IpaHwEventLogInfoData_t *uc_event_top_mmio) + +{ + if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_WDI)) == 0) { + IPAERR("WDI feature missing 0x%x\n", + uc_event_top_mmio->featureMask); + return; + } + +if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].params.size != +sizeof(struct IpaHwStatsWDIInfoData_t)) { + IPAERR("wdi stats sz invalid exp=%zu is=%u\n", + sizeof(struct IpaHwStatsWDIInfoData_t), + uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].params.size + ); + return; +} + +ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = +uc_event_top_mmio->statsInfo.baseAddrOffset + +uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].params.offset; +IPAERR("WDI stats ofst=0x%x\n", ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst); + + if (ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst + + sizeof(struct IpaHwStatsWDIInfoData_t) >= + ipa_ctx->ctrl->ipa_reg_base_ofst + + 
IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) + + ipa_ctx->smem_sz) { + IPAERR("uc_wdi_stats 0x%x outside SRAM\n", + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst); + return; + } + + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio = + ioremap(ipa_ctx->ipa_wrapper_base + + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst, + sizeof(struct IpaHwStatsWDIInfoData_t)); + if (!ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) { + IPAERR("fail to ioremap uc wdi stats\n"); + return; + } +} + +static void ipa_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t + *uc_sram_mmio) + +{ + union IpaHwWdiErrorEventData_t wdi_evt; + struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext; + + if (uc_sram_mmio->eventOp == + IPA_HW_2_CPU_EVENT_WDI_ERROR) { + wdi_evt.raw32b = uc_sram_mmio->eventParams; + IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n", + wdi_evt.params.wdi_error_type, + wdi_evt.params.ipa_pipe_number, + wdi_evt.params.wdi_ch_err_type); + wdi_sram_mmio_ext = + (struct IpaHwSharedMemWdiMapping_t *) + uc_sram_mmio; + IPADBG("tx_ch_state=%u rx_ch_state=%u\n", + wdi_sram_mmio_ext->wdi_tx_ch_0_state, + wdi_sram_mmio_ext->wdi_rx_ch_0_state); + } +} + +/** + * ipa2_get_wdi_stats() - Query WDI statistics from uc + * @stats: [inout] stats blob from client populated by driver + * + * Returns: 0 on success, negative on failure + * + * @note Cannot be called from atomic context + * + */ +int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) +{ +#define TX_STATS(y) stats->tx_ch_stats.y = \ + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y +#define RX_STATS(y) stats->rx_ch_stats.y = \ + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (!stats || !ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) { + IPAERR("bad parms stats=%p wdi_stats=%p\n", + stats, + ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio); + return -EINVAL; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + TX_STATS(num_pkts_processed); + 
TX_STATS(copy_engine_doorbell_value); + TX_STATS(num_db_fired); + TX_STATS(tx_comp_ring_stats.ringFull); + TX_STATS(tx_comp_ring_stats.ringEmpty); + TX_STATS(tx_comp_ring_stats.ringUsageHigh); + TX_STATS(tx_comp_ring_stats.ringUsageLow); + TX_STATS(tx_comp_ring_stats.RingUtilCount); + TX_STATS(bam_stats.bamFifoFull); + TX_STATS(bam_stats.bamFifoEmpty); + TX_STATS(bam_stats.bamFifoUsageHigh); + TX_STATS(bam_stats.bamFifoUsageLow); + TX_STATS(bam_stats.bamUtilCount); + TX_STATS(num_db); + TX_STATS(num_unexpected_db); + TX_STATS(num_bam_int_handled); + TX_STATS(num_bam_int_in_non_running_state); + TX_STATS(num_qmb_int_handled); + TX_STATS(num_bam_int_handled_while_wait_for_bam); + + RX_STATS(max_outstanding_pkts); + RX_STATS(num_pkts_processed); + RX_STATS(rx_ring_rp_value); + RX_STATS(rx_ind_ring_stats.ringFull); + RX_STATS(rx_ind_ring_stats.ringEmpty); + RX_STATS(rx_ind_ring_stats.ringUsageHigh); + RX_STATS(rx_ind_ring_stats.ringUsageLow); + RX_STATS(rx_ind_ring_stats.RingUtilCount); + RX_STATS(bam_stats.bamFifoFull); + RX_STATS(bam_stats.bamFifoEmpty); + RX_STATS(bam_stats.bamFifoUsageHigh); + RX_STATS(bam_stats.bamFifoUsageLow); + RX_STATS(bam_stats.bamUtilCount); + RX_STATS(num_bam_int_handled); + RX_STATS(num_db); + RX_STATS(num_unexpected_db); + RX_STATS(num_pkts_in_dis_uninit_state); + RX_STATS(num_ic_inj_vdev_change); + RX_STATS(num_ic_inj_fw_desc_change); + RX_STATS(num_qmb_int_handled); + RX_STATS(reserved1); + RX_STATS(reserved2); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +int ipa2_wdi_init(void) +{ + struct ipa_uc_hdlrs uc_wdi_cbs = { 0 }; + + uc_wdi_cbs.ipa_uc_event_hdlr = ipa_uc_wdi_event_handler; + uc_wdi_cbs.ipa_uc_event_log_info_hdlr = + ipa_uc_wdi_event_log_info_handler; + uc_wdi_cbs.ipa_uc_loaded_hdlr = + ipa_uc_wdi_loaded_handler; + + ipa_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs); + + return 0; +} + +static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len, + bool device, unsigned long *iova) +{ + struct 
ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx(); + unsigned long va = roundup(cb->next_addr, PAGE_SIZE); + int prot = IOMMU_READ | IOMMU_WRITE; + size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE), + PAGE_SIZE); + int ret; + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + ret = ipa_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE), + true_len, + device ? (prot | IOMMU_MMIO) : prot); + if (ret) { + IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len); + return -EINVAL; + } + + ipa_ctx->wdi_map_cnt++; + cb->next_addr = va + true_len; + *iova = va + pa - rounddown(pa, PAGE_SIZE); + return 0; +} + +static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt, + unsigned long *iova) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx(); + unsigned long va = roundup(cb->next_addr, PAGE_SIZE); + int prot = IOMMU_READ | IOMMU_WRITE; + int ret; + int i; + struct scatterlist *sg; + unsigned long start_iova = va; + phys_addr_t phys; + size_t len; + int count = 0; + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + if (!sgt) { + IPAERR("Bad parameters, scatter / gather list is NULL\n"); + return -EINVAL; + } + + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + phys = page_to_phys(sg_page(sg)); + len = PAGE_ALIGN(sg->offset + sg->length); + + ret = ipa_iommu_map(cb->mapping->domain, va, phys, len, prot); + if (ret) { + IPAERR("iommu map failed for pa=%pa len=%zu\n", + &phys, len); + goto bad_mapping; + } + va += len; + ipa_ctx->wdi_map_cnt++; + count++; + } + cb->next_addr = va; + *iova = start_iova; + + return 0; + +bad_mapping: + for_each_sg(sgt->sgl, sg, count, i) + iommu_unmap(cb->mapping->domain, sg_dma_address(sg), + sg_dma_len(sg)); + return -EINVAL; +} + +static void ipa_release_uc_smmu_mappings(enum ipa_client_type client) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx(); + int i; + int j; + int start; + int end; + + if (IPA_CLIENT_IS_CONS(client)) { + start = 
IPA_WDI_TX_RING_RES; + end = IPA_WDI_CE_DB_RES; + } else { + start = IPA_WDI_RX_RING_RES; + if (ipa_ctx->ipa_wdi2) + end = IPA_WDI_RX_COMP_RING_WP_RES; + else + end = IPA_WDI_RX_RING_RP_RES; + } + + for (i = start; i <= end; i++) { + if (wdi_res[i].valid) { + for (j = 0; j < wdi_res[i].nents; j++) { + iommu_unmap(cb->mapping->domain, + wdi_res[i].res[j].iova, + wdi_res[i].res[j].size); + ipa_ctx->wdi_map_cnt--; + } + kfree(wdi_res[i].res); + wdi_res[i].valid = false; + } + } + + if (ipa_ctx->wdi_map_cnt == 0) + cb->next_addr = cb->va_end; + +} + +static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa, + unsigned long iova, size_t len) +{ + IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &pa, iova, len); + wdi_res[res_idx].res = kzalloc(sizeof(*wdi_res[res_idx].res), + GFP_KERNEL); + if (!wdi_res[res_idx].res) { + WARN_ON(1); + return; + } + wdi_res[res_idx].nents = 1; + wdi_res[res_idx].valid = true; + wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE); + wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE); + wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa, + PAGE_SIZE), PAGE_SIZE); + IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova, + wdi_res[res_idx].res->size); +} + +static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt, + unsigned long iova) +{ + int i; + struct scatterlist *sg; + unsigned long curr_iova = iova; + + if (!sgt) { + IPAERR("Bad parameters, scatter / gather list is NULL\n"); + return; + } + + wdi_res[res_idx].res = kcalloc(sgt->nents, + sizeof(*wdi_res[res_idx].res), GFP_KERNEL); + if (!wdi_res[res_idx].res) { + WARN_ON(1); + return; + } + wdi_res[res_idx].nents = sgt->nents; + wdi_res[res_idx].valid = true; + for_each_sg(sgt->sgl, sg, sgt->nents, i) { + wdi_res[res_idx].res[i].pa = page_to_phys(sg_page(sg)); + wdi_res[res_idx].res[i].iova = curr_iova; + wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset + + 
sg->length); + IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx, + &wdi_res[res_idx].res[i].pa, + wdi_res[res_idx].res[i].iova, + wdi_res[res_idx].res[i].size); + curr_iova += wdi_res[res_idx].res[i].size; + } +} + +int ipa2_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en, + phys_addr_t pa, struct sg_table *sgt, size_t len, bool device, + unsigned long *iova) +{ + /* support for SMMU on WLAN but no SMMU on IPA */ + if (wlan_smmu_en && ipa_ctx->smmu_s1_bypass) { + IPAERR("Unsupported SMMU pairing\n"); + return -EINVAL; + } + + /* legacy: no SMMUs on either end */ + if (!wlan_smmu_en && ipa_ctx->smmu_s1_bypass) { + *iova = pa; + return 0; + } + + /* no SMMU on WLAN but SMMU on IPA */ + if (!wlan_smmu_en && !ipa_ctx->smmu_s1_bypass) { + if (ipa_create_uc_smmu_mapping_pa(pa, len, + (res_idx == IPA_WDI_CE_DB_RES) ? true : false, iova)) { + IPAERR("Fail to create mapping res %d\n", res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + return 0; + } + + /* SMMU on WLAN and SMMU on IPA */ + if (wlan_smmu_en && !ipa_ctx->smmu_s1_bypass) { + switch (res_idx) { + case IPA_WDI_RX_RING_RP_RES: + case IPA_WDI_CE_DB_RES: + if (ipa_create_uc_smmu_mapping_pa(pa, len, + (res_idx == IPA_WDI_CE_DB_RES) ? 
true : false, + iova)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len); + break; + case IPA_WDI_RX_RING_RES: + case IPA_WDI_TX_RING_RES: + case IPA_WDI_CE_RING_RES: + if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) { + IPAERR("Fail to create mapping res %d\n", + res_idx); + return -EFAULT; + } + ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova); + break; + default: + WARN_ON(1); + } + } + + return 0; +} + +/** + * ipa2_connect_wdi_pipe() - WDI client connect + * @in: [in] input parameters from client + * @out: [out] output params to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in, + struct ipa_wdi_out_params *out) +{ + int ipa_ep_idx; + int result = -EFAULT; + struct ipa_ep_context *ep; + struct ipa_mem_buffer cmd; + struct IpaHwWdiTxSetUpCmdData_t *tx; + struct IpaHwWdiRxSetUpCmdData_t *rx; + struct IpaHwWdi2TxSetUpCmdData_t *tx_2; + struct IpaHwWdi2RxSetUpCmdData_t *rx_2; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + unsigned long va; + phys_addr_t pa; + u32 len; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) { + IPAERR("bad parm. 
in=%p out=%p\n", in, out); + if (in) + IPAERR("client = %d\n", in->sys.client); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT || + in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on TX\n"); + return -EINVAL; + } + } else { + if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) { + IPAERR("alignment failure on RX\n"); + return -EINVAL; + } + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + ipa_ep_idx = ipa2_get_ep_mapping(in->sys.client); + if (ipa_ep_idx == -1) { + IPAERR("fail to alloc EP.\n"); + goto fail; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + if (ep->valid) { + IPAERR("EP already allocated.\n"); + goto fail; + } + + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); + IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client); + + IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx); + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (ipa_ctx->ipa_wdi2) + cmd.size = sizeof(*tx_2); + else + cmd.size = sizeof(*tx); + IPADBG("comp_ring_base_pa=0x%pa\n", + &in->u.dl.comp_ring_base_pa); + IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size); + IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa); + IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size); + IPADBG("ce_ring_doorbell_pa=0x%pa\n", + &in->u.dl.ce_door_bell_pa); + IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers); + } else { + if (ipa_ctx->ipa_wdi2) { + /* WDI2.0 feature */ + cmd.size = sizeof(*rx_2); + IPADBG("rdy_ring_rp value =%d\n", + *in->u.ul.rdy_ring_rp_va); + IPADBG("rx_comp_ring_wp value=%d\n", + *in->u.ul.rdy_comp_ring_wp_va); + ipa_ctx->uc_ctx.rdy_ring_rp_va = + in->u.ul.rdy_ring_rp_va; + ipa_ctx->uc_ctx.rdy_comp_ring_wp_va = + in->u.ul.rdy_comp_ring_wp_va; + } else { + cmd.size = sizeof(*rx); + } + IPADBG("rx_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_ring_base_pa); + IPADBG("rx_ring_size=%d\n", + in->u.ul.rdy_ring_size); + 
IPADBG("rx_ring_rp_pa=0x%pa\n", + &in->u.ul.rdy_ring_rp_pa); + + IPADBG("rx_comp_ring_base_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_base_pa); + IPADBG("rx_comp_ring_size=%d\n", + in->u.ul.rdy_comp_ring_size); + IPADBG("rx_comp_ring_wp_pa=0x%pa\n", + &in->u.ul.rdy_comp_ring_wp_pa); + + ipa_ctx->uc_ctx.rdy_ring_base_pa = + in->u.ul.rdy_ring_base_pa; + ipa_ctx->uc_ctx.rdy_ring_rp_pa = + in->u.ul.rdy_ring_rp_pa; + ipa_ctx->uc_ctx.rdy_ring_size = + in->u.ul.rdy_ring_size; + ipa_ctx->uc_ctx.rdy_comp_ring_base_pa = + in->u.ul.rdy_comp_ring_base_pa; + ipa_ctx->uc_ctx.rdy_comp_ring_wp_pa = + in->u.ul.rdy_comp_ring_wp_pa; + ipa_ctx->uc_ctx.rdy_comp_ring_size = + in->u.ul.rdy_comp_ring_size; + + /* check if the VA is empty */ + if (!in->u.ul.rdy_ring_rp_va && ipa_ctx->ipa_wdi2) { + IPAERR("rdy_ring_rp_va is empty, wdi2.0(%d)\n", + ipa_ctx->ipa_wdi2); + goto dma_alloc_fail; + } + if (!in->u.ul.rdy_comp_ring_wp_va && ipa_ctx->ipa_wdi2) { + IPAERR("comp_ring_wp_va is empty, wdi2.0(%d)\n", + ipa_ctx->ipa_wdi2); + goto dma_alloc_fail; + } + } + + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + result = -ENOMEM; + goto dma_alloc_fail; + } + + if (IPA_CLIENT_IS_CONS(in->sys.client)) { + if (ipa_ctx->ipa_wdi2) { + tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? 
in->u.dl_smmu.comp_ring_size : + in->u.dl.comp_ring_size; + IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.comp_ring_size, + in->u.dl.comp_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + in->smmu_enabled, + in->u.dl.comp_ring_base_pa, + &in->u.dl_smmu.comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping TX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->comp_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF); + tx_2->comp_ring_size = len; + IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n", + tx_2->comp_ring_base_pa_hi, + tx_2->comp_ring_base_pa); + + len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : + in->u.dl.ce_ring_size; + IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.ce_ring_size, + in->u.dl.ce_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + in->smmu_enabled, + in->u.dl.ce_ring_base_pa, + &in->u.dl_smmu.ce_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping CE ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->ce_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF); + tx_2->ce_ring_size = len; + IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n", + tx_2->ce_ring_base_pa_hi, + tx_2->ce_ring_base_pa); + + pa = in->smmu_enabled ? 
in->u.dl_smmu.ce_door_bell_pa : + in->u.dl.ce_door_bell_pa; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + in->smmu_enabled, + pa, + NULL, + 4, + true, + &va)) { + IPAERR("fail to create uc mapping CE DB.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx_2->ce_ring_doorbell_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n", + tx_2->ce_ring_doorbell_pa_hi, + tx_2->ce_ring_doorbell_pa); + + tx_2->num_tx_buffers = in->u.dl.num_tx_buffers; + tx_2->ipa_pipe_number = ipa_ep_idx; + } else { + tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base; + len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size : + in->u.dl.comp_ring_size; + IPADBG("TX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.comp_ring_size, + in->u.dl.comp_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + in->smmu_enabled, + in->u.dl.comp_ring_base_pa, + &in->u.dl_smmu.comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping TX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->comp_ring_base_pa = va; + tx->comp_ring_size = len; + + len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size : + in->u.dl.ce_ring_size; + IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.dl_smmu.ce_ring_size, + in->u.dl.ce_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + in->smmu_enabled, + in->u.dl.ce_ring_base_pa, + &in->u.dl_smmu.ce_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping CE ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->ce_ring_base_pa = va; + tx->ce_ring_size = len; + pa = in->smmu_enabled ? 
in->u.dl_smmu.ce_door_bell_pa : + in->u.dl.ce_door_bell_pa; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + in->smmu_enabled, + pa, + NULL, + 4, + true, + &va)) { + IPAERR("fail to create uc mapping CE DB.\n"); + result = -ENOMEM; + goto uc_timeout; + } + tx->ce_ring_doorbell_pa = va; + tx->num_tx_buffers = in->u.dl.num_tx_buffers; + tx->ipa_pipe_number = ipa_ep_idx; + } + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + out->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } else { + out->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_0 + + IPA_UC_MAILBOX_m_n_OFFS( + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } + } else { + if (ipa_ctx->ipa_wdi2) { + rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size : + in->u.ul.rdy_ring_size; + IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_ring_size, + in->u.ul.rdy_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_ring_base_pa, + &in->u.ul_smmu.rdy_ring, + len, + false, + &va)) { + IPAERR("fail to create uc RX_2 ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF); + rx_2->rx_ring_size = len; + IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_ring_base_pa_hi, + rx_2->rx_ring_base_pa); + + pa = in->smmu_enabled ? 
in->u.ul_smmu.rdy_ring_rp_pa : + in->u.ul.rdy_ring_rp_pa; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc RX_2 rng RP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_ring_rp_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n", + rx_2->rx_ring_rp_pa_hi, + rx_2->rx_ring_rp_pa); + len = in->smmu_enabled ? + in->u.ul_smmu.rdy_comp_ring_size : + in->u.ul.rdy_comp_ring_size; + IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_comp_ring_size, + in->u.ul.rdy_comp_ring_size); + if (ipa2_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_comp_ring_base_pa, + &in->u.ul_smmu.rdy_comp_ring, + len, + false, + &va)) { + IPAERR("fail to create uc RX_2 comp_ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_comp_ring_base_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF); + rx_2->rx_comp_ring_size = len; + IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_comp_ring_base_pa_hi, + rx_2->rx_comp_ring_base_pa); + + pa = in->smmu_enabled ? + in->u.ul_smmu.rdy_comp_ring_wp_pa : + in->u.ul.rdy_comp_ring_wp_pa; + if (ipa2_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_WP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc RX_2 comp_rng WP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx_2->rx_comp_ring_wp_pa_hi = + (u32) ((va & 0xFFFFFFFF00000000) >> 32); + rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF); + IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n", + rx_2->rx_comp_ring_wp_pa_hi, + rx_2->rx_comp_ring_wp_pa); + rx_2->ipa_pipe_number = ipa_ep_idx; + } else { + rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base; + + len = in->smmu_enabled ? 
in->u.ul_smmu.rdy_ring_size : + in->u.ul.rdy_ring_size; + IPADBG("RX ring smmu_en=%d ring_size=%d %d\n", + in->smmu_enabled, + in->u.ul_smmu.rdy_ring_size, + in->u.ul.rdy_ring_size); + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + in->smmu_enabled, + in->u.ul.rdy_ring_base_pa, + &in->u.ul_smmu.rdy_ring, + len, + false, + &va)) { + IPAERR("fail to create uc mapping RX ring.\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx->rx_ring_base_pa = va; + rx->rx_ring_size = len; + + pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa : + in->u.ul.rdy_ring_rp_pa; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + in->smmu_enabled, + pa, + NULL, + 4, + false, + &va)) { + IPAERR("fail to create uc mapping RX rng RP\n"); + result = -ENOMEM; + goto uc_timeout; + } + rx->rx_ring_rp_pa = va; + rx->ipa_pipe_number = ipa_ep_idx; + } + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + out->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } else { + out->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_0 + + IPA_UC_MAILBOX_m_n_OFFS( + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } + } + + ep->valid = 1; + ep->client = in->sys.client; + ep->keep_ipa_awake = in->sys.keep_ipa_awake; + result = ipa_disable_data_path(ipa_ep_idx); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx); + goto uc_timeout; + } + if (IPA_CLIENT_IS_PROD(in->sys.client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa2_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl); + } + + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CLIENT_IS_CONS(in->sys.client) ? 
+ IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : + IPA_CPU_2_HW_CMD_WDI_RX_SET_UP, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + ep->skip_ep_cfg = in->sys.skip_ep_cfg; + ep->client_notify = in->sys.notify; + ep->priv = in->sys.priv; + + /* for AP+STA stats update */ + if (in->wdi_notify) + ipa_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify; + else + IPADBG("in->wdi_notify is null\n"); + + if (!ep->skip_ep_cfg) { + if (ipa2_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) { + IPAERR("fail to configure EP.\n"); + goto ipa_cfg_ep_fail; + } + IPADBG("ep configuration successful\n"); + } else { + IPADBG("Skipping endpoint configuration.\n"); + } + + out->clnt_hdl = ipa_ep_idx; + + if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client)) + ipa_install_dflt_flt_rules(ipa_ep_idx); + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); + + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + ep->uc_offload_state |= IPA_WDI_CONNECTED; + IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx); + + return 0; + +ipa_cfg_ep_fail: + memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context)); +uc_timeout: + ipa_release_uc_smmu_mappings(in->sys.client); + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); +dma_alloc_fail: + IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client); +fail: + return result; +} + + +/** + * ipa2_disconnect_wdi_pipe() - WDI client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_disconnect_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t tear; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + 
IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + if (!ep->keep_ipa_awake) + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + tear.params.ipa_pipe_number = clnt_hdl; + + result = ipa_uc_send_cmd(tear.raw32b, + IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + ipa_delete_dflt_flt_rules(clnt_hdl); + ipa_release_uc_smmu_mappings(ep->client); + + memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context)); + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) disconnected\n", clnt_hdl); + + /* for AP+STA stats update */ + if (ipa_ctx->uc_wdi_ctx.stats_notify) + ipa_ctx->uc_wdi_ctx.stats_notify = NULL; + else + IPADBG("uc_wdi_ctx.stats_notify already null\n"); + +uc_timeout: + return result; +} + +/** + * ipa2_enable_wdi_pipe() - WDI client enable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_enable_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t enable; + struct ipa_ep_cfg_holb holb_cfg; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != IPA_WDI_CONNECTED) { + IPAERR("WDI channel bad 
state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + enable.params.ipa_pipe_number = clnt_hdl; + + result = ipa_uc_send_cmd(enable.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_ENABLE, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + memset(&holb_cfg, 0, sizeof(holb_cfg)); + holb_cfg.en = IPA_HOLB_TMR_DIS; + holb_cfg.tmr_val = 0; + result = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg); + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + ep->uc_offload_state |= IPA_WDI_ENABLED; + IPADBG("client (ep: %d) enabled\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa2_disable_wdi_pipe() - WDI client disable + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_disable_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t disable; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 prod_hdl; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + result = ipa_disable_data_path(clnt_hdl); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + clnt_hdl); + result = -EPERM; + goto uc_timeout; + } + + /** + * To avoid data stall during 
continuous SAP on/off before + * setting delay to IPA Consumer pipe, remove delay and enable + * holb on IPA Producer pipe + */ + if (IPA_CLIENT_IS_PROD(ep->client)) { + + IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n", + clnt_hdl, ep->client); + + /* remove delay on wlan-prod pipe*/ + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + + prod_hdl = ipa2_get_ep_mapping(IPA_CLIENT_WLAN1_CONS); + if (ipa_ctx->ep[prod_hdl].valid == 1) { + result = ipa_disable_data_path(prod_hdl); + if (result) { + IPAERR("disable data path failed\n"); + IPAERR("res=%d clnt=%d\n", + result, prod_hdl); + result = -EPERM; + goto uc_timeout; + } + } + usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC, + IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC); + } + + disable.params.ipa_pipe_number = clnt_hdl; + + result = ipa_uc_send_cmd(disable.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_DISABLE, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + /* Set the delay after disabling IPA Producer pipe */ + if (IPA_CLIENT_IS_PROD(ep->client)) { + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = true; + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + ep->uc_offload_state &= ~IPA_WDI_ENABLED; + IPADBG("client (ep: %d) disabled\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa2_resume_wdi_pipe() - WDI client resume + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_resume_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t resume; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if 
(clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + resume.params.ipa_pipe_number = clnt_hdl; + + result = ipa_uc_send_cmd(resume.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_RESUME, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) fail un-susp/delay result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl); + + ep->uc_offload_state |= IPA_WDI_RESUMED; + IPADBG("client (ep: %d) resumed\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa2_suspend_wdi_pipe() - WDI client suspend + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_suspend_wdi_pipe(u32 clnt_hdl) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiCommonChCmdData_t suspend; + struct ipa_ep_cfg_ctrl ep_cfg_ctrl; + u32 source_pipe_bitmask = 0; + bool disable_force_clear = false; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if 
(ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED | + IPA_WDI_RESUMED)) { + IPAERR("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + suspend.params.ipa_pipe_number = clnt_hdl; + + if (IPA_CLIENT_IS_PROD(ep->client)) { + /* + * For WDI 2.0 need to ensure pipe will be empty before suspend + * as IPA uC will fail to suspend the pipe otherwise. + */ + if (ipa_ctx->ipa_wdi2) { + source_pipe_bitmask = 1 << + ipa_get_ep_mapping(ep->client); + result = ipa2_enable_force_clear(clnt_hdl, + false, source_pipe_bitmask); + if (result) { + /* + * assuming here modem SSR, AP can remove + * the delay in this case + */ + IPAERR("failed to force clear %d\n", result); + IPAERR("remove delay from SCND reg\n"); + memset(&ep_cfg_ctrl, 0, + sizeof(struct ipa_ep_cfg_ctrl)); + ep_cfg_ctrl.ipa_ep_delay = false; + ep_cfg_ctrl.ipa_ep_suspend = false; + ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + } else { + disable_force_clear = true; + } + } + IPADBG("Post suspend event first for IPA Producer\n"); + IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl); + result = ipa_uc_send_cmd(suspend.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + } + + memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl)); + if (IPA_CLIENT_IS_CONS(ep->client)) { + ep_cfg_ctrl.ipa_ep_suspend = true; + result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) failed to suspend result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) suspended\n", clnt_hdl); + } else { + ep_cfg_ctrl.ipa_ep_delay = true; + result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl); + if (result) + IPAERR("client (ep: %d) failed to delay result=%d\n", + clnt_hdl, result); + else + IPADBG("client (ep: %d) delayed\n", clnt_hdl); + } + + if (IPA_CLIENT_IS_CONS(ep->client)) { + result = ipa_uc_send_cmd(suspend.raw32b, + IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND, + 
IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + } + + if (disable_force_clear) + ipa2_disable_force_clear(clnt_hdl); + + ipa_ctx->tag_process_before_gating = true; + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + ep->uc_offload_state &= ~IPA_WDI_RESUMED; + IPADBG("client (ep: %d) suspended\n", clnt_hdl); + +uc_timeout: + return result; +} + +/** + * ipa_broadcast_wdi_quota_reach_ind() - quota reach + * @uint32_t fid: [in] input netdev ID + * @uint64_t num_bytes: [in] used bytes + * + * Returns: 0 on success, negative on failure + */ +int ipa2_broadcast_wdi_quota_reach_ind(uint32_t fid, + uint64_t num_bytes) +{ + IPAERR("Quota reached indication on fis(%d) Mbytes(%lu)\n", + fid, + (unsigned long) num_bytes); + ipa_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN); + return 0; +} + +int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id) +{ + int result = 0; + struct ipa_ep_context *ep; + union IpaHwWdiRxExtCfgCmdData_t qmap; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0) { + IPAERR_RL("bad parm, %d\n", clnt_hdl); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) + return result; + + IPADBG("ep=%d\n", clnt_hdl); + + ep = &ipa_ctx->ep[clnt_hdl]; + + if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) { + IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + qmap.params.ipa_pipe_number = clnt_hdl; + qmap.params.qmap_id = qmap_id; + + result = ipa_uc_send_cmd(qmap.raw32b, + IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG, + IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS, + false, 10*HZ); + + if (result) { + result = -EFAULT; + goto uc_timeout; + } + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id); + +uc_timeout: + return result; +} + +/** + * ipa2_uc_reg_rdyCB() - To register uC + 
* ready CB if uC not ready + * @inout: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa2_uc_reg_rdyCB( + struct ipa_wdi_uc_ready_params *inout) +{ + int result = 0; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (inout == NULL) { + IPAERR("bad parm. inout=%p ", inout); + return -EINVAL; + } + + result = ipa2_uc_state_check(); + if (result) { + inout->is_uC_ready = false; + ipa_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify; + ipa_ctx->uc_wdi_ctx.priv = inout->priv; + } else { + inout->is_uC_ready = true; + } + + return 0; +} + +/** + * ipa2_uc_dereg_rdyCB() - To de-register uC ready CB + * + * Returns: 0 on success, negative on failure + * + */ +int ipa2_uc_dereg_rdyCB(void) +{ + ipa_ctx->uc_wdi_ctx.uc_ready_cb = NULL; + ipa_ctx->uc_wdi_ctx.priv = NULL; + + return 0; +} + + +/** + * ipa2_uc_wdi_get_dbpa() - To retrieve + * doorbell physical address of wlan pipes + * @param: [in/out] input/output parameters + * from/to client + * + * Returns: 0 on success, negative on failure + * + */ +int ipa2_uc_wdi_get_dbpa( + struct ipa_wdi_db_params *param) +{ + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (param == NULL || param->client >= IPA_CLIENT_MAX) { + IPAERR("bad parm. 
param=%p ", param); + if (param) + IPAERR("client = %d\n", param->client); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(param->client)) { + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + param->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } else { + param->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_0 + + IPA_UC_MAILBOX_m_n_OFFS( + IPA_HW_WDI_TX_MBOX_START_INDEX/32, + IPA_HW_WDI_TX_MBOX_START_INDEX % 32); + } + } else { + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + param->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } else { + param->uc_door_bell_pa = + ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_0 + + IPA_UC_MAILBOX_m_n_OFFS( + IPA_HW_WDI_RX_MBOX_START_INDEX/32, + IPA_HW_WDI_RX_MBOX_START_INDEX % 32); + } + } + + return 0; +} + +static void ipa_uc_wdi_loaded_handler(void) +{ + if (!ipa_ctx) { + IPAERR("IPA ctx is null\n"); + return; + } + + if (ipa_ctx->uc_wdi_ctx.uc_ready_cb) { + ipa_ctx->uc_wdi_ctx.uc_ready_cb( + ipa_ctx->uc_wdi_ctx.priv); + + ipa_ctx->uc_wdi_ctx.uc_ready_cb = + NULL; + ipa_ctx->uc_wdi_ctx.priv = NULL; + } +} + +int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx(); + int i; + int ret = 0; + int prot = IOMMU_READ | IOMMU_WRITE; + + if (!info) { + IPAERR("info = %p\n", info); + return -EINVAL; + } + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + for (i = 0; i < num_buffers; i++) { + IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, + &info[i].pa, info[i].iova, info[i].size); + info[i].result = ipa_iommu_map(cb->iommu, + rounddown(info[i].iova, PAGE_SIZE), + rounddown(info[i].pa, PAGE_SIZE), + roundup(info[i].size + 
info[i].pa - + rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE), + prot); + } + + return ret; +} + +int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) +{ + struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx(); + int i; + int ret = 0; + + if (!info) { + IPAERR("info = %p\n", info); + return -EINVAL; + } + + if (!cb->valid) { + IPAERR("No SMMU CB setup\n"); + return -EINVAL; + } + + for (i = 0; i < num_buffers; i++) { + IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, + &info[i].pa, info[i].iova, info[i].size); + info[i].result = iommu_unmap(cb->iommu, + rounddown(info[i].iova, PAGE_SIZE), + roundup(info[i].size + info[i].pa - + rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE)); + } + + return ret; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c new file mode 100644 index 000000000000..95cfc4a28862 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c @@ -0,0 +1,5271 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2012-2018, 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include +#include /* gen_pool_alloc() */ +#include +#include +#include +#include +#include "ipa_i.h" +#include "../ipa_rm_i.h" + +#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL) +#define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL) +#define IPA_V2_0_CLK_RATE_SVS (75 * 1000 * 1000UL) +#define IPA_V2_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL) +#define IPA_V2_0_CLK_RATE_TURBO (200 * 1000 * 1000UL) +#define IPA_V1_MAX_HOLB_TMR_VAL (512 - 1) +#define IPA_V2_0_MAX_HOLB_TMR_VAL (65536 - 1) +#define IPA_V2_5_MAX_HOLB_TMR_VAL (4294967296 - 1) +#define IPA_V2_6L_MAX_HOLB_TMR_VAL IPA_V2_5_MAX_HOLB_TMR_VAL + +#define IPA_V2_0_BW_THRESHOLD_TURBO_MBPS (1000) +#define IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS (600) + +/* Max pipes + ICs for TAG process */ +#define IPA_TAG_MAX_DESC (IPA_MAX_NUM_PIPES + 6) + +#define IPA_TAG_SLEEP_MIN_USEC (1000) +#define IPA_TAG_SLEEP_MAX_USEC (2000) +#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ) +#define IPA_BCR_REG_VAL (0x001FFF7F) +#define IPA_AGGR_GRAN_MIN (1) +#define IPA_AGGR_GRAN_MAX (32) +#define IPA_EOT_COAL_GRAN_MIN (1) +#define IPA_EOT_COAL_GRAN_MAX (16) +#define MSEC 1000 +#define MIN_RX_POLL_TIME 1 +#define MAX_RX_POLL_TIME 5 +#define UPPER_CUTOFF 50 +#define LOWER_CUTOFF 10 + +#define IPA_DEFAULT_SYS_YELLOW_WM 32 + +#define IPA_AGGR_BYTE_LIMIT (\ + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \ + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT) +#define IPA_AGGR_PKT_LIMIT (\ + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \ + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT) + +static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0, + IPA_OFFSET_MEQ32_1, -1 }; +static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0, + IPA_OFFSET_MEQ128_1, -1 }; +static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0, + IPA_IHL_OFFSET_RANGE16_1, -1 }; +static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0, + IPA_IHL_OFFSET_MEQ32_1, -1 }; +#define IPA_1_1 (0) +#define IPA_2_0 (1) +#define IPA_2_6L (2) + +#define INVALID_EP_MAPPING_INDEX 
(-1) + +#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \ + (ARRAY_SIZE(__eq_array) <= (__eq_index)) + +struct ipa_ep_confing { + bool valid; + int pipe_num; +}; + +static const struct ipa_ep_confing ep_mapping[3][IPA_CLIENT_MAX] = { + [IPA_1_1][IPA_CLIENT_HSIC1_PROD] = {true, 19}, + [IPA_1_1][IPA_CLIENT_HSIC2_PROD] = {true, 12}, + [IPA_1_1][IPA_CLIENT_USB2_PROD] = {true, 12}, + [IPA_1_1][IPA_CLIENT_HSIC3_PROD] = {true, 13}, + [IPA_1_1][IPA_CLIENT_USB3_PROD] = {true, 13}, + [IPA_1_1][IPA_CLIENT_HSIC4_PROD] = {true, 0}, + [IPA_1_1][IPA_CLIENT_USB4_PROD] = {true, 0}, + [IPA_1_1][IPA_CLIENT_USB_PROD] = {true, 11}, + [IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = {true, 15}, + [IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD] = {true, 8}, + [IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD] = {true, 6}, + [IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 2}, + [IPA_1_1][IPA_CLIENT_APPS_CMD_PROD] = {true, 1}, + [IPA_1_1][IPA_CLIENT_Q6_LAN_PROD] = {true, 5}, + + [IPA_1_1][IPA_CLIENT_HSIC1_CONS] = {true, 14}, + [IPA_1_1][IPA_CLIENT_HSIC2_CONS] = {true, 16}, + [IPA_1_1][IPA_CLIENT_USB2_CONS] = {true, 16}, + [IPA_1_1][IPA_CLIENT_HSIC3_CONS] = {true, 17}, + [IPA_1_1][IPA_CLIENT_USB3_CONS] = {true, 17}, + [IPA_1_1][IPA_CLIENT_HSIC4_CONS] = {true, 18}, + [IPA_1_1][IPA_CLIENT_USB4_CONS] = {true, 18}, + [IPA_1_1][IPA_CLIENT_USB_CONS] = {true, 10}, + [IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS] = {true, 9}, + [IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS] = {true, 7}, + [IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS] = {true, 3}, + [IPA_1_1][IPA_CLIENT_Q6_LAN_CONS] = {true, 4}, + + + [IPA_2_0][IPA_CLIENT_HSIC1_PROD] = {true, 12}, + [IPA_2_0][IPA_CLIENT_WLAN1_PROD] = {true, 18}, + [IPA_2_0][IPA_CLIENT_USB2_PROD] = {true, 12}, + [IPA_2_0][IPA_CLIENT_USB3_PROD] = {true, 13}, + [IPA_2_0][IPA_CLIENT_USB4_PROD] = {true, 0}, + [IPA_2_0][IPA_CLIENT_USB_PROD] = {true, 11}, + [IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 4}, + [IPA_2_0][IPA_CLIENT_APPS_CMD_PROD] = {true, 3}, + [IPA_2_0][IPA_CLIENT_ODU_PROD] = {true, 12}, + 
[IPA_2_0][IPA_CLIENT_MHI_PROD] = {true, 18}, + [IPA_2_0][IPA_CLIENT_Q6_LAN_PROD] = {true, 6}, + [IPA_2_0][IPA_CLIENT_Q6_CMD_PROD] = {true, 7}, + + [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] + = {true, 12}, + [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] + = {true, 19}, + [IPA_2_0][IPA_CLIENT_ETHERNET_PROD] = {true, 12}, + /* Only for test purpose */ + [IPA_2_0][IPA_CLIENT_TEST_PROD] = {true, 19}, + [IPA_2_0][IPA_CLIENT_TEST1_PROD] = {true, 19}, + [IPA_2_0][IPA_CLIENT_TEST2_PROD] = {true, 12}, + [IPA_2_0][IPA_CLIENT_TEST3_PROD] = {true, 11}, + [IPA_2_0][IPA_CLIENT_TEST4_PROD] = {true, 0}, + + [IPA_2_0][IPA_CLIENT_HSIC1_CONS] = {true, 13}, + [IPA_2_0][IPA_CLIENT_WLAN1_CONS] = {true, 17}, + [IPA_2_0][IPA_CLIENT_WLAN2_CONS] = {true, 16}, + [IPA_2_0][IPA_CLIENT_WLAN3_CONS] = {true, 14}, + [IPA_2_0][IPA_CLIENT_WLAN4_CONS] = {true, 19}, + [IPA_2_0][IPA_CLIENT_USB_CONS] = {true, 15}, + [IPA_2_0][IPA_CLIENT_USB_DPL_CONS] = {true, 0}, + [IPA_2_0][IPA_CLIENT_APPS_LAN_CONS] = {true, 2}, + [IPA_2_0][IPA_CLIENT_APPS_WAN_CONS] = {true, 5}, + [IPA_2_0][IPA_CLIENT_ODU_EMB_CONS] = {true, 13}, + [IPA_2_0][IPA_CLIENT_ODU_TETH_CONS] = {true, 1}, + [IPA_2_0][IPA_CLIENT_MHI_CONS] = {true, 17}, + [IPA_2_0][IPA_CLIENT_Q6_LAN_CONS] = {true, 8}, + [IPA_2_0][IPA_CLIENT_Q6_WAN_CONS] = {true, 9}, + [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] + = {true, 13}, + [IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] + = {true, 16}, + [IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] + = {true, 10}, + [IPA_2_0][IPA_CLIENT_ETHERNET_CONS] = {true, 1}, + + /* Only for test purpose */ + [IPA_2_0][IPA_CLIENT_TEST_CONS] = {true, 1}, + [IPA_2_0][IPA_CLIENT_TEST1_CONS] = {true, 1}, + [IPA_2_0][IPA_CLIENT_TEST2_CONS] = {true, 16}, + [IPA_2_0][IPA_CLIENT_TEST3_CONS] = {true, 13}, + [IPA_2_0][IPA_CLIENT_TEST4_CONS] = {true, 15}, + + + [IPA_2_6L][IPA_CLIENT_USB_PROD] = {true, 1}, + [IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 4}, + [IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD] = {true, 3}, + [IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD] = 
{true, 6}, + [IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD] = {true, 7}, + [IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD] = {true, 11}, + [IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD] = {true, 13}, + + /* Only for test purpose */ + [IPA_2_6L][IPA_CLIENT_TEST_PROD] = {true, 11}, + [IPA_2_6L][IPA_CLIENT_TEST1_PROD] = {true, 11}, + [IPA_2_6L][IPA_CLIENT_TEST2_PROD] = {true, 12}, + [IPA_2_6L][IPA_CLIENT_TEST3_PROD] = {true, 13}, + [IPA_2_6L][IPA_CLIENT_TEST4_PROD] = {true, 14}, + + [IPA_2_6L][IPA_CLIENT_USB_CONS] = {true, 0}, + [IPA_2_6L][IPA_CLIENT_USB_DPL_CONS] = {true, 10}, + [IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS] = {true, 2}, + [IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS] = {true, 5}, + [IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS] = {true, 8}, + [IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS] = {true, 9}, + [IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS] = {true, 12}, + [IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS] = {true, 14}, + + /* Only for test purpose */ + [IPA_2_6L][IPA_CLIENT_TEST_CONS] = {true, 15}, + [IPA_2_6L][IPA_CLIENT_TEST1_CONS] = {true, 15}, + [IPA_2_6L][IPA_CLIENT_TEST2_CONS] = {true, 0}, + [IPA_2_6L][IPA_CLIENT_TEST3_CONS] = {true, 1}, + [IPA_2_6L][IPA_CLIENT_TEST4_CONS] = {true, 10}, +}; + +static struct msm_bus_vectors ipa_init_vectors_v1_1[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 0, + .ib = 0, + }, + { + .src = MSM_BUS_MASTER_BAM_DMA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 0, + .ib = 0, + }, + { + .src = MSM_BUS_MASTER_BAM_DMA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 0, + .ib = 0, + }, +}; + +static struct msm_bus_vectors ipa_init_vectors_v2_0[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 0, + .ib = 0, + }, + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 0, + .ib = 0, + }, +}; + +static struct msm_bus_vectors ipa_max_perf_vectors_v1_1[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 50000000, + .ib = 960000000, + }, + { + .src = MSM_BUS_MASTER_BAM_DMA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 
50000000, + .ib = 960000000, + }, + { + .src = MSM_BUS_MASTER_BAM_DMA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 50000000, + .ib = 960000000, + }, +}; + +static struct msm_bus_vectors ipa_nominal_perf_vectors_v2_0[] = { + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_EBI_CH0, + .ab = 100000000, + .ib = 1300000000, + }, + { + .src = MSM_BUS_MASTER_IPA, + .dst = MSM_BUS_SLAVE_OCIMEM, + .ab = 100000000, + .ib = 1300000000, + }, +}; + +static struct msm_bus_paths ipa_usecases_v1_1[] = { + { + ARRAY_SIZE(ipa_init_vectors_v1_1), + ipa_init_vectors_v1_1, + }, + { + ARRAY_SIZE(ipa_max_perf_vectors_v1_1), + ipa_max_perf_vectors_v1_1, + }, +}; + +static struct msm_bus_paths ipa_usecases_v2_0[] = { + { + ARRAY_SIZE(ipa_init_vectors_v2_0), + ipa_init_vectors_v2_0, + }, + { + ARRAY_SIZE(ipa_nominal_perf_vectors_v2_0), + ipa_nominal_perf_vectors_v2_0, + }, +}; + +static struct msm_bus_scale_pdata ipa_bus_client_pdata_v1_1 = { + .usecase = ipa_usecases_v1_1, + .num_usecases = ARRAY_SIZE(ipa_usecases_v1_1), + .name = "ipa", +}; + +static struct msm_bus_scale_pdata ipa_bus_client_pdata_v2_0 = { + .usecase = ipa_usecases_v2_0, + .num_usecases = ARRAY_SIZE(ipa_usecases_v2_0), + .name = "ipa", +}; + +void ipa_active_clients_lock(void) +{ + unsigned long flags; + + mutex_lock(&ipa_ctx->ipa_active_clients.mutex); + spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags); + ipa_ctx->ipa_active_clients.mutex_locked = true; + spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags); +} + +int ipa_active_clients_trylock(unsigned long *flags) +{ + spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, *flags); + if (ipa_ctx->ipa_active_clients.mutex_locked) { + spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, + *flags); + return 0; + } + + return 1; +} + +void ipa_active_clients_trylock_unlock(unsigned long *flags) +{ + spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, *flags); +} + +void ipa_active_clients_unlock(void) +{ + unsigned long 
flags; + + spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags); + ipa_ctx->ipa_active_clients.mutex_locked = false; + spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags); + mutex_unlock(&ipa_ctx->ipa_active_clients.mutex); +} + +/** + * ipa_get_clients_from_rm_resource() - get IPA clients which are related to an + * IPA_RM resource + * + * @resource: [IN] IPA Resource Manager resource + * @clients: [OUT] Empty array which will contain the list of clients. The + * caller must initialize this array. + * + * Return codes: 0 on success, negative on failure. + */ +int ipa_get_clients_from_rm_resource( + enum ipa_rm_resource_name resource, + struct ipa_client_names *clients) +{ + int i = 0; + + if (resource < 0 || + resource >= IPA_RM_RESOURCE_MAX || + !clients) { + IPAERR("Bad parameters\n"); + return -EINVAL; + } + + switch (resource) { + case IPA_RM_RESOURCE_USB_CONS: + clients->names[i++] = IPA_CLIENT_USB_CONS; + break; + case IPA_RM_RESOURCE_HSIC_CONS: + clients->names[i++] = IPA_CLIENT_HSIC1_CONS; + break; + case IPA_RM_RESOURCE_WLAN_CONS: + clients->names[i++] = IPA_CLIENT_WLAN1_CONS; + clients->names[i++] = IPA_CLIENT_WLAN2_CONS; + clients->names[i++] = IPA_CLIENT_WLAN3_CONS; + clients->names[i++] = IPA_CLIENT_WLAN4_CONS; + break; + case IPA_RM_RESOURCE_MHI_CONS: + clients->names[i++] = IPA_CLIENT_MHI_CONS; + break; + case IPA_RM_RESOURCE_ODU_ADAPT_CONS: + clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS; + clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS; + break; + case IPA_RM_RESOURCE_ETHERNET_CONS: + clients->names[i++] = IPA_CLIENT_ETHERNET_CONS; + break; + case IPA_RM_RESOURCE_USB_PROD: + clients->names[i++] = IPA_CLIENT_USB_PROD; + break; + case IPA_RM_RESOURCE_HSIC_PROD: + clients->names[i++] = IPA_CLIENT_HSIC1_PROD; + break; + case IPA_RM_RESOURCE_MHI_PROD: + clients->names[i++] = IPA_CLIENT_MHI_PROD; + break; + case IPA_RM_RESOURCE_ODU_ADAPT_PROD: + clients->names[i++] = IPA_CLIENT_ODU_PROD; + break; + case 
IPA_RM_RESOURCE_ETHERNET_PROD: + clients->names[i++] = IPA_CLIENT_ETHERNET_PROD; + break; + default: + break; + } + clients->length = i; + + return 0; +} + +/** + * ipa_should_pipe_be_suspended() - returns true when the client's pipe should + * be suspended during a power save scenario. False otherwise. + * + * @client: [IN] IPA client + */ +bool ipa_should_pipe_be_suspended(enum ipa_client_type client) +{ + struct ipa_ep_context *ep; + int ipa_ep_idx; + + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + WARN_ON(1); + return false; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + + if (ep->keep_ipa_awake) + return false; + + if (client == IPA_CLIENT_USB_CONS || + client == IPA_CLIENT_MHI_CONS || + client == IPA_CLIENT_HSIC1_CONS || + client == IPA_CLIENT_WLAN1_CONS || + client == IPA_CLIENT_WLAN2_CONS || + client == IPA_CLIENT_WLAN3_CONS || + client == IPA_CLIENT_WLAN4_CONS || + client == IPA_CLIENT_ODU_EMB_CONS || + client == IPA_CLIENT_ODU_TETH_CONS || + client == IPA_CLIENT_ETHERNET_CONS) + return true; + + return false; +} + +/** + * ipa2_suspend_resource_sync() - suspend client endpoints related to the IPA_RM + * resource and decrement active clients counter, which may result in clock + * gating of IPA clocks. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. 
+ */ +int ipa2_suspend_resource_sync(enum ipa_rm_resource_name resource) +{ + struct ipa_client_names clients; + int res; + int index; + struct ipa_ep_cfg_ctrl suspend; + enum ipa_client_type client; + int ipa_ep_idx; + bool pipe_suspended = false; + + memset(&clients, 0, sizeof(clients)); + res = ipa_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("Bad params.\n"); + return res; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + ipa_ctx->resume_on_connect[client] = false; + if (ipa_ctx->ep[ipa_ep_idx].client == client && + ipa_should_pipe_be_suspended(client)) { + if (ipa_ctx->ep[ipa_ep_idx].valid) { + /* suspend endpoint */ + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = true; + ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend); + pipe_suspended = true; + } + } + } + /* Sleep ~1 msec */ + if (pipe_suspended) + usleep_range(1000, 2000); + + /* before gating IPA clocks do TAG process */ + ipa_ctx->tag_process_before_gating = true; + IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource)); + + return 0; +} + +/** + * ipa2_suspend_resource_no_block() - suspend client endpoints related to the + * IPA_RM resource and decrement active clients counter. This function is + * guaranteed to avoid sleeping. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. 
+ */ +int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name resource) +{ + int res; + struct ipa_client_names clients; + int index; + enum ipa_client_type client; + struct ipa_ep_cfg_ctrl suspend; + int ipa_ep_idx; + unsigned long flags; + struct ipa_active_client_logging_info log_info; + + if (ipa_active_clients_trylock(&flags) == 0) + return -EPERM; + if (ipa_ctx->ipa_active_clients.cnt == 1) { + res = -EPERM; + goto bail; + } + + memset(&clients, 0, sizeof(clients)); + res = ipa_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("ipa_get_clients_from_rm_resource() failed, name = %d.\n" + , resource); + goto bail; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + ipa_ctx->resume_on_connect[client] = false; + if (ipa_ctx->ep[ipa_ep_idx].client == client && + ipa_should_pipe_be_suspended(client)) { + if (ipa_ctx->ep[ipa_ep_idx].valid) { + /* suspend endpoint */ + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = true; + ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend); + } + } + } + + if (res == 0) { + IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, + ipa_rm_resource_str(resource)); + ipa2_active_clients_log_dec(&log_info, true); + ipa_ctx->ipa_active_clients.cnt--; + IPADBG("active clients = %d\n", + ipa_ctx->ipa_active_clients.cnt); + } +bail: + ipa_active_clients_trylock_unlock(&flags); + + return res; +} + +/** + * ipa2_resume_resource() - resume client endpoints related to the IPA_RM + * resource. + * + * @resource: [IN] IPA Resource Manager resource + * + * Return codes: 0 on success, negative on failure. 
+ */ +int ipa2_resume_resource(enum ipa_rm_resource_name resource) +{ + + struct ipa_client_names clients; + int res; + int index; + struct ipa_ep_cfg_ctrl suspend; + enum ipa_client_type client; + int ipa_ep_idx; + + memset(&clients, 0, sizeof(clients)); + res = ipa_get_clients_from_rm_resource(resource, &clients); + if (res) { + IPAERR("ipa_get_clients_from_rm_resource() failed.\n"); + return res; + } + + for (index = 0; index < clients.length; index++) { + client = clients.names[index]; + ipa_ep_idx = ipa2_get_ep_mapping(client); + if (ipa_ep_idx == -1) { + IPAERR("Invalid client.\n"); + res = -EINVAL; + continue; + } + /* + * The related ep, will be resumed on connect + * while its resource is granted + */ + ipa_ctx->resume_on_connect[client] = true; + IPADBG("%d will be resumed on connect.\n", client); + if (ipa_ctx->ep[ipa_ep_idx].client == client && + ipa_should_pipe_be_suspended(client)) { + spin_lock(&ipa_ctx->disconnect_lock); + if (ipa_ctx->ep[ipa_ep_idx].valid && + !ipa_ctx->ep[ipa_ep_idx].disconnect_in_progress) { + memset(&suspend, 0, sizeof(suspend)); + suspend.ipa_ep_suspend = false; + ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend); + } + spin_unlock(&ipa_ctx->disconnect_lock); + } + } + + return res; +} + +/* read how much SRAM is available for SW use + * In case of IPAv2.0 this will also supply an offset from + * which we can start write + */ +void _ipa_sram_settings_read_v1_1(void) +{ + ipa_ctx->smem_restricted_bytes = 0; + ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v1_1); + ipa_ctx->smem_reqd_sz = IPA_MEM_v1_RAM_END_OFST; + ipa_ctx->hdr_tbl_lcl = true; + ipa_ctx->ip4_rt_tbl_lcl = false; + ipa_ctx->ip6_rt_tbl_lcl = false; + ipa_ctx->ip4_flt_tbl_lcl = true; + ipa_ctx->ip6_flt_tbl_lcl = true; +} + +void _ipa_sram_settings_read_v2_0(void) +{ + ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, + 
IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0); + ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0); + ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); + ipa_ctx->hdr_tbl_lcl = false; + ipa_ctx->ip4_rt_tbl_lcl = false; + ipa_ctx->ip6_rt_tbl_lcl = false; + ipa_ctx->ip4_flt_tbl_lcl = false; + ipa_ctx->ip6_flt_tbl_lcl = false; +} + +void _ipa_sram_settings_read_v2_5(void) +{ + ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0); + ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0); + ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); + ipa_ctx->hdr_tbl_lcl = false; + ipa_ctx->hdr_proc_ctx_tbl_lcl = true; + + /* + * when proc ctx table is located in internal memory, + * modem entries resides first. 
+ */ + if (ipa_ctx->hdr_proc_ctx_tbl_lcl) { + ipa_ctx->hdr_proc_ctx_tbl.start_offset = + IPA_MEM_PART(modem_hdr_proc_ctx_size); + } + ipa_ctx->ip4_rt_tbl_lcl = false; + ipa_ctx->ip6_rt_tbl_lcl = false; + ipa_ctx->ip4_flt_tbl_lcl = false; + ipa_ctx->ip6_flt_tbl_lcl = false; +} + +void _ipa_sram_settings_read_v2_6L(void) +{ + ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0); + ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio, + IPA_SHARED_MEM_SIZE_OFST_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0, + IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0); + ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst); + ipa_ctx->hdr_tbl_lcl = false; + ipa_ctx->ip4_rt_tbl_lcl = false; + ipa_ctx->ip6_rt_tbl_lcl = false; + ipa_ctx->ip4_flt_tbl_lcl = false; + ipa_ctx->ip6_flt_tbl_lcl = false; +} + +void _ipa_cfg_route_v1_1(struct ipa_route *route) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, route->route_dis, + IPA_ROUTE_ROUTE_DIS_SHFT, + IPA_ROUTE_ROUTE_DIS_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe, + IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_DEF_PIPE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK); + + ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val); +} + +void _ipa_cfg_route_v2_0(struct ipa_route *route) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, route->route_dis, + IPA_ROUTE_ROUTE_DIS_SHFT, + IPA_ROUTE_ROUTE_DIS_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe, + IPA_ROUTE_ROUTE_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_DEF_PIPE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table, + IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT, + 
IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT, + IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, route->route_frag_def_pipe, + IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT, + IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK); + + ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val); +} + +/** + * ipa_cfg_route() - configure IPA route + * @route: IPA route + * + * Return codes: + * 0: success + */ +int ipa_cfg_route(struct ipa_route *route) +{ + + IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n", + route->route_dis, + route->route_def_pipe, + route->route_def_hdr_table); + IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n", + route->route_def_hdr_ofst, + route->route_frag_def_pipe); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + ipa_ctx->ctrl->ipa_cfg_route(route); + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa_cfg_filter() - configure filter + * @disable: disable value + * + * Return codes: + * 0: success + */ +int ipa_cfg_filter(u32 disable) +{ + u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst, + IPA_SETFIELD(!disable, + IPA_FILTER_FILTER_EN_SHFT, + IPA_FILTER_FILTER_EN_BMSK)); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa_init_hw() - initialize HW + * + * Return codes: + * 0: success + */ +int ipa_init_hw(void) +{ + u32 ipa_version = 0; + + /* do soft reset of IPA */ + ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1); + ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0); + + /* enable IPA */ + ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1); + + /* Read IPA version and make sure we have access to the registers */ + ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST); + if (ipa_version == 0) + return -EFAULT; + + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) { + /* set ipa_bcr to 0xFFFFFFFF for using new IPA behavior */ 
+ ipa_write_reg(ipa_ctx->mmio, IPA_BCR_OFST, IPA_BCR_REG_VAL); + } + return 0; +} + +/** + * ipa2_get_ep_mapping() - provide endpoint mapping + * @client: client type + * + * Return value: endpoint mapping + */ +int ipa2_get_ep_mapping(enum ipa_client_type client) +{ + u8 hw_type_index = IPA_1_1; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return INVALID_EP_MAPPING_INDEX; + } + + if (client >= IPA_CLIENT_MAX || client < 0) { + IPAERR_RL("Bad client number! client =%d\n", client); + return INVALID_EP_MAPPING_INDEX; + } + + switch (ipa_ctx->ipa_hw_type) { + case IPA_HW_v2_0: + case IPA_HW_v2_5: + hw_type_index = IPA_2_0; + break; + case IPA_HW_v2_6L: + hw_type_index = IPA_2_6L; + break; + default: + hw_type_index = IPA_1_1; + break; + } + + if (!ep_mapping[hw_type_index][client].valid) + return INVALID_EP_MAPPING_INDEX; + + return ep_mapping[hw_type_index][client].pipe_num; +} + +/* ipa2_set_client() - provide client mapping + * @client: client type + * + * Return value: none + */ + +void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink) +{ + if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) { + IPAERR("Bad client number! client =%d\n", client); + } else if (index >= IPA_MAX_NUM_PIPES || index < 0) { + IPAERR("Bad pipe index! 
index =%d\n", index); + } else { + ipa_ctx->ipacm_client[index].client_enum = client; + ipa_ctx->ipacm_client[index].uplink = uplink; + } +} + +/* ipa2_get_wlan_stats() - get ipa wifi stats + * + * Return value: success or failure + */ +int ipa2_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats) +{ + if (ipa_ctx->uc_wdi_ctx.stats_notify) { + ipa_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS, + wdi_sap_stats); + } else { + IPAERR_RL("uc_wdi_ctx.stats_notify not registered\n"); + return -EFAULT; + } + return 0; +} + +int ipa2_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota) +{ + if (ipa_ctx->uc_wdi_ctx.stats_notify) { + ipa_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA, + wdi_quota); + } else { + IPAERR("uc_wdi_ctx.stats_notify not registered\n"); + return -EFAULT; + } + return 0; +} + +/** + * ipa2_get_client() - provide client mapping + * @client: client type + * + * Return value: client mapping enum + */ +enum ipacm_client_enum ipa2_get_client(int pipe_idx) +{ + if (pipe_idx >= IPA_MAX_NUM_PIPES || pipe_idx < 0) { + IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx); + return IPACM_CLIENT_MAX; + } else { + return ipa_ctx->ipacm_client[pipe_idx].client_enum; + } +} + +/** + * ipa2_get_client_uplink() - provide client mapping + * @client: client type + * + * Return value: none + */ +bool ipa2_get_client_uplink(int pipe_idx) +{ + if (pipe_idx < 0 || pipe_idx >= IPA_MAX_NUM_PIPES) { + IPAERR("invalid pipe idx %d\n", pipe_idx); + return false; + } + + return ipa_ctx->ipacm_client[pipe_idx].uplink; +} + +/** + * ipa2_get_rm_resource_from_ep() - get the IPA_RM resource which is related to + * the supplied pipe index. + * + * @pipe_idx: + * + * Return value: IPA_RM resource related to the pipe, -1 if a resource was not + * found. 
+ */ +enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx) +{ + int i; + int j; + enum ipa_client_type client; + struct ipa_client_names clients; + bool found = false; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + return -EINVAL; + } + + client = ipa_ctx->ep[pipe_idx].client; + + for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) { + memset(&clients, 0, sizeof(clients)); + ipa_get_clients_from_rm_resource(i, &clients); + for (j = 0; j < clients.length; j++) { + if (clients.names[j] == client) { + found = true; + break; + } + } + if (found) + break; + } + + if (!found) + return -EFAULT; + + return i; +} + +/** + * ipa2_get_client_mapping() - provide client mapping + * @pipe_idx: IPA end-point number + * + * Return value: client mapping + */ +enum ipa_client_type ipa2_get_client_mapping(int pipe_idx) +{ + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) { + IPAERR("Bad pipe index!\n"); + return -EINVAL; + } + + return ipa_ctx->ep[pipe_idx].client; +} + +void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset, + const uint8_t mac_addr_mask[ETH_ALEN], + const uint8_t mac_addr[ETH_ALEN]) +{ + *buf = ipa_write_8(hdr_mac_addr_offset, *buf); + + /* MAC addr mask copied as little endian each 4 bytes */ + *buf = ipa_write_8(mac_addr_mask[3], *buf); + *buf = ipa_write_8(mac_addr_mask[2], *buf); + *buf = ipa_write_8(mac_addr_mask[1], *buf); + *buf = ipa_write_8(mac_addr_mask[0], *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_8(mac_addr_mask[5], *buf); + *buf = ipa_write_8(mac_addr_mask[4], *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + + /* MAC addr copied as little endian each 4 bytes */ + *buf = ipa_write_8(mac_addr[3], *buf); + *buf = ipa_write_8(mac_addr[2], *buf); + 
*buf = ipa_write_8(mac_addr[1], *buf); + *buf = ipa_write_8(mac_addr[0], *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_8(mac_addr[5], *buf); + *buf = ipa_write_8(mac_addr[4], *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_pad_to_32(*buf); +} + +/** + * ipa_generate_hw_rule() - generate HW rule + * @ip: IP address type + * @attrib: IPA rule attribute + * @buf: output buffer + * @en_rule: rule + * + * Return codes: + * 0: success + * -EPERM: wrong input + */ +int ipa_generate_hw_rule(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule) +{ + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + + if (ip == IPA_IP_v4) { + + /* error check */ + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR || + attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask & + IPA_FLT_FLOW_LABEL) { + IPAERR("v6 attrib's specified for v4 rule\n"); + return -EPERM; + } + + if (attrib->attrib_mask & IPA_FLT_TOS) { + *en_rule |= IPA_TOS_EQ; + *buf = ipa_write_8(attrib->u.v4.tos, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* 0 => offset of TOS in v4 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32((attrib->tos_mask << 16), *buf); + *buf = ipa_write_32((attrib->tos_value << 16), *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) { + *en_rule |= IPA_PROTOCOL_EQ; + *buf = ipa_write_8(attrib->u.v4.protocol, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* 12 => offset of src ip in v4 header */ + *buf = ipa_write_8(12, *buf); + *buf = 
ipa_write_32(attrib->u.v4.src_addr_mask, *buf); + *buf = ipa_write_32(attrib->u.v4.src_addr, *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* 16 => offset of dst ip in v4 header */ + *buf = ipa_write_8(16, *buf); + *buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf); + *buf = ipa_write_32(attrib->u.v4.dst_addr, *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* -2 => offset of ether type in L2 hdr */ + *buf = ipa_write_8((u8)-2, *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_16(htons(attrib->ether_type), *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_16(htons(attrib->ether_type), *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAERR("bad src port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 0 => offset of src port after v4 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_16(attrib->src_port_hi, *buf); + *buf = ipa_write_16(attrib->src_port_lo, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAERR("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 2 => offset of dst port 
after v4 header */ + *buf = ipa_write_8(2, *buf); + *buf = ipa_write_16(attrib->dst_port_hi, *buf); + *buf = ipa_write_16(attrib->dst_port_lo, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 0 => offset of type after v4 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32(0xFF, *buf); + *buf = ipa_write_32(attrib->type, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 1 => offset of code after v4 header */ + *buf = ipa_write_8(1, *buf); + *buf = ipa_write_32(0xFF, *buf); + *buf = ipa_write_32(attrib->code, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 0 => offset of SPI after v4 header FIXME */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32(0xFFFFFFFF, *buf); + *buf = ipa_write_32(attrib->spi, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 0 => offset of src port after v4 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_16(attrib->src_port, *buf); + *buf = ipa_write_16(attrib->src_port, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + 
IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 2 => offset of dst port after v4 header */ + *buf = ipa_write_8(2, *buf); + *buf = ipa_write_16(attrib->dst_port, *buf); + *buf = ipa_write_16(attrib->dst_port, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -14, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -8, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -22, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -16, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_METADATA_COMPARE; + *buf = ipa_write_8(0, *buf); /* 
offset, reserved */ + *buf = ipa_write_32(attrib->meta_data_mask, *buf); + *buf = ipa_write_32(attrib->meta_data, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_IS_FRAG; + *buf = ipa_pad_to_32(*buf); + } + } else if (ip == IPA_IP_v6) { + + /* v6 code below assumes no extension headers TODO: fix this */ + + /* error check */ + if (attrib->attrib_mask & IPA_FLT_TOS || + attrib->attrib_mask & IPA_FLT_PROTOCOL) { + IPAERR("v4 attrib's specified for v6 rule\n"); + return -EPERM; + } + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) { + *en_rule |= IPA_PROTOCOL_EQ; + *buf = ipa_write_8(attrib->u.v6.next_hdr, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + /* -2 => offset of ether type in L2 hdr */ + *buf = ipa_write_8((u8)-2, *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_16(htons(attrib->ether_type), *buf); + *buf = ipa_write_16(0, *buf); + *buf = ipa_write_16(htons(attrib->ether_type), *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 0 => offset of type after v6 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32(0xFF, *buf); + *buf = ipa_write_32(attrib->type, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 1 => offset of code after v6 header */ + *buf = ipa_write_8(1, *buf); + *buf = ipa_write_32(0xFF, *buf); + *buf = ipa_write_32(attrib->code, *buf); + *buf = 
ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 0 => offset of SPI after v6 header FIXME */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32(0xFFFFFFFF, *buf); + *buf = ipa_write_32(attrib->spi, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 22 => offset of IP type after v6 header */ + *buf = ipa_write_8(22, *buf); + *buf = ipa_write_32(0xF0000000, *buf); + if (attrib->type == 0x40) + *buf = ipa_write_32(0x40000000, *buf); + else + *buf = ipa_write_32(0x60000000, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa_ihl_ofst_meq32, + ihl_ofst_meq32)) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 38 => offset of inner IPv4 addr */ + *buf = ipa_write_8(38, *buf); + *buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf); + *buf = ipa_write_32(attrib->u.v4.dst_addr, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 0 => offset of src port after v6 header */ + *buf = ipa_write_8(0, *buf); + *buf = 
ipa_write_16(attrib->src_port, *buf); + *buf = ipa_write_16(attrib->src_port, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 2 => offset of dst port after v6 header */ + *buf = ipa_write_8(2, *buf); + *buf = ipa_write_16(attrib->dst_port, *buf); + *buf = ipa_write_16(attrib->dst_port, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAERR("bad src port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 0 => offset of src port after v6 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_16(attrib->src_port_hi, *buf); + *buf = ipa_write_16(attrib->src_port_lo, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAERR("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + /* 2 => offset of dst port after v6 header */ + *buf = ipa_write_8(2, *buf); + *buf = ipa_write_16(attrib->dst_port_hi, *buf); + *buf = ipa_write_16(attrib->dst_port_lo, *buf); + *buf = ipa_pad_to_32(*buf); + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + /* 8 => offset of src ip in v6 header */ + *buf = ipa_write_8(8, *buf); + *buf = 
ipa_write_32(attrib->u.v6.src_addr_mask[0], + *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr_mask[1], + *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr_mask[2], + *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr_mask[3], + *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf); + *buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + /* 24 => offset of dst ip in v6 header */ + *buf = ipa_write_8(24, *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0], + *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1], + *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2], + *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3], + *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf); + *buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TC) { + *en_rule |= IPA_FLT_TC; + *buf = ipa_write_8(attrib->u.v6.tc, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + /* 0 => offset of TOS in v6 header */ + *buf = ipa_write_8(0, *buf); + *buf = ipa_write_32((attrib->tos_mask << 20), *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + + *buf = ipa_write_32((attrib->tos_value << 20), *buf); + *buf = ipa_write_32(0, *buf); + *buf = ipa_write_32(0, *buf); + *buf = 
ipa_write_32(0, *buf); + *buf = ipa_pad_to_32(*buf); + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -14, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -8, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -22, + attrib->dst_mac_addr_mask, + attrib->dst_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_generate_mac_addr_hw_rule( + buf, + -16, + attrib->src_mac_addr_mask, + attrib->src_mac_addr); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) { + *en_rule |= IPA_FLT_FLOW_LABEL; + /* FIXME FL is only 20 bits */ + *buf = ipa_write_32(attrib->u.v6.flow_label, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_METADATA_COMPARE; + *buf = ipa_write_8(0, *buf); /* offset, reserved */ + *buf = 
ipa_write_32(attrib->meta_data_mask, *buf); + *buf = ipa_write_32(attrib->meta_data, *buf); + *buf = ipa_pad_to_32(*buf); + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_IS_FRAG; + *buf = ipa_pad_to_32(*buf); + } + } else { + IPAERR("unsupported ip %d\n", ip); + return -EPERM; + } + + /* + * default "rule" means no attributes set -> map to + * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0 + */ + if (attrib->attrib_mask == 0) { + IPADBG_LOW("building default rule\n"); + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + *buf = ipa_write_8(0, *buf); /* offset */ + *buf = ipa_write_32(0, *buf); /* mask */ + *buf = ipa_write_32(0, *buf); /* val */ + *buf = ipa_pad_to_32(*buf); + ofst_meq32++; + } + + return 0; +} + +void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb, + u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN], + const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128) +{ + eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset; + eq_atrb->offset_meq_128[ofst_meq128].mask[0] = mac_addr_mask[3]; + eq_atrb->offset_meq_128[ofst_meq128].mask[1] = mac_addr_mask[2]; + eq_atrb->offset_meq_128[ofst_meq128].mask[2] = mac_addr_mask[1]; + eq_atrb->offset_meq_128[ofst_meq128].mask[3] = mac_addr_mask[0]; + eq_atrb->offset_meq_128[ofst_meq128].mask[4] = 0; + eq_atrb->offset_meq_128[ofst_meq128].mask[5] = 0; + eq_atrb->offset_meq_128[ofst_meq128].mask[6] = mac_addr_mask[5]; + eq_atrb->offset_meq_128[ofst_meq128].mask[7] = mac_addr_mask[4]; + memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 8); + eq_atrb->offset_meq_128[ofst_meq128].value[0] = mac_addr[3]; + eq_atrb->offset_meq_128[ofst_meq128].value[1] = mac_addr[2]; + eq_atrb->offset_meq_128[ofst_meq128].value[2] = mac_addr[1]; + eq_atrb->offset_meq_128[ofst_meq128].value[3] = mac_addr[0]; + eq_atrb->offset_meq_128[ofst_meq128].value[4] = 0; + 
eq_atrb->offset_meq_128[ofst_meq128].value[5] = 0; + eq_atrb->offset_meq_128[ofst_meq128].value[6] = mac_addr[5]; + eq_atrb->offset_meq_128[ofst_meq128].value[7] = mac_addr[4]; + memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 8); +} + +int ipa_generate_flt_eq(enum ipa_ip_type ip, + const struct ipa_rule_attrib *attrib, + struct ipa_ipfltri_rule_eq *eq_atrb) +{ + u8 ofst_meq32 = 0; + u8 ihl_ofst_rng16 = 0; + u8 ihl_ofst_meq32 = 0; + u8 ofst_meq128 = 0; + u16 eq_bitmap = 0; + u16 *en_rule = &eq_bitmap; + + if (ip == IPA_IP_v4) { + + /* error check */ + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR || + attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask & + IPA_FLT_FLOW_LABEL) { + IPAERR_RL("v6 attrib's specified for v4 rule\n"); + return -EPERM; + } + + if (attrib->attrib_mask & IPA_FLT_TOS) { + *en_rule |= IPA_TOS_EQ; + eq_atrb->tos_eq_present = 1; + eq_atrb->tos_eq = attrib->u.v4.tos; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = 0; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->tos_mask << 16; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->tos_value << 16; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_PROTOCOL) { + *en_rule |= IPA_PROTOCOL_EQ; + eq_atrb->protocol_eq_present = 1; + eq_atrb->protocol_eq = attrib->u.v4.protocol; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = 12; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->u.v4.src_addr_mask; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->u.v4.src_addr; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + 
IPAERR_RL("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = 16; + eq_atrb->offset_meq_32[ofst_meq32].mask = + attrib->u.v4.dst_addr_mask; + eq_atrb->offset_meq_32[ofst_meq32].value = + attrib->u.v4.dst_addr; + ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAERR_RL("bad src port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAERR_RL("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->type; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + 
IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->code; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFFFFFFFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->spi; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_METADATA_COMPARE; + eq_atrb->metadata_meq32_present = 1; + eq_atrb->metadata_meq32.offset = 0; + eq_atrb->metadata_meq32.mask = attrib->meta_data_mask; + eq_atrb->metadata_meq32.value = attrib->meta_data; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= 
IPA_IS_FRAG; + eq_atrb->ipv4_frag_eq_present = 1; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -14, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -8, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -22, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -16, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = -2; + eq_atrb->offset_meq_32[ofst_meq32].mask = + htons(attrib->ether_type); + 
eq_atrb->offset_meq_32[ofst_meq32].value = + htons(attrib->ether_type); + ofst_meq32++; + } + } else if (ip == IPA_IP_v6) { + + /* v6 code below assumes no extension headers TODO: fix this */ + + /* error check */ + if (attrib->attrib_mask & IPA_FLT_TOS || + attrib->attrib_mask & IPA_FLT_PROTOCOL) { + IPAERR_RL("v4 attrib's specified for v6 rule\n"); + return -EPERM; + } + + if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) { + *en_rule |= IPA_PROTOCOL_EQ; + eq_atrb->protocol_eq_present = 1; + eq_atrb->protocol_eq = attrib->u.v6.next_hdr; + } + + if (attrib->attrib_mask & IPA_FLT_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->type; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_CODE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->code; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SPI) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR_RL("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xFFFFFFFF; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->spi; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IP_TYPE) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= 
ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 22 => offset of inner IP type after v6 header */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 22; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + 0xF0000000; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + (u32)attrib->type << 24; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_L2TP_INNER_IPV4_DST_ADDR) { + if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) { + IPAERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32]; + /* 38 => offset of inner IPv4 addr */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 38; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + attrib->u.v4.dst_addr_mask; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + attrib->u.v4.dst_addr; + ihl_ofst_meq32++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->src_port_hi < attrib->src_port_lo) { + IPAERR_RL("bad src port range param\n"); 
+ return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->src_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->src_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) { + if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) { + IPAERR_RL("ran out of ihl_rng16 eq\n"); + return -EPERM; + } + if (attrib->dst_port_hi < attrib->dst_port_lo) { + IPAERR_RL("bad dst port range param\n"); + return -EPERM; + } + *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16]; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low + = attrib->dst_port_lo; + eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high + = attrib->dst_port_hi; + ihl_ofst_rng16++; + } + + if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + eq_atrb->offset_meq_128[ofst_meq128].offset = 8; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0) + = attrib->u.v6.src_addr_mask[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = attrib->u.v6.src_addr_mask[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = attrib->u.v6.src_addr_mask[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->u.v6.src_addr_mask[3]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->u.v6.src_addr[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = attrib->u.v6.src_addr[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = attrib->u.v6.src_addr[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->u.v6.src_addr[3]; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_DST_ADDR) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + 
IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + eq_atrb->offset_meq_128[ofst_meq128].offset = 24; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0) + = attrib->u.v6.dst_addr_mask[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = attrib->u.v6.dst_addr_mask[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = attrib->u.v6.dst_addr_mask[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = attrib->u.v6.dst_addr_mask[3]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->u.v6.dst_addr[0]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = attrib->u.v6.dst_addr[1]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = attrib->u.v6.dst_addr[2]; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = attrib->u.v6.dst_addr[3]; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_TC) { + *en_rule |= IPA_FLT_TC; + eq_atrb->tc_eq_present = 1; + eq_atrb->tc_eq = attrib->u.v6.tc; + } + + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + eq_atrb->offset_meq_128[ofst_meq128].offset = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0) + = attrib->tos_mask << 20; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0) + = attrib->tos_value << 20; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8) + = 0; + *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + + 12) = 0; + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) { + *en_rule |= IPA_FLT_FLOW_LABEL; + eq_atrb->fl_eq_present = 
1; + eq_atrb->fl_eq = attrib->u.v6.flow_label; + } + + if (attrib->attrib_mask & IPA_FLT_META_DATA) { + *en_rule |= IPA_METADATA_COMPARE; + eq_atrb->metadata_meq32_present = 1; + eq_atrb->metadata_meq32.offset = 0; + eq_atrb->metadata_meq32.mask = attrib->meta_data_mask; + eq_atrb->metadata_meq32.value = attrib->meta_data; + } + + if (attrib->attrib_mask & IPA_FLT_FRAGMENT) { + *en_rule |= IPA_IS_FRAG; + eq_atrb->ipv4_frag_eq_present = 1; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -14 => offset of dst mac addr in Ethernet II hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -14, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -8 => offset of src mac addr in Ethernet II hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -8, + attrib->src_mac_addr_mask, attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -22 => offset of dst mac addr in 802.3 hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -22, + attrib->dst_mac_addr_mask, attrib->dst_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) { + if (ipa_ofst_meq128[ofst_meq128] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq128[ofst_meq128]; + + /* -16 => offset of src mac addr in 802.3 hdr */ + ipa_generate_flt_mac_addr_eq(eq_atrb, -16, + attrib->src_mac_addr_mask, 
attrib->src_mac_addr, + ofst_meq128); + + ofst_meq128++; + } + + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq128 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = -2; + eq_atrb->offset_meq_32[ofst_meq32].mask = + htons(attrib->ether_type); + eq_atrb->offset_meq_32[ofst_meq32].value = + htons(attrib->ether_type); + ofst_meq32++; + } + + } else { + IPAERR_RL("unsupported ip %d\n", ip); + return -EPERM; + } + + /* + * default "rule" means no attributes set -> map to + * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0 + */ + if (attrib->attrib_mask == 0) { + if (ipa_ofst_meq32[ofst_meq32] == -1) { + IPAERR_RL("ran out of meq32 eq\n"); + return -EPERM; + } + *en_rule |= ipa_ofst_meq32[ofst_meq32]; + eq_atrb->offset_meq_32[ofst_meq32].offset = 0; + eq_atrb->offset_meq_32[ofst_meq32].mask = 0; + eq_atrb->offset_meq_32[ofst_meq32].value = 0; + ofst_meq32++; + } + + eq_atrb->rule_eq_bitmap = *en_rule; + eq_atrb->num_offset_meq_32 = ofst_meq32; + eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16; + eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32; + eq_atrb->num_offset_meq_128 = ofst_meq128; + + return 0; +} + +/** + * ipa2_cfg_ep - IPA end-point configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * This includes nat, header, mode, aggregation and route settings and is a one + * shot API to configure the IPA end-point fully + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg) +{ + int result = -EINVAL; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) { + 
IPAERR("bad parm.\n"); + return -EINVAL; + } + + result = ipa2_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr); + if (result) + return result; + + result = ipa2_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext); + if (result) + return result; + + result = ipa2_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr); + if (result) + return result; + + result = ipa2_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg); + if (result) + return result; + + if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) { + result = ipa2_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat); + if (result) + return result; + + result = ipa2_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode); + if (result) + return result; + + result = ipa2_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route); + if (result) + return result; + + result = ipa2_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr); + if (result) + return result; + } else { + result = ipa2_cfg_ep_metadata_mask(clnt_hdl, + &ipa_ep_cfg->metadata_mask); + if (result) + return result; + } + + return 0; +} + +const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en) +{ + switch (nat_en) { + case (IPA_BYPASS_NAT): + return "NAT disabled"; + case (IPA_SRC_NAT): + return "Source NAT"; + case (IPA_DST_NAT): + return "Dst NAT"; + } + + return "undefined"; +} + +void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_nat *ep_nat) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en, + IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT, + IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_NAT_N_OFST_v1_1(clnt_hdl), + reg_val); +} + +void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_nat *ep_nat) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en, + IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT, + IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_NAT_N_OFST_v2_0(clnt_hdl), + reg_val); +} + +/** + * ipa2_cfg_ep_nat() - IPA end-point NAT configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * 
@ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) { + IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + IPADBG("pipe=%d, nat_en=%d(%s)\n", + clnt_hdl, + ep_nat->nat_en, + ipa_get_nat_en_str(ep_nat->nat_en)); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_status_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_status *ep_status) +{ + IPADBG("Not supported for version 1.1\n"); +} + +static void _ipa_cfg_ep_status_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_status *ep_status) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_status->status_en, + IPA_ENDP_STATUS_n_STATUS_EN_SHFT, + IPA_ENDP_STATUS_n_STATUS_EN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_status->status_ep, + IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT, + IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_STATUS_n_OFST(clnt_hdl), + reg_val); +} + +/** + * ipa2_cfg_ep_status() - IPA end-point status configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status) +{ + if (clnt_hdl 
>= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, status_en=%d status_ep=%d\n", + clnt_hdl, + ep_status->status_en, + ep_status->status_ep); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].status = *ep_status; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_cfg_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *cfg) +{ + IPADBG("Not supported for version 1.1\n"); +} + +static void _ipa_cfg_ep_cfg_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_cfg *cfg) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, cfg->frag_offload_en, + IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT, + IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK); + IPA_SETFIELD_IN_REG(reg_val, cfg->cs_offload_en, + IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT, + IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK); + IPA_SETFIELD_IN_REG(reg_val, cfg->cs_metadata_hdr_offset, + IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT, + IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK); + + ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CFG_n_OFST(clnt_hdl), + reg_val); +} + +/** + * ipa2_cfg_ep_cfg() - IPA end-point cfg configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, frag_ofld_en=%d 
cs_ofld_en=%d mdata_hdr_ofst=%d\n", + clnt_hdl, + cfg->frag_offload_en, + cfg->cs_offload_en, + cfg->cs_metadata_hdr_offset); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_metadata_mask_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *metadata_mask) +{ + IPADBG("Not supported for version 1.1\n"); +} + +static void _ipa_cfg_ep_metadata_mask_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *metadata_mask) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, metadata_mask->metadata_mask, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(clnt_hdl), + reg_val); +} + +/** + * ipa2_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata_mask *metadata_mask) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, + ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, metadata_mask=0x%x\n", + clnt_hdl, + metadata_mask->metadata_mask); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask); + + 
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number, + const struct ipa_ep_cfg_hdr *ep_hdr) +{ + u32 val = 0; + + val = IPA_SETFIELD(ep_hdr->hdr_len, + IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_ofst_metadata_valid, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_ofst_metadata, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_additional_const_len, + IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size_valid, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK) | + IPA_SETFIELD(ep_hdr->hdr_a5_mux, + IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK); + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe_number), val); +} + +void _ipa_cfg_ep_hdr_v2_0(u32 pipe_number, + const struct ipa_ep_cfg_hdr *ep_hdr) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_metadata_reg_valid, + IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2, + IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_remove_additional, + IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2, + IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_a5_mux, + IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK); + + 
IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size_valid, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_additional_const_len, + IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata_valid, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_len, + IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT, + IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe_number), reg_val); +} + +/** + * ipa2_cfg_ep_hdr() - IPA end-point header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr) +{ + struct ipa_ep_context *ep; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + IPADBG("pipe=%d remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n", + clnt_hdl, + ep_hdr->hdr_remove_additional, + ep_hdr->hdr_a5_mux, + ep_hdr->hdr_ofst_pkt_size); + + IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n", + ep_hdr->hdr_ofst_pkt_size_valid, + ep_hdr->hdr_additional_const_len); + + IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x", + ep_hdr->hdr_ofst_metadata, + ep_hdr->hdr_ofst_metadata_valid, + ep_hdr->hdr_len); + + ep = &ipa_ctx->ep[clnt_hdl]; + 
+ /* copy over EP cfg */ + ep->cfg.hdr = *ep_hdr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static int _ipa_cfg_ep_hdr_ext_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr) +{ + IPADBG("Not supported for version 1.1\n"); + return 0; +} + +static int _ipa_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 reg_val) +{ + u8 hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_offset, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_payload_len_inc_padding, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_valid, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, hdr_endianness, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(clnt_hdl), reg_val); + + return 0; +} + +static int _ipa_cfg_ep_hdr_ext_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0); + + return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val); +} + +static int 
_ipa_cfg_ep_hdr_ext_v2_5(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5); + + return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val); + +} + +static int _ipa_cfg_ep_hdr_ext_v2_6L(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT, + IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5); + + return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val); + +} + +/** + * ipa2_cfg_ep_hdr_ext() - IPA end-point extended header configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_hdr_ext: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl, + const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext) +{ + struct ipa_ep_context *ep; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d hdr_pad_to_alignment=%d\n", + clnt_hdl, + ep_hdr_ext->hdr_pad_to_alignment); + + IPADBG("hdr_total_len_or_pad_offset=%d\n", + ep_hdr_ext->hdr_total_len_or_pad_offset); + + IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n", + ep_hdr_ext->hdr_payload_len_inc_padding, + ep_hdr_ext->hdr_total_len_or_pad); + + IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n", + ep_hdr_ext->hdr_total_len_or_pad_valid, + ep_hdr_ext->hdr_little_endian); + + ep = &ipa_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.hdr_ext = *ep_hdr_ext; + + 
IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +/** + * ipa2_cfg_ep_ctrl() - IPA end-point Control configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl) +{ + u32 reg_val = 0; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ep_ctrl == NULL) { + IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl); + return -EINVAL; + } + + IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n", + clnt_hdl, + ep_ctrl->ipa_ep_suspend, + ep_ctrl->ipa_ep_delay); + + IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_suspend, + IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT, + IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_delay, + IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT, + IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_CTRL_N_OFST(clnt_hdl), reg_val); + + return 0; + +} + +/** + * ipa_cfg_aggr_cntr_granularity() - granularity of the AGGR timer configuration + * @aggr_granularity: [in] defines the granularity of AGGR timers + * number of units of 1/32msec + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity) +{ + u32 reg_val = 0; + + if (aggr_granularity <= IPA_AGGR_GRAN_MIN || + aggr_granularity > IPA_AGGR_GRAN_MAX) { + IPAERR("bad param, aggr_granularity = %d\n", + aggr_granularity); + return -EINVAL; + } + IPADBG("aggr_granularity=%d\n", aggr_granularity); + + reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST); + reg_val = (reg_val & ~IPA_COUNTER_CFG_AGGR_GRAN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, aggr_granularity - 1, + IPA_COUNTER_CFG_AGGR_GRAN_SHFT, + IPA_COUNTER_CFG_AGGR_GRAN_BMSK); + + 
ipa_write_reg(ipa_ctx->mmio, + IPA_COUNTER_CFG_OFST, reg_val); + + return 0; + +} +EXPORT_SYMBOL(ipa_cfg_aggr_cntr_granularity); + +/** + * ipa_cfg_eot_coal_cntr_granularity() - granularity of EOT_COAL timer + * configuration + * @eot_coal_granularity: defines the granularity of EOT_COAL timers + * number of units of 1/32msec + * + * Returns: 0 on success, negative on failure + */ +int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity) +{ + u32 reg_val = 0; + + if (eot_coal_granularity <= IPA_EOT_COAL_GRAN_MIN || + eot_coal_granularity > IPA_EOT_COAL_GRAN_MAX) { + IPAERR("bad parm, eot_coal_granularity = %d\n", + eot_coal_granularity); + return -EINVAL; + } + IPADBG("eot_coal_granularity=%d\n", eot_coal_granularity); + + reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST); + reg_val = (reg_val & ~IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, eot_coal_granularity - 1, + IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT, + IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_COUNTER_CFG_OFST, reg_val); + + return 0; + +} +EXPORT_SYMBOL(ipa_cfg_eot_coal_cntr_granularity); + +const char *ipa_get_mode_type_str(enum ipa_mode_type mode) +{ + switch (mode) { + case (IPA_BASIC): + return "Basic"; + case (IPA_ENABLE_FRAMING_HDLC): + return "HDLC framing"; + case (IPA_ENABLE_DEFRAMING_HDLC): + return "HDLC de-framing"; + case (IPA_DMA): + return "DMA"; + } + + return "undefined"; +} + +void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number, + const struct ipa_ep_cfg_mode *ep_mode) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode, + IPA_ENDP_INIT_MODE_N_MODE_SHFT, + IPA_ENDP_INIT_MODE_N_MODE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number, + IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1, + IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe_number), reg_val); +} + +void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 
dst_pipe_number, + const struct ipa_ep_cfg_mode *ep_mode) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode, + IPA_ENDP_INIT_MODE_N_MODE_SHFT, + IPA_ENDP_INIT_MODE_N_MODE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number, + IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0, + IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe_number), reg_val); +} + +/** + * ipa2_cfg_ep_mode() - IPA end-point mode configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) +{ + int ep; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) { + IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl); + return -EINVAL; + } + + ep = ipa2_get_ep_mapping(ep_mode->dst); + if (ep == -1 && ep_mode->mode == IPA_DMA) { + IPAERR("dst %d does not exist\n", ep_mode->dst); + return -EINVAL; + } + + WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst)); + + if (!IPA_CLIENT_IS_CONS(ep_mode->dst)) + ep = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + + IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d", + clnt_hdl, + ep_mode->mode, + ipa_get_mode_type_str(ep_mode->mode), + ep_mode->dst); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode; + ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl, + 
ipa_ctx->ep[clnt_hdl].dst_pipe_index, + ep_mode); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +const char *get_aggr_enable_str(enum ipa_aggr_en_type aggr_en) +{ + switch (aggr_en) { + case (IPA_BYPASS_AGGR): + return "no aggregation"; + case (IPA_ENABLE_AGGR): + return "aggregation enabled"; + case (IPA_ENABLE_DEAGGR): + return "de-aggregation enabled"; + } + + return "undefined"; +} + +const char *get_aggr_type_str(enum ipa_aggr_type aggr_type) +{ + switch (aggr_type) { + case (IPA_MBIM_16): + return "MBIM_16"; + case (IPA_HDLC): + return "HDLC"; + case (IPA_TLP): + return "TLP"; + case (IPA_RNDIS): + return "RNDIS"; + case (IPA_GENERIC): + return "GENERIC"; + case (IPA_QCMAP): + return "QCMAP"; + } + return "undefined"; +} + +void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number, + const struct ipa_ep_cfg_aggr *ep_aggr) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en, + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr, + IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit, + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit, + IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe_number), reg_val); +} + +void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number, + const struct ipa_ep_cfg_aggr *ep_aggr) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en, + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr, + IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit, + IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT, 
+ IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit, + IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_pkt_limit, + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_sw_eof_active, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT, + IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe_number), reg_val); +} + +/** + * ipa2_cfg_ep_aggr() - IPA end-point aggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n", + clnt_hdl, + ep_aggr->aggr_en, + get_aggr_enable_str(ep_aggr->aggr_en), + ep_aggr->aggr, + get_aggr_type_str(ep_aggr->aggr), + ep_aggr->aggr_byte_limit, + ep_aggr->aggr_time_limit); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index) +{ + int reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index, + IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT, + IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK); + + ipa_write_reg(ipa_ctx->mmio, 
+ IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe_index), + reg_val); +} + +void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index) +{ + int reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index, + IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT, + IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe_index), + reg_val); +} + +/** + * ipa2_cfg_ep_route() - IPA end-point routing configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) { + IPAERR("ROUTE does not apply to IPA out EP %d\n", + clnt_hdl); + return -EINVAL; + } + + /* + * if DMA mode was configured previously for this EP, return with + * success + */ + if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) { + IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n", + clnt_hdl); + return 0; + } + + if (ep_route->rt_tbl_hdl) + IPAERR("client specified non-zero RT TBL hdl - ignore it\n"); + + IPADBG("pipe=%d, rt_tbl_hdl=%d\n", + clnt_hdl, + ep_route->rt_tbl_hdl); + + /* always use "default" routing table when programming EP ROUTE reg */ + if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) + ipa_ctx->ep[clnt_hdl].rt_tbl_idx = + IPA_MEM_PART(v4_apps_rt_index_lo); + else + ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl, + ipa_ctx->ep[clnt_hdl].rt_tbl_idx); + + 
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +void _ipa_cfg_ep_holb_v1_1(u32 pipe_number, + const struct ipa_ep_cfg_holb *ep_holb) +{ + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe_number), + ep_holb->en); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe_number), + (u16)ep_holb->tmr_val); +} + +void _ipa_cfg_ep_holb_v2_0(u32 pipe_number, + const struct ipa_ep_cfg_holb *ep_holb) +{ + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number), + ep_holb->en); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number), + (u16)ep_holb->tmr_val); +} + +void _ipa_cfg_ep_holb_v2_5(u32 pipe_number, + const struct ipa_ep_cfg_holb *ep_holb) +{ + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number), + ep_holb->en); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number), + ep_holb->tmr_val); +} + +void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number, + const struct ipa_ep_cfg_holb *ep_holb) +{ + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number), + ep_holb->en); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number), + ep_holb->tmr_val); +} + +/** + * ipa2_cfg_ep_holb() - IPA end-point holb configuration + * + * If an IPA producer pipe is full, IPA HW by default will block + * indefinitely till space opens up. During this time no packets + * including those from unrelated pipes will be processed. Enabling + * HOLB means IPA HW will be allowed to drop packets as/when needed + * and indefinite blocking is avoided. 
+ * + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL || + ep_holb->tmr_val > ipa_ctx->ctrl->max_holb_tmr_val || + ep_holb->en > 1) { + IPAERR("bad parm.\n"); + return -EINVAL; + } + + if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) { + IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl); + return -EINVAL; + } + + if (!ipa_ctx->ctrl->ipa_cfg_ep_holb) { + IPAERR("HOLB is not supported for this IPA core\n"); + return -EINVAL; + } + + ipa_ctx->ep[clnt_hdl].holb = *ep_holb; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl, + ep_holb->tmr_val); + + return 0; +} + +/** + * ipa2_cfg_ep_holb_by_client() - IPA end-point holb configuration + * + * Wrapper function for ipa_cfg_ep_holb() with client name instead of + * client handle. This function is used for clients that does not have + * client handle. 
+ * + * @client: [in] client name + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + */ +int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client, + const struct ipa_ep_cfg_holb *ep_holb) +{ + return ipa2_cfg_ep_holb(ipa2_get_ep_mapping(client), ep_holb); +} + +static int _ipa_cfg_ep_deaggr_v1_1(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + IPADBG("Not supported for version 1.1\n"); + return 0; +} + +static int _ipa_cfg_ep_deaggr_v2_0(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->deaggr_hdr_len, + IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT, + IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_valid, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_location, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT, + IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK); + + IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->max_packet_len, + IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT, + IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(clnt_hdl), reg_val); + + return 0; +} + +/** + * ipa2_cfg_ep_deaggr() - IPA end-point deaggregation configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ep_deaggr: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_deaggr(u32 clnt_hdl, + const struct ipa_ep_cfg_deaggr *ep_deaggr) +{ + struct ipa_ep_context *ep; + + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; 
+ } + + IPADBG("pipe=%d deaggr_hdr_len=%d\n", + clnt_hdl, + ep_deaggr->deaggr_hdr_len); + + IPADBG("packet_offset_valid=%d\n", + ep_deaggr->packet_offset_valid); + + IPADBG("packet_offset_location=%d max_packet_len=%d\n", + ep_deaggr->packet_offset_location, + ep_deaggr->max_packet_len); + + ep = &ipa_ctx->ep[clnt_hdl]; + + /* copy over EP cfg */ + ep->cfg.deaggr = *ep_deaggr; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} + +static void _ipa_cfg_ep_metadata_v1_1(u32 pipe_number, + const struct ipa_ep_cfg_metadata *meta) +{ + IPADBG("Not supported for version 1.1\n"); +} + +static void _ipa_cfg_ep_metadata_v2_0(u32 pipe_number, + const struct ipa_ep_cfg_metadata *meta) +{ + u32 reg_val = 0; + + IPA_SETFIELD_IN_REG(reg_val, meta->qmap_id, + IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT, + IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK); + + ipa_write_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_HDR_METADATA_n_OFST(pipe_number), + reg_val); +} + +/** + * ipa2_cfg_ep_metadata() - IPA end-point metadata configuration + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * @ipa_ep_cfg: [in] IPA end-point configuration params + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md) +{ + if (clnt_hdl >= ipa_ctx->ipa_num_pipes || + ipa_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) { + IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n", + clnt_hdl, ipa_ctx->ep[clnt_hdl].valid); + return -EINVAL; + } + + IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id); + + /* copy over EP cfg */ + ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md; + + IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl)); + + ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md); + 
ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1; + ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr); + + IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl)); + + return 0; +} +EXPORT_SYMBOL(ipa2_cfg_ep_metadata); + +int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in) +{ + struct ipa_ep_cfg_metadata meta; + struct ipa_ep_context *ep; + int ipa_ep_idx; + int result = -EINVAL; + + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return -EINVAL; + } + + if (param_in->client >= IPA_CLIENT_MAX) { + IPAERR_RL("bad parm client:%d\n", param_in->client); + goto fail; + } + + ipa_ep_idx = ipa2_get_ep_mapping(param_in->client); + if (ipa_ep_idx == -1) { + IPAERR_RL("Invalid client.\n"); + goto fail; + } + + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (!ep->valid) { + IPAERR_RL("EP not allocated.\n"); + goto fail; + } + + meta.qmap_id = param_in->qmap_id; + if (param_in->client == IPA_CLIENT_USB_PROD || + param_in->client == IPA_CLIENT_HSIC1_PROD || + param_in->client == IPA_CLIENT_ODU_PROD || + param_in->client == IPA_CLIENT_ETHERNET_PROD) { + result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta); + } else if (param_in->client == IPA_CLIENT_WLAN1_PROD) { + ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta; + result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id); + if (result) + IPAERR_RL("qmap_id %d write failed on ep=%d\n", + meta.qmap_id, ipa_ep_idx); + result = 0; + } + +fail: + return result; +} + +/** + * ipa_dump_buff_internal() - dumps buffer for debug purposes + * @base: buffer base address + * @phy_base: buffer physical base address + * @size: size of the buffer + */ +void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size) +{ + int i; + u32 *cur = (u32 *)base; + u8 *byt; + + IPADBG("system phys addr=%pa len=%u\n", &phy_base, size); + for (i = 0; i < size / 4; i++) { + byt = (u8 *)(cur + i); + IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i), + byt[0], byt[1], byt[2], byt[3]); + } + 
IPADBG("END\n"); +} + +/** + * void ipa_rx_timeout_min_max_calc() - calc min max timeout time of rx polling + * @time: time fom dtsi entry or from debugfs file system + * @min: rx polling min timeout + * @max: rx polling max timeout + * Maximum time could be of 10Msec allowed. + */ +void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time) +{ + if ((time >= MIN_RX_POLL_TIME) && + (time <= MAX_RX_POLL_TIME)) { + *min = (time * MSEC) + LOWER_CUTOFF; + *max = (time * MSEC) + UPPER_CUTOFF; + } else { + /* Setting up the default min max time */ + IPADBG("Setting up default rx polling timeout\n"); + *min = (MIN_RX_POLL_TIME * MSEC) + + LOWER_CUTOFF; + *max = (MIN_RX_POLL_TIME * MSEC) + + UPPER_CUTOFF; + } + IPADBG("Rx polling timeout Min = %u len = %u\n", *min, *max); +} + +/** + * ipa_pipe_mem_init() - initialize the pipe memory + * @start_ofst: start offset + * @size: size + * + * Return value: + * 0: success + * -ENOMEM: no memory + */ +int ipa_pipe_mem_init(u32 start_ofst, u32 size) +{ + int res; + u32 aligned_start_ofst; + u32 aligned_size; + struct gen_pool *pool; + + if (!size) { + IPAERR("no IPA pipe memory allocated\n"); + goto fail; + } + + aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst); + aligned_size = size - (aligned_start_ofst - start_ofst); + + IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n", + start_ofst, aligned_start_ofst, size, aligned_size); + + /* allocation order of 8 i.e. 
128 bytes, global pool */ + pool = gen_pool_create(8, -1); + if (!pool) { + IPAERR("Failed to create a new memory pool.\n"); + goto fail; + } + + res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1); + if (res) { + IPAERR("Failed to add memory to IPA pipe pool\n"); + goto err_pool_add; + } + + ipa_ctx->pipe_mem_pool = pool; + return 0; + +err_pool_add: + gen_pool_destroy(pool); +fail: + return -ENOMEM; +} + +/** + * ipa_pipe_mem_alloc() - allocate pipe memory + * @ofst: offset + * @size: size + * + * Return value: + * 0: success + */ +int ipa_pipe_mem_alloc(u32 *ofst, u32 size) +{ + u32 vaddr; + int res = -1; + + if (!ipa_ctx->pipe_mem_pool || !size) { + IPAERR("failed size=%u pipe_mem_pool=%p\n", size, + ipa_ctx->pipe_mem_pool); + return res; + } + + vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size); + + if (vaddr) { + *ofst = vaddr; + res = 0; + IPADBG("size=%u ofst=%u\n", size, vaddr); + } else { + IPAERR("size=%u failed\n", size); + } + + return res; +} + +/** + * ipa_pipe_mem_free() - free pipe memory + * @ofst: offset + * @size: size + * + * Return value: + * 0: success + */ +int ipa_pipe_mem_free(u32 ofst, u32 size) +{ + IPADBG("size=%u ofst=%u\n", size, ofst); + if (ipa_ctx->pipe_mem_pool && size) + gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size); + return 0; +} + +/** + * ipa2_set_aggr_mode() - Set the aggregation mode which is a global setting + * @mode: [in] the desired aggregation mode for e.g. 
straight MBIM, QCNCM, + * etc + * + * Returns: 0 on success + */ +int ipa2_set_aggr_mode(enum ipa_aggr_mode mode) +{ + u32 reg_val; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST); + ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) | + (reg_val & 0xfffffffe)); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa2_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation + * mode + * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be + * "QND") + * + * Set the NDP signature used for QCNCM aggregation mode. The fourth byte + * (expected to be 'P') needs to be set using the header addition mechanism + * + * Returns: 0 on success, negative on failure + */ +int ipa2_set_qcncm_ndp_sig(char sig[3]) +{ + u32 reg_val; + + if (sig == NULL) { + IPAERR("bad argument for ipa_set_qcncm_ndp_sig/n"); + return -EINVAL; + } + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST); + ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 | + (sig[1] << 12) | (sig[2] << 4) | + (reg_val & 0xf000000f)); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa2_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame + * configuration + * @enable: [in] true for single NDP/MBIM; false otherwise + * + * Returns: 0 on success + */ +int ipa2_set_single_ndp_per_mbim(bool enable) +{ + u32 reg_val; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST); + ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST, + (enable & 0x1) | (reg_val & 0xfffffffe)); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + + return 0; +} + +/** + * ipa_set_hw_timer_fix_for_mbim_aggr() - Enable/disable HW timer fix + * for MBIM aggregation. 
+ * @enable: [in] true for enable HW fix; false otherwise + * + * Returns: 0 on success + */ +int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable) +{ + u32 reg_val; + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST); + ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST, + (enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) | + (reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK)); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return 0; +} +EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr); + +/** + * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary + * @start: start address of the memory buffer + * @end: end address of the memory buffer + * @boundary: boundary + * + * Return value: + * 1: if the interval [start, end] straddles boundary + * 0: otherwise + */ +int ipa_straddle_boundary(u32 start, u32 end, u32 boundary) +{ + u32 next_start; + u32 prev_end; + + IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary); + + next_start = (start + (boundary - 1)) & ~(boundary - 1); + prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary; + + while (next_start < prev_end) + next_start += boundary; + + if (next_start == prev_end) + return 1; + else + return 0; +} + +/** + * ipa2_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM + * + * Function is rate limited to avoid flooding kernel log buffer + */ +void ipa2_bam_reg_dump(void) +{ + static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1); + + if (__ratelimit(&_rs)) { + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + pr_err("IPA BAM START\n"); + if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) { + sps_get_bam_debug_info(ipa_ctx->bam_handle, 5, + 511950, 0, 0); + sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0, + 0, 0); + } else { + sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, + (SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_CONS)) + | + SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))), + 0, 2); + } + 
IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + } +} + +static void ipa_init_mem_partition_v2(void) +{ + IPADBG("Memory partition IPA 2\n"); + IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST; + IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE; + IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst), + IPA_MEM_PART(nat_size)); + + IPA_MEM_PART(ofst_start) = IPA_MEM_v2_RAM_OFST_START; + IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start)); + + IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_RAM_V4_FLT_OFST; + IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_RAM_V4_FLT_SIZE; + IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR; + IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size), + IPA_MEM_PART(v4_flt_size_ddr)); + + IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_RAM_V6_FLT_OFST; + IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_RAM_V6_FLT_SIZE; + IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR; + IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size), + IPA_MEM_PART(v6_flt_size_ddr)); + + IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_RAM_V4_RT_OFST; + IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst)); + + IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_RAM_V4_NUM_INDEX; + IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index)); + + IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_V4_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_V4_MODEM_RT_INDEX_HI; + IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_modem_rt_index_lo), + IPA_MEM_PART(v4_modem_rt_index_hi)); + + IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_V4_APPS_RT_INDEX_LO; + IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_V4_APPS_RT_INDEX_HI; + IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_apps_rt_index_lo), + IPA_MEM_PART(v4_apps_rt_index_hi)); + + IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_RAM_V4_RT_SIZE; + IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR; + IPADBG("V4 RT SIZE 0x%x DDR SIZE 
0x%x\n", IPA_MEM_PART(v4_rt_size), + IPA_MEM_PART(v4_rt_size_ddr)); + + IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_RAM_V6_RT_OFST; + IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst)); + + IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_RAM_V6_NUM_INDEX; + IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index)); + + IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_V6_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_V6_MODEM_RT_INDEX_HI; + IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_modem_rt_index_lo), + IPA_MEM_PART(v6_modem_rt_index_hi)); + + IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_V6_APPS_RT_INDEX_LO; + IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_V6_APPS_RT_INDEX_HI; + IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_apps_rt_index_lo), + IPA_MEM_PART(v6_apps_rt_index_hi)); + + IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_RAM_V6_RT_SIZE; + IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR; + IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size), + IPA_MEM_PART(v6_rt_size_ddr)); + + IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_RAM_MODEM_HDR_OFST; + IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_RAM_MODEM_HDR_SIZE; + IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size)); + + IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_RAM_APPS_HDR_OFST; + IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_RAM_APPS_HDR_SIZE; + IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_RAM_HDR_SIZE_DDR; + IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size), + IPA_MEM_PART(apps_hdr_size_ddr)); + + IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_RAM_MODEM_OFST; + IPA_MEM_PART(modem_size) = IPA_MEM_v2_RAM_MODEM_SIZE; + IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), + IPA_MEM_PART(modem_size)); + + IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_RAM_APPS_V4_FLT_OFST; + IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE; + 
IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size)); + + IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_RAM_APPS_V6_FLT_OFST; + IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE; + IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size)); + + IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_RAM_UC_INFO_OFST; + IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_RAM_UC_INFO_SIZE; + IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst), + IPA_MEM_PART(uc_info_size)); + + IPA_MEM_PART(end_ofst) = IPA_MEM_v2_RAM_END_OFST; + IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_RAM_APPS_V4_RT_OFST; + IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_RAM_APPS_V4_RT_SIZE; + IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_RAM_APPS_V6_RT_OFST; + IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_RAM_APPS_V6_RT_SIZE; +} + +static void ipa_init_mem_partition_v2_5(void) +{ + IPADBG("Memory partition IPA 2.5\n"); + IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST; + IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE; + IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst), + IPA_MEM_PART(nat_size)); + + IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_5_RAM_UC_INFO_OFST; + IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_5_RAM_UC_INFO_SIZE; + IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst), + IPA_MEM_PART(uc_info_size)); + + IPA_MEM_PART(ofst_start) = IPA_MEM_v2_5_RAM_OFST_START; + IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start)); + + IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_5_RAM_V4_FLT_OFST; + IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_5_RAM_V4_FLT_SIZE; + IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR; + IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size), + IPA_MEM_PART(v4_flt_size_ddr)); + + IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_5_RAM_V6_FLT_OFST; + IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_5_RAM_V6_FLT_SIZE; + 
IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR; + IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size), + IPA_MEM_PART(v6_flt_size_ddr)); + + IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_5_RAM_V4_RT_OFST; + IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst)); + + IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_5_RAM_V4_NUM_INDEX; + IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index)); + + IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI; + IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_modem_rt_index_lo), + IPA_MEM_PART(v4_modem_rt_index_hi)); + + IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO; + IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI; + IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_apps_rt_index_lo), + IPA_MEM_PART(v4_apps_rt_index_hi)); + + IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_5_RAM_V4_RT_SIZE; + IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR; + IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size), + IPA_MEM_PART(v4_rt_size_ddr)); + + IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_5_RAM_V6_RT_OFST; + IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst)); + + IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_5_RAM_V6_NUM_INDEX; + IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index)); + + IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI; + IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_modem_rt_index_lo), + IPA_MEM_PART(v6_modem_rt_index_hi)); + + IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO; + IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI; + IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_apps_rt_index_lo), + IPA_MEM_PART(v6_apps_rt_index_hi)); + 
+ IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_5_RAM_V6_RT_SIZE; + IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR; + IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size), + IPA_MEM_PART(v6_rt_size_ddr)); + + IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_5_RAM_MODEM_HDR_OFST; + IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE; + IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size)); + + IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_5_RAM_APPS_HDR_OFST; + IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_5_RAM_APPS_HDR_SIZE; + IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_5_RAM_HDR_SIZE_DDR; + IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size), + IPA_MEM_PART(apps_hdr_size_ddr)); + + IPA_MEM_PART(modem_hdr_proc_ctx_ofst) = + IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST; + IPA_MEM_PART(modem_hdr_proc_ctx_size) = + IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE; + IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_proc_ctx_ofst), + IPA_MEM_PART(modem_hdr_proc_ctx_size)); + + IPA_MEM_PART(apps_hdr_proc_ctx_ofst) = + IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST; + IPA_MEM_PART(apps_hdr_proc_ctx_size) = + IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE; + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr) = + IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR; + IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_proc_ctx_ofst), + IPA_MEM_PART(apps_hdr_proc_ctx_size), + IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr)); + + IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_5_RAM_MODEM_OFST; + IPA_MEM_PART(modem_size) = IPA_MEM_v2_5_RAM_MODEM_SIZE; + IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), + IPA_MEM_PART(modem_size)); + + IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST; + IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE; + IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n", + 
IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size)); + + IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST; + IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE; + IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size)); + + IPA_MEM_PART(end_ofst) = IPA_MEM_v2_5_RAM_END_OFST; + IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST; + IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE; + IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST; + IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE; +} + +static void ipa_init_mem_partition_v2_6L(void) +{ + IPADBG("Memory partition IPA 2.6Lite\n"); + IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST; + IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE; + IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst), + IPA_MEM_PART(nat_size)); + + IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_6L_RAM_UC_INFO_OFST; + IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_6L_RAM_UC_INFO_SIZE; + IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst), + IPA_MEM_PART(uc_info_size)); + + IPA_MEM_PART(ofst_start) = IPA_MEM_v2_6L_RAM_OFST_START; + IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start)); + + IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_6L_RAM_V4_FLT_OFST; + IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_6L_RAM_V4_FLT_SIZE; + IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR; + IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size), + IPA_MEM_PART(v4_flt_size_ddr)); + + IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_6L_RAM_V6_FLT_OFST; + IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_6L_RAM_V6_FLT_SIZE; + IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR; + IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size), + IPA_MEM_PART(v6_flt_size_ddr)); + + IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_6L_RAM_V4_RT_OFST; 
+ IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst)); + + IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_6L_RAM_V4_NUM_INDEX; + IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index)); + + IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI; + IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_modem_rt_index_lo), + IPA_MEM_PART(v4_modem_rt_index_hi)); + + IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO; + IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI; + IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v4_apps_rt_index_lo), + IPA_MEM_PART(v4_apps_rt_index_hi)); + + IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_6L_RAM_V4_RT_SIZE; + IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR; + IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size), + IPA_MEM_PART(v4_rt_size_ddr)); + + IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_6L_RAM_V6_RT_OFST; + IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst)); + + IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_6L_RAM_V6_NUM_INDEX; + IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index)); + + IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO; + IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI; + IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_modem_rt_index_lo), + IPA_MEM_PART(v6_modem_rt_index_hi)); + + IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO; + IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI; + IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n", + IPA_MEM_PART(v6_apps_rt_index_lo), + IPA_MEM_PART(v6_apps_rt_index_hi)); + + IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_6L_RAM_V6_RT_SIZE; + IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR; + IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size), + IPA_MEM_PART(v6_rt_size_ddr)); + + 
IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST; + IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE; + IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size)); + + IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_6L_RAM_APPS_HDR_OFST; + IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE; + IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR; + IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n", + IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size), + IPA_MEM_PART(apps_hdr_size_ddr)); + + IPA_MEM_PART(modem_comp_decomp_ofst) = + IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST; + IPA_MEM_PART(modem_comp_decomp_size) = + IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE; + IPADBG("MODEM COMP DECOMP OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(modem_comp_decomp_ofst), + IPA_MEM_PART(modem_comp_decomp_size)); + + IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_6L_RAM_MODEM_OFST; + IPA_MEM_PART(modem_size) = IPA_MEM_v2_6L_RAM_MODEM_SIZE; + IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst), + IPA_MEM_PART(modem_size)); + + IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST; + IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE; + IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size)); + + IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST; + IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE; + IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n", + IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size)); + + IPA_MEM_PART(end_ofst) = IPA_MEM_v2_6L_RAM_END_OFST; + IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST; + IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE; + IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST; + IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE; +} + +/** + * 
ipa_controller_shared_static_bind() - set the appropriate shared methods for + * for IPA HW version 2.0, 2.5, 2.6 and 2.6L + * + * @ctrl: data structure which holds the function pointers + */ +void ipa_controller_shared_static_bind(struct ipa_controller *ctrl) +{ + ctrl->ipa_init_rt4 = _ipa_init_rt4_v2; + ctrl->ipa_init_rt6 = _ipa_init_rt6_v2; + ctrl->ipa_init_flt4 = _ipa_init_flt4_v2; + ctrl->ipa_init_flt6 = _ipa_init_flt6_v2; + ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v2_0; + ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v2_0; + ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v2_0; + ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v2_0; + ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v2_0; + ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v2_0; + ctrl->ipa_cfg_route = _ipa_cfg_route_v2_0; + ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v2_0; + ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v2_0; + ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v2_0; + ctrl->ipa_clk_rate_turbo = IPA_V2_0_CLK_RATE_TURBO; + ctrl->ipa_clk_rate_nominal = IPA_V2_0_CLK_RATE_NOMINAL; + ctrl->ipa_clk_rate_svs = IPA_V2_0_CLK_RATE_SVS; + ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v2_0; + ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v2_0; + ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v2_0; + ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v2_0; + ctrl->ipa_commit_flt = __ipa_commit_flt_v2; + ctrl->ipa_commit_rt = __ipa_commit_rt_v2; + ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2; + ctrl->ipa_enable_clks = _ipa_enable_clks_v2_0; + ctrl->ipa_disable_clks = _ipa_disable_clks_v2_0; + ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v2_0; + ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v2_0; + ctrl->clock_scaling_bw_threshold_nominal = + IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS; + ctrl->clock_scaling_bw_threshold_turbo = + IPA_V2_0_BW_THRESHOLD_TURBO_MBPS; +} + +/** + * ipa_ctrl_static_bind() - set the appropriate methods for + * IPA Driver based on the HW version + * + * @ctrl: data structure which holds the function pointers + * 
@hw_type: the HW type in use + * + * This function can avoid the runtime assignment by using C99 special + * struct initialization - hard decision... time.vs.mem + */ +int ipa_controller_static_bind(struct ipa_controller *ctrl, + enum ipa_hw_type hw_type) +{ + switch (hw_type) { + case (IPA_HW_v1_1): + ipa_init_mem_partition_v2(); + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v1_1; + ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v1_1; + ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v1_1; + ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v1_1; + ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v1_1; + ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v1_1; + ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v1_1; + ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v1_1; + ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v1_1; + ctrl->ipa_cfg_route = _ipa_cfg_route_v1_1; + ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v1_1; + ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v1_1; + ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v1_1; + ctrl->ipa_clk_rate_turbo = IPA_V1_1_CLK_RATE; + ctrl->ipa_clk_rate_nominal = IPA_V1_1_CLK_RATE; + ctrl->ipa_clk_rate_svs = IPA_V1_1_CLK_RATE; + ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v1_1; + ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v1_1; + ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v1_1; + ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v1_1; + ctrl->ipa_commit_flt = __ipa_commit_flt_v1_1; + ctrl->ipa_commit_rt = __ipa_commit_rt_v1_1; + ctrl->ipa_commit_hdr = __ipa_commit_hdr_v1_1; + ctrl->ipa_enable_clks = _ipa_enable_clks_v1_1; + ctrl->ipa_disable_clks = _ipa_disable_clks_v1_1; + ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v1_1; + ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v1_1; + ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0; + ctrl->max_holb_tmr_val = IPA_V1_MAX_HOLB_TMR_VAL; + break; + case (IPA_HW_v2_0): + ipa_init_mem_partition_v2(); + ipa_controller_shared_static_bind(ctrl); + ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_0; + ctrl->ipa_reg_base_ofst = 
IPA_REG_BASE_OFST_v2_0; + ctrl->max_holb_tmr_val = IPA_V2_0_MAX_HOLB_TMR_VAL; + ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_0; + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_0; + ctrl->ipa_init_sram = _ipa_init_sram_v2; + ctrl->ipa_init_hdr = _ipa_init_hdr_v2; + ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2; + ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2; + break; + case (IPA_HW_v2_5): + ipa_init_mem_partition_v2_5(); + ipa_controller_shared_static_bind(ctrl); + ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_5; + ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_5; + ctrl->max_holb_tmr_val = IPA_V2_5_MAX_HOLB_TMR_VAL; + ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_5; + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_5; + ctrl->ipa_init_sram = _ipa_init_sram_v2_5; + ctrl->ipa_init_hdr = _ipa_init_hdr_v2_5; + ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_5; + ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_5; + break; + case (IPA_HW_v2_6L): + ipa_init_mem_partition_v2_6L(); + ipa_controller_shared_static_bind(ctrl); + ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_6L; + ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_6L; + ctrl->max_holb_tmr_val = IPA_V2_6L_MAX_HOLB_TMR_VAL; + ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_6L; + ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_6L; + ctrl->ipa_init_sram = _ipa_init_sram_v2_6L; + ctrl->ipa_init_hdr = _ipa_init_hdr_v2_6L; + ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_6L; + ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_6L; + break; + default: + return -EPERM; + } + + return 0; +} + +void ipa_skb_recycle(struct sk_buff *skb) +{ + struct skb_shared_info *shinfo; + + shinfo = skb_shinfo(skb); + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); + atomic_set(&shinfo->dataref, 1); + + memset(skb, 0, offsetof(struct sk_buff, tail)); + skb->data = skb->head + NET_SKB_PAD; + skb_reset_tail_pointer(skb); +} + +int ipa_id_alloc(void *ptr) +{ + int 
id; + + idr_preload(GFP_KERNEL); + spin_lock(&ipa_ctx->idr_lock); + id = idr_alloc(&ipa_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT); + spin_unlock(&ipa_ctx->idr_lock); + idr_preload_end(); + + return id; +} + +void *ipa_id_find(u32 id) +{ + void *ptr; + + spin_lock(&ipa_ctx->idr_lock); + ptr = idr_find(&ipa_ctx->ipa_idr, id); + spin_unlock(&ipa_ctx->idr_lock); + + return ptr; +} + +void ipa_id_remove(u32 id) +{ + spin_lock(&ipa_ctx->idr_lock); + idr_remove(&ipa_ctx->ipa_idr, id); + spin_unlock(&ipa_ctx->idr_lock); +} + +static void ipa_tag_free_buf(void *user1, int user2) +{ + kfree(user1); +} + +static void ipa_tag_free_skb(void *user1, int user2) +{ + dev_kfree_skb_any((struct sk_buff *)user1); +} + +#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4 + +/* ipa_tag_process() - Initiates a tag process. Incorporates the input + * descriptors + * + * @desc: descriptors with commands for IC + * @desc_size: amount of descriptors in the above variable + * + * Note: The descriptors are copied (if there's room), the client needs to + * free his descriptors afterwards + * + * Return: 0 or negative in case of failure + */ +int ipa_tag_process(struct ipa_desc desc[], + int descs_num, + unsigned long timeout) +{ + struct ipa_sys_context *sys; + struct ipa_desc *tag_desc; + int desc_idx = 0; + struct ipa_ip_packet_init *pkt_init; + struct ipa_register_write *reg_write_nop; + struct ipa_ip_packet_tag_status *status; + int i; + struct sk_buff *dummy_skb; + int res; + struct ipa_tag_completion *comp; + int ep_idx; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); + + /* Not enough room for the required descriptors for the tag process */ + if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) { + IPAERR("up to %d descriptors are allowed (received %d)\n", + IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS, + descs_num); + return -ENOMEM; + } + + ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD); + if (-1 == ep_idx) { + IPAERR("Client %u is not mapped\n", + IPA_CLIENT_APPS_CMD_PROD); + return -EFAULT; + } + sys = ipa_ctx->ep[ep_idx].sys; + + tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, flag); + if (!tag_desc) { + IPAERR("failed to allocate memory\n"); + res = -ENOMEM; + goto fail_alloc_desc; + } + + /* IP_PACKET_INIT IC for tag status to be sent to apps */ + pkt_init = kzalloc(sizeof(*pkt_init), flag); + if (!pkt_init) { + IPAERR("failed to allocate memory\n"); + res = -ENOMEM; + goto fail_alloc_pkt_init; + } + + pkt_init->destination_pipe_index = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + + tag_desc[desc_idx].opcode = IPA_IP_PACKET_INIT; + tag_desc[desc_idx].pyld = pkt_init; + tag_desc[desc_idx].len = sizeof(*pkt_init); + tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; + tag_desc[desc_idx].callback = ipa_tag_free_buf; + tag_desc[desc_idx].user1 = pkt_init; + desc_idx++; + + /* NO-OP IC for ensuring that IPA pipeline is empty */ + reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag); + if (!reg_write_nop) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto fail_free_desc; + } + + reg_write_nop->skip_pipeline_clear = 0; + reg_write_nop->value_mask = 0x0; + + tag_desc[desc_idx].opcode = IPA_REGISTER_WRITE; + tag_desc[desc_idx].pyld = reg_write_nop; + tag_desc[desc_idx].len = sizeof(*reg_write_nop); + tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; + tag_desc[desc_idx].callback = ipa_tag_free_buf; + tag_desc[desc_idx].user1 = reg_write_nop; + desc_idx++; + + /* status IC */ + status = kzalloc(sizeof(*status), flag); + if (!status) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto 
fail_free_desc; + } + + status->tag_f_2 = IPA_COOKIE; + + tag_desc[desc_idx].opcode = IPA_IP_PACKET_TAG_STATUS; + tag_desc[desc_idx].pyld = status; + tag_desc[desc_idx].len = sizeof(*status); + tag_desc[desc_idx].type = IPA_IMM_CMD_DESC; + tag_desc[desc_idx].callback = ipa_tag_free_buf; + tag_desc[desc_idx].user1 = status; + desc_idx++; + + /* Copy the required descriptors from the client now */ + if (desc) { + memcpy(&(tag_desc[desc_idx]), desc, descs_num * + sizeof(struct ipa_desc)); + desc_idx += descs_num; + } + + comp = kzalloc(sizeof(*comp), GFP_KERNEL); + if (!comp) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto fail_free_desc; + } + init_completion(&comp->comp); + + /* completion needs to be released from both here and rx handler */ + atomic_set(&comp->cnt, 2); + + /* dummy packet to send to IPA. packet payload is a completion object */ + dummy_skb = alloc_skb(sizeof(comp), flag); + if (!dummy_skb) { + IPAERR("failed to allocate memory\n"); + res = -ENOMEM; + goto fail_free_skb; + } + + memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp)); + + tag_desc[desc_idx].pyld = dummy_skb->data; + tag_desc[desc_idx].len = dummy_skb->len; + tag_desc[desc_idx].type = IPA_DATA_DESC_SKB; + tag_desc[desc_idx].callback = ipa_tag_free_skb; + tag_desc[desc_idx].user1 = dummy_skb; + desc_idx++; + + /* send all descriptors to IPA with single EOT */ + res = ipa_send(sys, desc_idx, tag_desc, true); + if (res) { + IPAERR("failed to send TAG packets %d\n", res); + res = -ENOMEM; + goto fail_send; + } + kfree(tag_desc); + tag_desc = NULL; + + IPADBG("waiting for TAG response\n"); + res = wait_for_completion_timeout(&comp->comp, timeout); + if (res == 0) { + IPAERR("timeout (%lu msec) on waiting for TAG response\n", + timeout); + WARN_ON(1); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + return -ETIME; + } + + IPADBG("TAG response arrived!\n"); + if (atomic_dec_return(&comp->cnt) == 0) + kfree(comp); + + /* sleep for short period to ensure IPA wrote all packets 
to BAM */ + usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC); + + return 0; + +fail_send: + dev_kfree_skb_any(dummy_skb); + desc_idx--; +fail_free_skb: + kfree(comp); +fail_free_desc: + /* + * Free only the first descriptors allocated here. + * [pkt_init, status, nop] + * The user is responsible to free his allocations + * in case of failure. + * The min is required because we may fail during + * of the initial allocations above + */ + for (i = 0; i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS-1, desc_idx); i++) + kfree(tag_desc[i].user1); + +fail_alloc_pkt_init: + kfree(tag_desc); +fail_alloc_desc: + return res; +} + +/** + * ipa_tag_generate_force_close_desc() - generate descriptors for force close + * immediate command + * + * @desc: descriptors for IC + * @desc_size: desc array size + * @start_pipe: first pipe to close aggregation + * @end_pipe: last (non-inclusive) pipe to close aggregation + * + * Return: number of descriptors written or negative in case of failure + */ +static int ipa_tag_generate_force_close_desc(struct ipa_desc desc[], + int desc_size, int start_pipe, int end_pipe) +{ + int i; + u32 aggr_init; + int desc_idx = 0; + int res; + struct ipa_register_write *reg_write_agg_close; + + for (i = start_pipe; i < end_pipe; i++) { + aggr_init = ipa_read_reg(ipa_ctx->mmio, + IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i)); + if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >> + IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) != IPA_ENABLE_AGGR) + continue; + IPADBG("Force close ep: %d\n", i); + if (desc_idx + 1 > desc_size) { + IPAERR("Internal error - no descriptors\n"); + res = -EFAULT; + goto fail_no_desc; + } + + reg_write_agg_close = kzalloc(sizeof(*reg_write_agg_close), + GFP_KERNEL); + if (!reg_write_agg_close) { + IPAERR("no mem\n"); + res = -ENOMEM; + goto fail_alloc_reg_write_agg_close; + } + + reg_write_agg_close->skip_pipeline_clear = 0; + reg_write_agg_close->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i); + reg_write_agg_close->value = + (1 & 
IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) << + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; + reg_write_agg_close->value_mask = + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK << + IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT; + + desc[desc_idx].opcode = IPA_REGISTER_WRITE; + desc[desc_idx].pyld = reg_write_agg_close; + desc[desc_idx].len = sizeof(*reg_write_agg_close); + desc[desc_idx].type = IPA_IMM_CMD_DESC; + desc[desc_idx].callback = ipa_tag_free_buf; + desc[desc_idx].user1 = reg_write_agg_close; + ++desc_idx; + } + + return desc_idx; + +fail_alloc_reg_write_agg_close: + for (i = 0; i < desc_idx; ++i) + kfree(desc[i].user1); +fail_no_desc: + return res; +} + +/** + * ipa_tag_aggr_force_close() - Force close aggregation + * + * @pipe_num: pipe number or -1 for all pipes + */ +int ipa_tag_aggr_force_close(int pipe_num) +{ + struct ipa_desc *desc; + int res = -1; + int start_pipe; + int end_pipe; + int num_descs; + int num_aggr_descs; + + if (pipe_num < -1 || pipe_num >= (int)ipa_ctx->ipa_num_pipes) { + IPAERR("Invalid pipe number %d\n", pipe_num); + return -EINVAL; + } + + if (pipe_num == -1) { + start_pipe = 0; + end_pipe = ipa_ctx->ipa_num_pipes; + } else { + start_pipe = pipe_num; + end_pipe = pipe_num + 1; + } + + num_descs = end_pipe - start_pipe; + + desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL); + if (!desc) { + IPAERR("no mem\n"); + return -ENOMEM; + } + + /* Force close aggregation on all valid pipes with aggregation */ + num_aggr_descs = ipa_tag_generate_force_close_desc(desc, num_descs, + start_pipe, end_pipe); + if (num_aggr_descs < 0) { + IPAERR("ipa_tag_generate_force_close_desc failed %d\n", + num_aggr_descs); + goto fail_free_desc; + } + + res = ipa_tag_process(desc, num_aggr_descs, + IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT); + +fail_free_desc: + kfree(desc); + + return res; +} + +/** + * ipa2_is_ready() - check if IPA module was initialized + * successfully + * + * Return value: true for yes; false for no + */ +bool ipa2_is_ready(void) +{
+ return (ipa_ctx != NULL) ? true : false; +} + +/** + * ipa2_is_client_handle_valid() - check if IPA client handle is valid handle + * + * Return value: true for yes; false for no + */ +bool ipa2_is_client_handle_valid(u32 clnt_hdl) +{ + if (unlikely(!ipa_ctx)) { + IPAERR("IPA driver was not initialized\n"); + return false; + } + + if (clnt_hdl >= 0 && clnt_hdl < ipa_ctx->ipa_num_pipes) + return true; + return false; +} + +/** + * ipa2_proxy_clk_unvote() - called to remove IPA clock proxy vote + * + * Return value: none + */ +void ipa2_proxy_clk_unvote(void) +{ + if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) { + IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE"); + ipa_ctx->q6_proxy_clk_vote_valid = false; + } +} + +/** + * ipa2_proxy_clk_vote() - called to add IPA clock proxy vote + * + * Return value: none + */ +void ipa2_proxy_clk_vote(void) +{ + if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) { + IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE"); + ipa_ctx->q6_proxy_clk_vote_valid = true; + } +} + + +/** + * ipa2_get_smem_restr_bytes()- Return IPA smem restricted bytes + * + * Return value: u16 - number of IPA smem restricted bytes + */ +u16 ipa2_get_smem_restr_bytes(void) +{ + if (ipa_ctx) + return ipa_ctx->smem_restricted_bytes; + + IPAERR("IPA Driver not initialized\n"); + + return 0; +} + +/** + * ipa2_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt + * + * Return value: true if modem configures embedded pipe flt, false otherwise + */ +bool ipa2_get_modem_cfg_emb_pipe_flt(void) +{ + if (ipa_ctx) + return ipa_ctx->modem_cfg_emb_pipe_flt; + + IPAERR("IPA driver has not been initialized\n"); + + return false; +} +/** + * ipa2_get_transport_type()- Return IPA_TRANSPORT_TYPE_SPS + * + * Return value: enum ipa_transport_type + */ +enum ipa_transport_type ipa2_get_transport_type(void) +{ + return IPA_TRANSPORT_TYPE_SPS; +} + +u32 ipa_get_num_pipes(void) +{ + if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) + return 
ipa_read_reg(ipa_ctx->mmio, IPA_ENABLED_PIPES_OFST); + else + return IPA_MAX_NUM_PIPES; +} +EXPORT_SYMBOL(ipa_get_num_pipes); + +/** + * ipa2_disable_apps_wan_cons_deaggr()- + * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro + * + * Return value: 0 or negative in case of failure + */ +int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) +{ + int res = -1; + + /* checking if IPA-HW can support */ + if ((agg_size >> 10) > + IPA_AGGR_BYTE_LIMIT) { + IPAWANERR("IPA-AGG byte limit %d\n", + IPA_AGGR_BYTE_LIMIT); + IPAWANERR("exceed aggr_byte_limit\n"); + return res; + } + if (agg_count > + IPA_AGGR_PKT_LIMIT) { + IPAWANERR("IPA-AGG pkt limit %d\n", + IPA_AGGR_PKT_LIMIT); + IPAWANERR("exceed aggr_pkt_limit\n"); + return res; + } + + if (ipa_ctx) { + ipa_ctx->ipa_client_apps_wan_cons_agg_gro = true; + return 0; + } + return res; +} + +static const struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info + (enum ipa_client_type client) +{ + IPAERR("Not supported for IPA 2.x\n"); + return NULL; +} + +static int ipa2_stop_gsi_channel(u32 clnt_hdl) +{ + IPAERR("Not supported for IPA 2.x\n"); + return -EFAULT; +} + +static void *ipa2_get_ipc_logbuf(void) +{ + if (ipa_ctx) + return ipa_ctx->logbuf; + + return NULL; +} + +static void *ipa2_get_ipc_logbuf_low(void) +{ + if (ipa_ctx) + return ipa_ctx->logbuf_low; + + return NULL; +} + +static void ipa2_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb) +{ + *holb = ipa_ctx->ep[ep_idx].holb; +} + +static int ipa2_generate_tag_process(void) +{ + int res; + + res = ipa_tag_process(NULL, 0, HZ); + if (res) + IPAERR("TAG process failed\n"); + + return res; +} + +static void ipa2_set_tag_process_before_gating(bool val) +{ + ipa_ctx->tag_process_before_gating = val; +} + +static bool ipa2_pm_is_used(void) +{ + return false; +} + +int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, + struct ipa_api_controller *api_ctrl) +{ + if (ipa_hw_type < IPA_HW_v2_0 || ipa_hw_type >= IPA_HW_v3_0) { + IPAERR("Unsupported IPA HW 
version %d\n", ipa_hw_type); + WARN_ON(1); + return -EPERM; + } + + api_ctrl->ipa_connect = ipa2_connect; + api_ctrl->ipa_disconnect = ipa2_disconnect; + api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint; + api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay; + api_ctrl->ipa_disable_endpoint = ipa2_disable_endpoint; + api_ctrl->ipa_cfg_ep = ipa2_cfg_ep; + api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat; + api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr; + api_ctrl->ipa_cfg_ep_hdr_ext = ipa2_cfg_ep_hdr_ext; + api_ctrl->ipa_cfg_ep_mode = ipa2_cfg_ep_mode; + api_ctrl->ipa_cfg_ep_aggr = ipa2_cfg_ep_aggr; + api_ctrl->ipa_cfg_ep_deaggr = ipa2_cfg_ep_deaggr; + api_ctrl->ipa_cfg_ep_route = ipa2_cfg_ep_route; + api_ctrl->ipa_cfg_ep_holb = ipa2_cfg_ep_holb; + api_ctrl->ipa_get_holb = ipa2_get_holb; + api_ctrl->ipa_set_tag_process_before_gating = + ipa2_set_tag_process_before_gating; + api_ctrl->ipa_cfg_ep_cfg = ipa2_cfg_ep_cfg; + api_ctrl->ipa_cfg_ep_metadata_mask = ipa2_cfg_ep_metadata_mask; + api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client; + api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl; + api_ctrl->ipa_add_hdr = ipa2_add_hdr; + api_ctrl->ipa_add_hdr_usr = ipa2_add_hdr_usr; + api_ctrl->ipa_del_hdr = ipa2_del_hdr; + api_ctrl->ipa_commit_hdr = ipa2_commit_hdr; + api_ctrl->ipa_reset_hdr = ipa2_reset_hdr; + api_ctrl->ipa_get_hdr = ipa2_get_hdr; + api_ctrl->ipa_put_hdr = ipa2_put_hdr; + api_ctrl->ipa_copy_hdr = ipa2_copy_hdr; + api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx; + api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx; + api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule; + api_ctrl->ipa_add_rt_rule_usr = ipa2_add_rt_rule_usr; + api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule; + api_ctrl->ipa_commit_rt = ipa2_commit_rt; + api_ctrl->ipa_reset_rt = ipa2_reset_rt; + api_ctrl->ipa_get_rt_tbl = ipa2_get_rt_tbl; + api_ctrl->ipa_put_rt_tbl = ipa2_put_rt_tbl; + api_ctrl->ipa_query_rt_index = ipa2_query_rt_index; + api_ctrl->ipa_mdfy_rt_rule = 
ipa2_mdfy_rt_rule; + api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule; + api_ctrl->ipa_add_flt_rule_usr = ipa2_add_flt_rule_usr; + api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule; + api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule; + api_ctrl->ipa_commit_flt = ipa2_commit_flt; + api_ctrl->ipa_reset_flt = ipa2_reset_flt; + api_ctrl->ipa_allocate_nat_device = ipa2_allocate_nat_device; + api_ctrl->ipa_nat_init_cmd = ipa2_nat_init_cmd; + api_ctrl->ipa_nat_dma_cmd = ipa2_nat_dma_cmd; + api_ctrl->ipa_nat_del_cmd = ipa2_nat_del_cmd; + api_ctrl->ipa_send_msg = ipa2_send_msg; + api_ctrl->ipa_register_pull_msg = ipa2_register_pull_msg; + api_ctrl->ipa_deregister_pull_msg = ipa2_deregister_pull_msg; + api_ctrl->ipa_register_intf = ipa2_register_intf; + api_ctrl->ipa_register_intf_ext = ipa2_register_intf_ext; + api_ctrl->ipa_deregister_intf = ipa2_deregister_intf; + api_ctrl->ipa_set_aggr_mode = ipa2_set_aggr_mode; + api_ctrl->ipa_set_qcncm_ndp_sig = ipa2_set_qcncm_ndp_sig; + api_ctrl->ipa_set_single_ndp_per_mbim = ipa2_set_single_ndp_per_mbim; + api_ctrl->ipa_tx_dp = ipa2_tx_dp; + api_ctrl->ipa_tx_dp_mul = ipa2_tx_dp_mul; + api_ctrl->ipa_free_skb = ipa2_free_skb; + api_ctrl->ipa_setup_sys_pipe = ipa2_setup_sys_pipe; + api_ctrl->ipa_teardown_sys_pipe = ipa2_teardown_sys_pipe; + api_ctrl->ipa_sys_update_gsi_hdls = ipa2_sys_update_gsi_hdls; + api_ctrl->ipa_sys_setup = ipa2_sys_setup; + api_ctrl->ipa_sys_teardown = ipa2_sys_teardown; + api_ctrl->ipa_connect_wdi_pipe = ipa2_connect_wdi_pipe; + api_ctrl->ipa_disconnect_wdi_pipe = ipa2_disconnect_wdi_pipe; + api_ctrl->ipa_enable_wdi_pipe = ipa2_enable_wdi_pipe; + api_ctrl->ipa_disable_wdi_pipe = ipa2_disable_wdi_pipe; + api_ctrl->ipa_resume_wdi_pipe = ipa2_resume_wdi_pipe; + api_ctrl->ipa_suspend_wdi_pipe = ipa2_suspend_wdi_pipe; + api_ctrl->ipa_get_wdi_stats = ipa2_get_wdi_stats; + api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes; + api_ctrl->ipa_broadcast_wdi_quota_reach_ind = + ipa2_broadcast_wdi_quota_reach_ind; + 
api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa; + api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB; + api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB; + api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping; + api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping; + api_ctrl->teth_bridge_init = ipa2_teth_bridge_init; + api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect; + api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect; + api_ctrl->ipa_set_client = ipa2_set_client; + api_ctrl->ipa_get_client = ipa2_get_client; + api_ctrl->ipa_get_client_uplink = ipa2_get_client_uplink; + api_ctrl->ipa_dma_init = ipa2_dma_init; + api_ctrl->ipa_dma_enable = ipa2_dma_enable; + api_ctrl->ipa_dma_disable = ipa2_dma_disable; + api_ctrl->ipa_dma_sync_memcpy = ipa2_dma_sync_memcpy; + api_ctrl->ipa_dma_async_memcpy = ipa2_dma_async_memcpy; + api_ctrl->ipa_dma_uc_memcpy = ipa2_dma_uc_memcpy; + api_ctrl->ipa_dma_destroy = ipa2_dma_destroy; + api_ctrl->ipa_mhi_init_engine = ipa2_mhi_init_engine; + api_ctrl->ipa_connect_mhi_pipe = ipa2_connect_mhi_pipe; + api_ctrl->ipa_disconnect_mhi_pipe = ipa2_disconnect_mhi_pipe; + api_ctrl->ipa_uc_mhi_reset_channel = ipa2_uc_mhi_reset_channel; + api_ctrl->ipa_mhi_sps_channel_empty = ipa2_mhi_sps_channel_empty; + api_ctrl->ipa_generate_tag_process = ipa2_generate_tag_process; + api_ctrl->ipa_disable_sps_pipe = ipa2_disable_sps_pipe; + api_ctrl->ipa_qmi_enable_force_clear_datapath_send = + qmi_enable_force_clear_datapath_send; + api_ctrl->ipa_qmi_disable_force_clear_datapath_send = + qmi_disable_force_clear_datapath_send; + api_ctrl->ipa_mhi_reset_channel_internal = + ipa2_mhi_reset_channel_internal; + api_ctrl->ipa_mhi_start_channel_internal = + ipa2_mhi_start_channel_internal; + api_ctrl->ipa_mhi_resume_channels_internal = + ipa2_mhi_resume_channels_internal; + api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info = + ipa2_uc_mhi_send_dl_ul_sync_info; + api_ctrl->ipa_uc_mhi_init = ipa2_uc_mhi_init; + 
api_ctrl->ipa_uc_mhi_suspend_channel = ipa2_uc_mhi_suspend_channel; + api_ctrl->ipa_uc_mhi_stop_event_update_channel = + ipa2_uc_mhi_stop_event_update_channel; + api_ctrl->ipa_uc_mhi_cleanup = ipa2_uc_mhi_cleanup; + api_ctrl->ipa_uc_mhi_print_stats = ipa2_uc_mhi_print_stats; + api_ctrl->ipa_uc_state_check = ipa2_uc_state_check; + api_ctrl->ipa_write_qmap_id = ipa2_write_qmap_id; + api_ctrl->ipa_add_interrupt_handler = ipa2_add_interrupt_handler; + api_ctrl->ipa_remove_interrupt_handler = ipa2_remove_interrupt_handler; + api_ctrl->ipa_restore_suspend_handler = ipa2_restore_suspend_handler; + api_ctrl->ipa_bam_reg_dump = ipa2_bam_reg_dump; + api_ctrl->ipa_get_ep_mapping = ipa2_get_ep_mapping; + api_ctrl->ipa_is_ready = ipa2_is_ready; + api_ctrl->ipa_proxy_clk_vote = ipa2_proxy_clk_vote; + api_ctrl->ipa_proxy_clk_unvote = ipa2_proxy_clk_unvote; + api_ctrl->ipa_is_client_handle_valid = ipa2_is_client_handle_valid; + api_ctrl->ipa_get_client_mapping = ipa2_get_client_mapping; + api_ctrl->ipa_get_rm_resource_from_ep = ipa2_get_rm_resource_from_ep; + api_ctrl->ipa_get_modem_cfg_emb_pipe_flt = + ipa2_get_modem_cfg_emb_pipe_flt; + api_ctrl->ipa_get_transport_type = ipa2_get_transport_type; + api_ctrl->ipa_ap_suspend = ipa2_ap_suspend; + api_ctrl->ipa_ap_resume = ipa2_ap_resume; + api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain; + api_ctrl->ipa_disable_apps_wan_cons_deaggr = + ipa2_disable_apps_wan_cons_deaggr; + api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev; + api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info; + api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel; + api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb; + api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks; + api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks; + api_ctrl->ipa_inc_client_enable_clks_no_block = + ipa2_inc_client_enable_clks_no_block; + api_ctrl->ipa_suspend_resource_no_block = + ipa2_suspend_resource_no_block; + 
api_ctrl->ipa_resume_resource = ipa2_resume_resource; + api_ctrl->ipa_suspend_resource_sync = ipa2_suspend_resource_sync; + api_ctrl->ipa_set_required_perf_profile = + ipa2_set_required_perf_profile; + api_ctrl->ipa_get_ipc_logbuf = ipa2_get_ipc_logbuf; + api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low; + api_ctrl->ipa_rx_poll = ipa2_rx_poll; + api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb; + api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes; + api_ctrl->ipa_tear_down_uc_offload_pipes = + ipa2_tear_down_uc_offload_pipes; + api_ctrl->ipa_get_pdev = ipa2_get_pdev; + api_ctrl->ipa_ntn_uc_reg_rdyCB = ipa2_ntn_uc_reg_rdyCB; + api_ctrl->ipa_ntn_uc_dereg_rdyCB = ipa2_ntn_uc_dereg_rdyCB; + api_ctrl->ipa_conn_wdi_pipes = ipa2_conn_wdi3_pipes; + api_ctrl->ipa_disconn_wdi_pipes = ipa2_disconn_wdi3_pipes; + api_ctrl->ipa_enable_wdi_pipes = ipa2_enable_wdi3_pipes; + api_ctrl->ipa_disable_wdi_pipes = ipa2_disable_wdi3_pipes; + api_ctrl->ipa_pm_is_used = ipa2_pm_is_used; + + return 0; +} + +/** + * ipa_get_sys_yellow_wm()- Return yellow WM value for IPA SYS pipes. + * + * Return value: IPA_YELLOW_MARKER_SYS_CFG_OFST register if IPA_HW_v2.6L, + * IPA_DEFAULT_SYS_YELLOW_WM otherwise. 
+ */ +u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys) +{ + if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L && + ipa_ctx->ipa_uc_monitor_holb) { + return ipa_read_reg(ipa_ctx->mmio, + IPA_YELLOW_MARKER_SYS_CFG_OFST); + } else { + if (!sys) + return 0; + + return IPA_DEFAULT_SYS_YELLOW_WM * sys->rx_buff_sz; + } +} +EXPORT_SYMBOL(ipa_get_sys_yellow_wm); + +void ipa_suspend_apps_pipes(bool suspend) +{ + struct ipa_ep_cfg_ctrl cfg; + int ipa_ep_idx; + u32 lan_empty = 0, wan_empty = 0; + int ret; + struct sps_event_notify notify; + struct ipa_ep_context *ep; + + memset(&cfg, 0, sizeof(cfg)); + cfg.ipa_ep_suspend = suspend; + + ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS); + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (ep->valid) { + ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg); + /* Check if the pipes are empty. */ + ret = sps_is_pipe_empty(ep->ep_hdl, &lan_empty); + if (ret) { + IPAERR("%s: sps_is_pipe_empty failed with %d\n", + __func__, ret); + } + if (!lan_empty) { + IPADBG("LAN Cons is not-empty. Enter poll mode.\n"); + notify.user = ep->sys; + notify.event_id = SPS_EVENT_EOT; + if (ep->sys->sps_callback) + ep->sys->sps_callback(¬ify); + } + } + + ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + /* Considering the case for SSR. */ + if (ipa_ep_idx == -1) { + IPADBG("Invalid client.\n"); + return; + } + ep = &ipa_ctx->ep[ipa_ep_idx]; + if (ep->valid) { + ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg); + /* Check if the pipes are empty. */ + ret = sps_is_pipe_empty(ep->ep_hdl, &wan_empty); + if (ret) { + IPAERR("%s: sps_is_pipe_empty failed with %d\n", + __func__, ret); + } + if (!wan_empty) { + IPADBG("WAN Cons is not-empty. 
Enter poll mode.\n"); + notify.user = ep->sys; + notify.event_id = SPS_EVENT_EOT; + if (ep->sys->sps_callback) + ep->sys->sps_callback(¬ify); + } + } +} + +/** + * ipa2_get_pdev() - return a pointer to IPA dev struct + * + * Return value: a pointer to IPA dev struct + * + */ +struct device *ipa2_get_pdev(void) +{ + if (!ipa_ctx) + return NULL; + + return ipa_ctx->pdev; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v2/ipa_wdi3_i.c new file mode 100644 index 000000000000..3b670c92612c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_wdi3_i.c @@ -0,0 +1,557 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved. + */ +#include "ipa_i.h" +#include + +#define IPA_HW_WDI3_RX_MBOX_START_INDEX 48 +#define IPA_HW_WDI3_TX_MBOX_START_INDEX 50 + +static int ipa_send_wdi3_setup_pipe_cmd( + u8 is_smmu_enabled, struct ipa_wdi_pipe_setup_info *info, + struct ipa_wdi_pipe_setup_info_smmu *info_smmu, u8 dir) +{ + int ipa_ep_idx; + int result = 0, len; + unsigned long va; + struct ipa_mem_buffer cmd; + struct IpaHwWdi3SetUpCmdData_t *wdi3_params; + struct IpaHwOffloadSetUpCmdData_t *cmd_data; + + if (info == NULL || info_smmu == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base; + cmd_data->protocol = IPA_HW_FEATURE_WDI3; + + if (!is_smmu_enabled) { + ipa_ep_idx = ipa_get_ep_mapping(info->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx.\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", info->client, ipa_ep_idx); + IPADBG("ring_base_pa = 0x%pad\n", &info->transfer_ring_base_pa); + IPADBG("ring_size = %hu\n", info->transfer_ring_size); + IPADBG("ring_db_pa = 
0x%pad\n", + &info->transfer_ring_doorbell_pa); + IPADBG("evt_ring_base_pa = 0x%pad\n", + &info->event_ring_base_pa); + IPADBG("evt_ring_size = %hu\n", info->event_ring_size); + IPADBG("evt_ring_db_pa = 0x%pad\n", + &info->event_ring_doorbell_pa); + IPADBG("num_pkt_buffers = %hu\n", info->num_pkt_buffers); + IPADBG("pkt_offset = %d\n", info->pkt_offset); + + wdi3_params = &cmd_data->SetupCh_params.Wdi3SetupCh_params; + wdi3_params->transfer_ring_base_pa = + (u32)info->transfer_ring_base_pa; + wdi3_params->transfer_ring_base_pa_hi = + (u32)((u64)info->transfer_ring_base_pa >> 32); + wdi3_params->transfer_ring_size = info->transfer_ring_size; + wdi3_params->transfer_ring_doorbell_pa = + (u32)info->transfer_ring_doorbell_pa; + wdi3_params->transfer_ring_doorbell_pa_hi = + (u32)((u64)info->transfer_ring_doorbell_pa >> 32); + wdi3_params->event_ring_base_pa = (u32)info->event_ring_base_pa; + wdi3_params->event_ring_base_pa_hi = + (u32)((u64)info->event_ring_base_pa >> 32); + wdi3_params->event_ring_size = info->event_ring_size; + wdi3_params->event_ring_doorbell_pa = + (u32)info->event_ring_doorbell_pa; + wdi3_params->event_ring_doorbell_pa_hi = + (u32)((u64)info->event_ring_doorbell_pa >> 32); + wdi3_params->num_pkt_buffers = info->num_pkt_buffers; + wdi3_params->ipa_pipe_number = ipa_ep_idx; + wdi3_params->dir = dir; + wdi3_params->pkt_offset = info->pkt_offset; + memcpy(wdi3_params->desc_format_template, + info->desc_format_template, + sizeof(wdi3_params->desc_format_template)); + } else { + ipa_ep_idx = ipa_get_ep_mapping(info_smmu->client); + if (ipa_ep_idx == -1) { + IPAERR("fail to get ep idx\n"); + return -EFAULT; + } + + IPADBG("client=%d ep=%d\n", info_smmu->client, ipa_ep_idx); + IPADBG("ring_size = %hu\n", info_smmu->transfer_ring_size); + IPADBG("ring_db_pa = 0x%pad\n", + &info_smmu->transfer_ring_doorbell_pa); + IPADBG("evt_ring_size = %hu\n", info_smmu->event_ring_size); + IPADBG("evt_ring_db_pa = 0x%pad\n", + &info_smmu->event_ring_doorbell_pa); + 
IPADBG("num_pkt_buffers = %hu\n", info_smmu->num_pkt_buffers); + IPADBG("pkt_offset = %d\n", info_smmu->pkt_offset); + + wdi3_params = &cmd_data->SetupCh_params.Wdi3SetupCh_params; + + if (dir == IPA_WDI3_TX_DIR) { + len = info_smmu->transfer_ring_size; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES, + true, info->transfer_ring_base_pa, + &info_smmu->transfer_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_base_pa = (u32)va; + wdi3_params->transfer_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->transfer_ring_size = len; + + if (ipa2_create_uc_smmu_mapping(IPA_WDI_TX_DB_RES, + true, info_smmu->transfer_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_doorbell_pa = + (u32)va; + wdi3_params->transfer_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + + len = info_smmu->event_ring_size; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES, + true, info->event_ring_base_pa, + &info_smmu->event_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_base_pa = (u32)va; + wdi3_params->event_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->event_ring_size = len; + + if (ipa2_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES, + true, info_smmu->event_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_doorbell_pa = + (u32)va; + wdi3_params->event_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + } else { + len = info_smmu->transfer_ring_size; + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES, + true, info->transfer_ring_base_pa, + &info_smmu->transfer_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_base_pa = (u32)va; + wdi3_params->transfer_ring_base_pa_hi = + (u32)((u64)va 
>> 32); + wdi3_params->transfer_ring_size = len; + + if (ipa2_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES, + true, info_smmu->transfer_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->transfer_ring_doorbell_pa = + (u32)va; + wdi3_params->transfer_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + + len = info_smmu->event_ring_size; + if (ipa2_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_RES, true, + info->event_ring_base_pa, + &info_smmu->event_ring_base, len, + false, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_base_pa = (u32)va; + wdi3_params->event_ring_base_pa_hi = + (u32)((u64)va >> 32); + wdi3_params->event_ring_size = len; + + if (ipa2_create_uc_smmu_mapping( + IPA_WDI_RX_COMP_RING_WP_RES, true, + info_smmu->event_ring_doorbell_pa, + NULL, 4, true, &va)) { + IPAERR("failed to get smmu mapping\n"); + return -EFAULT; + } + wdi3_params->event_ring_doorbell_pa = + (u32)va; + wdi3_params->event_ring_doorbell_pa_hi = + (u32)((u64)va >> 32); + } + wdi3_params->num_pkt_buffers = info_smmu->num_pkt_buffers; + wdi3_params->ipa_pipe_number = ipa_ep_idx; + wdi3_params->dir = dir; + wdi3_params->pkt_offset = info_smmu->pkt_offset; + memcpy(wdi3_params->desc_format_template, + info_smmu->desc_format_template, + sizeof(wdi3_params->desc_format_template)); + } + + result = ipa_uc_send_cmd((u32)(cmd.phys_base), + IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + IPAERR("uc setup channel cmd failed: %d\n", result); + result = -EFAULT; + } + + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + return result; +} + +int ipa2_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in, + struct ipa_wdi_conn_out_params *out, + ipa_wdi_meter_notifier_cb wdi_notify) +{ + enum ipa_client_type rx_client; + enum ipa_client_type tx_client; + struct ipa_ep_context *ep_rx; + struct 
ipa_ep_context *ep_tx; + int ipa_ep_idx_rx; + int ipa_ep_idx_tx; + int result = 0; + + if (in == NULL || out == NULL) { + IPAERR("invalid input\n"); + return -EINVAL; + } + + if (in->is_smmu_enabled == false) { + rx_client = in->u_rx.rx.client; + tx_client = in->u_tx.tx.client; + } else { + rx_client = in->u_rx.rx_smmu.client; + tx_client = in->u_tx.tx_smmu.client; + } + + ipa_ep_idx_rx = ipa_get_ep_mapping(rx_client); + ipa_ep_idx_tx = ipa_get_ep_mapping(tx_client); + + if (ipa_ep_idx_rx == -1 || ipa_ep_idx_tx == -1) { + IPAERR("fail to alloc EP.\n"); + return -EFAULT; + } + if (ipa_ep_idx_rx >= IPA_MAX_NUM_PIPES || + ipa_ep_idx_tx >= IPA_MAX_NUM_PIPES) { + IPAERR("ep out of range.\n"); + return -EFAULT; + } + + ep_rx = &ipa_ctx->ep[ipa_ep_idx_rx]; + ep_tx = &ipa_ctx->ep[ipa_ep_idx_tx]; + + if (ep_rx->valid || ep_tx->valid) { + IPAERR("EP already allocated.\n"); + return -EFAULT; + } + + memset(ep_rx, 0, offsetof(struct ipa_ep_context, sys)); + memset(ep_tx, 0, offsetof(struct ipa_ep_context, sys)); + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + if (wdi_notify) + ipa_ctx->uc_wdi_ctx.stats_notify = wdi_notify; + else + IPADBG("wdi_notify is null\n"); + + /* setup rx ep cfg */ + ep_rx->valid = 1; + ep_rx->client = rx_client; + result = ipa_disable_data_path(ipa_ep_idx_rx); + if (result) { + IPAERR("disable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_rx); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -EFAULT; + } + ep_rx->client_notify = in->notify; + ep_rx->priv = in->priv; + + if (in->is_smmu_enabled == false) + memcpy(&ep_rx->cfg, &in->u_rx.rx.ipa_ep_cfg, + sizeof(ep_rx->cfg)); + else + memcpy(&ep_rx->cfg, &in->u_rx.rx_smmu.ipa_ep_cfg, + sizeof(ep_rx->cfg)); + + if (ipa_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) { + IPAERR("fail to setup rx pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa_send_wdi3_setup_pipe_cmd(in->is_smmu_enabled, + &in->u_rx.rx, &in->u_rx.rx_smmu, IPA_WDI3_RX_DIR)) { + IPAERR("fail to send cmd to uc for rx pipe\n"); + result = 
-EFAULT; + goto fail; + } + ipa_install_dflt_flt_rules(ipa_ep_idx_rx); + out->rx_uc_db_pa = ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI3_RX_MBOX_START_INDEX/32, + IPA_HW_WDI3_RX_MBOX_START_INDEX % 32); + + IPADBG("client %d (ep: %d) connected\n", rx_client, + ipa_ep_idx_rx); + + /* setup tx ep cfg */ + ep_tx->valid = 1; + ep_tx->client = tx_client; + result = ipa_disable_data_path(ipa_ep_idx_tx); + if (result) { + IPAERR("disable data path failed res=%d ep=%d.\n", result, + ipa_ep_idx_tx); + result = -EFAULT; + goto fail; + } + + if (in->is_smmu_enabled == false) + memcpy(&ep_tx->cfg, &in->u_tx.tx.ipa_ep_cfg, + sizeof(ep_tx->cfg)); + else + memcpy(&ep_tx->cfg, &in->u_tx.tx_smmu.ipa_ep_cfg, + sizeof(ep_tx->cfg)); + + if (ipa_cfg_ep(ipa_ep_idx_tx, &ep_tx->cfg)) { + IPAERR("fail to setup tx pipe cfg\n"); + result = -EFAULT; + goto fail; + } + + if (ipa_send_wdi3_setup_pipe_cmd(in->is_smmu_enabled, + &in->u_tx.tx, &in->u_tx.tx_smmu, IPA_WDI3_TX_DIR)) { + IPAERR("fail to send cmd to uc for tx pipe\n"); + result = -EFAULT; + goto fail; + } + out->tx_uc_db_pa = ipa_ctx->ipa_wrapper_base + + IPA_REG_BASE_OFST_v2_5 + + IPA_UC_MAILBOX_m_n_OFFS_v2_5( + IPA_HW_WDI3_TX_MBOX_START_INDEX/32, + IPA_HW_WDI3_TX_MBOX_START_INDEX % 32); + IPADBG("client %d (ep: %d) connected\n", tx_client, + ipa_ep_idx_tx); + +fail: + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +static int ipa_send_wdi3_common_ch_cmd(int ipa_ep_idx, int command) +{ + struct ipa_mem_buffer cmd; + struct IpaHwOffloadCommonChCmdData_t *cmd_data; + union IpaHwWdi3CommonChCmdData_t *wdi3; + int result = 0; + + cmd.size = sizeof(*cmd_data); + cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size, + &cmd.phys_base, GFP_KERNEL); + if (cmd.base == NULL) { + IPAERR("fail to get DMA memory.\n"); + return -ENOMEM; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + /* enable the TX pipe */ + cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base; + 
cmd_data->protocol = IPA_HW_FEATURE_WDI3; + + wdi3 = &cmd_data->CommonCh_params.Wdi3CommonCh_params; + wdi3->params.ipa_pipe_number = ipa_ep_idx; + result = ipa_uc_send_cmd((u32)(cmd.phys_base), command, + IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS, + false, 10*HZ); + if (result) { + result = -EFAULT; + goto fail; + } + +fail: + dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return result; +} + +int ipa2_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + struct ipa_ep_context *ep_tx, *ep_rx; + int result = 0; + + IPADBG("ep_tx = %d\n", ipa_ep_idx_tx); + IPADBG("ep_rx = %d\n", ipa_ep_idx_rx); + + if (ipa_ep_idx_tx < 0 || ipa_ep_idx_tx >= IPA_MAX_NUM_PIPES || + ipa_ep_idx_rx < 0 || ipa_ep_idx_rx >= IPA_MAX_NUM_PIPES) { + IPAERR("invalid ipa ep index\n"); + return -EINVAL; + } + + ep_tx = &ipa_ctx->ep[ipa_ep_idx_tx]; + ep_rx = &ipa_ctx->ep[ipa_ep_idx_rx]; + + /* tear down tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN)) { + IPAERR("fail to tear down tx pipe\n"); + result = -EFAULT; + goto fail; + } + ipa_disable_data_path(ipa_ep_idx_tx); + memset(ep_tx, 0, sizeof(struct ipa_ep_context)); + IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx); + + /* tear down rx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN)) { + IPAERR("fail to tear down rx pipe\n"); + result = -EFAULT; + goto fail; + } + ipa_disable_data_path(ipa_ep_idx_rx); + ipa_delete_dflt_flt_rules(ipa_ep_idx_rx); + memset(ep_rx, 0, sizeof(struct ipa_ep_context)); + IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx); + +fail: + return result; +} + +int ipa2_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + struct ipa_ep_context *ep_tx, *ep_rx; + int result = 0; + + IPADBG("ep_tx = %d\n", ipa_ep_idx_tx); + IPADBG("ep_rx = %d\n", ipa_ep_idx_rx); + + ep_tx = &ipa_ctx->ep[ipa_ep_idx_tx]; + ep_rx = &ipa_ctx->ep[ipa_ep_idx_rx]; 
+ + /* enable tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE)) { + IPAERR("fail to enable tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* resume tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_RESUME)) { + IPAERR("fail to resume tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* enable rx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE)) { + IPAERR("fail to enable rx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* resume rx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_RESUME)) { + IPAERR("fail to resume rx pipe\n"); + result = -EFAULT; + goto fail; + } + + IPA_ACTIVE_CLIENTS_INC_SIMPLE(); + + /* enable data path */ + result = ipa_enable_data_path(ipa_ep_idx_rx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_rx); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -EFAULT; + } + + result = ipa_enable_data_path(ipa_ep_idx_tx); + if (result) { + IPAERR("enable data path failed res=%d clnt=%d.\n", result, + ipa_ep_idx_tx); + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + return -EFAULT; + } + + IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); + +fail: + return result; +} + +int ipa2_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx) +{ + struct ipa_ep_context *ep_tx, *ep_rx; + int result = 0; + + IPADBG("ep_tx = %d\n", ipa_ep_idx_tx); + IPADBG("ep_rx = %d\n", ipa_ep_idx_rx); + + ep_tx = &ipa_ctx->ep[ipa_ep_idx_tx]; + ep_rx = &ipa_ctx->ep[ipa_ep_idx_rx]; + + /* suspend tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND)) { + IPAERR("fail to suspend tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* disable tx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_tx, + IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE)) { + IPAERR("fail to disable tx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* suspend rx pipe */ + if 
(ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND)) { + IPAERR("fail to suspend rx pipe\n"); + result = -EFAULT; + goto fail; + } + + /* disable rx pipe */ + if (ipa_send_wdi3_common_ch_cmd(ipa_ep_idx_rx, + IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE)) { + IPAERR("fail to disable rx pipe\n"); + result = -EFAULT; + goto fail; + } + +fail: + return result; +} diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c new file mode 100644 index 000000000000..eca80f9e2339 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -0,0 +1,3237 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. + */ + +/* + * WWAN Transport Network Driver. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" +#include +#include +#include +#include + +#include "ipa_trace.h" + +#define WWAN_METADATA_SHFT 24 +#define WWAN_METADATA_MASK 0xFF000000 +#define WWAN_DATA_LEN 2000 +#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */ +#define HEADROOM_FOR_QMAP 8 /* for mux header */ +#define TAILROOM 0 /* for padding by mux layer */ +#define MAX_NUM_OF_MUX_CHANNEL 15 /* max mux channels */ +#define UL_FILTER_RULE_HANDLE_START 69 +#define DEFAULT_OUTSTANDING_HIGH_CTL 96 +#define DEFAULT_OUTSTANDING_HIGH 64 +#define DEFAULT_OUTSTANDING_LOW 32 + +#define IPA_WWAN_DEV_NAME "rmnet_ipa%d" +#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0" +#define IPA_UPSTEAM_WLAN1_IFACE_NAME "wlan1" + +#define IPA_WWAN_DEVICE_COUNT (1) + +#define IPA_WWAN_RX_SOFTIRQ_THRESH 16 + +#define INVALID_MUX_ID 0xFF +#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64 +#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64 +#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */ + +#define NAPI_WEIGHT 60 +#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024 + +static struct 
net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT]; +static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg; +static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl; +static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL]; +static int num_q6_rule, old_num_q6_rule; +static int rmnet_index; +static bool egress_set, a7_ul_flt_set; +static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/ +static atomic_t is_initialized; +static atomic_t is_ssr; +static void *subsys_notify_handle; + +u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */ +static struct mutex ipa_to_apps_pipe_handle_guard; +static struct mutex add_mux_channel_lock; +static int wwan_add_ul_flt_rule_to_ipa(void); +static int wwan_del_ul_flt_rule_to_ipa(void); +static void ipa_wwan_msg_free_cb(void*, u32, u32); +static void ipa_rmnet_rx_cb(void *priv); +static int ipa_rmnet_poll(struct napi_struct *napi, int budget); + +static void wake_tx_queue(struct work_struct *work); +static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue); + +static void tethering_stats_poll_queue(struct work_struct *work); +static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work, + tethering_stats_poll_queue); + +enum wwan_device_status { + WWAN_DEVICE_INACTIVE = 0, + WWAN_DEVICE_ACTIVE = 1 +}; + +struct ipa_rmnet_plat_drv_res { + bool ipa_rmnet_ssr; + bool ipa_loaduC; + bool ipa_advertise_sg_support; + bool ipa_napi_enable; + u32 wan_rx_desc_size; +}; + +static struct ipa_rmnet_plat_drv_res ipa_rmnet_res; +/** + * struct wwan_private - WWAN private data + * @net: network interface struct implemented by this driver + * @stats: iface statistics + * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed + * @outstanding_high: number of outstanding packets allowed + * @outstanding_low: number of outstanding packets which shall cause + * @ch_id: channel id + * @lock: spinlock for mutual exclusion + * @device_status: holds device status + * + * 
WWAN private - holds all relevant info about WWAN driver + */ +struct wwan_private { + struct net_device *net; + struct net_device_stats stats; + atomic_t outstanding_pkts; + int outstanding_high_ctl; + int outstanding_high; + int outstanding_low; + uint32_t ch_id; + spinlock_t lock; + struct completion resource_granted_completion; + enum wwan_device_status device_status; + struct napi_struct napi; +}; + +/** + * ipa_setup_a7_qmap_hdr() - Setup default a7 qmap hdr + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +static int ipa_setup_a7_qmap_hdr(void) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + u32 pyld_sz; + int ret; + + /* install the basic exception header */ + pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add); + hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!hdr) { + IPAWANERR("fail to alloc exception hdr\n"); + return -ENOMEM; + } + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME, + IPA_RESOURCE_NAME_MAX); + hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */ + + if (ipa2_add_hdr(hdr)) { + IPAWANERR("fail to add IPA_A7_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + IPAWANERR("fail to add IPA_A7_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + qmap_hdr_hdl = hdr_entry->hdr_hdl; + + ret = 0; +bail: + kfree(hdr); + return ret; +} + +static void ipa_del_a7_qmap_hdr(void) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *hdl_entry; + u32 pyld_sz; + int ret; + + pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 * + sizeof(struct ipa_hdr_del); + del_hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!del_hdr) { + IPAWANERR("fail to alloc exception hdr_del\n"); + return; + } + + del_hdr->commit = 1; + del_hdr->num_hdls = 1; + hdl_entry = &del_hdr->hdl[0]; + hdl_entry->hdl = qmap_hdr_hdl; + + ret = ipa2_del_hdr(del_hdr); + if (ret || hdl_entry->status) + 
IPAWANERR("ipa2_del_hdr failed\n"); + else + IPAWANDBG("hdrs deletion done\n"); + + qmap_hdr_hdl = 0; + kfree(del_hdr); +} + +static void ipa_del_qmap_hdr(uint32_t hdr_hdl) +{ + struct ipa_ioc_del_hdr *del_hdr; + struct ipa_hdr_del *hdl_entry; + u32 pyld_sz; + int ret; + + if (hdr_hdl == 0) { + IPAWANERR("Invalid hdr_hdl provided\n"); + return; + } + + pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 * + sizeof(struct ipa_hdr_del); + del_hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!del_hdr) { + IPAWANERR("fail to alloc exception hdr_del\n"); + return; + } + + del_hdr->commit = 1; + del_hdr->num_hdls = 1; + hdl_entry = &del_hdr->hdl[0]; + hdl_entry->hdl = hdr_hdl; + + ret = ipa2_del_hdr(del_hdr); + if (ret || hdl_entry->status) + IPAWANERR("ipa2_del_hdr failed\n"); + else + IPAWANDBG("header deletion done\n"); + + qmap_hdr_hdl = 0; + kfree(del_hdr); +} + +static void ipa_del_mux_qmap_hdrs(void) +{ + int index; + + for (index = 0; index < rmnet_index; index++) { + ipa_del_qmap_hdr(mux_channel[index].hdr_hdl); + mux_channel[index].hdr_hdl = 0; + } +} + +static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl) +{ + struct ipa_ioc_add_hdr *hdr; + struct ipa_hdr_add *hdr_entry; + char hdr_name[IPA_RESOURCE_NAME_MAX]; + u32 pyld_sz; + int ret; + + pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 * + sizeof(struct ipa_hdr_add); + hdr = kzalloc(pyld_sz, GFP_KERNEL); + if (!hdr) { + IPAWANERR("fail to alloc exception hdr\n"); + return -ENOMEM; + } + hdr->num_hdrs = 1; + hdr->commit = 1; + hdr_entry = &hdr->hdr[0]; + + snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + mux_id); + strlcpy(hdr_entry->name, hdr_name, + IPA_RESOURCE_NAME_MAX); + + hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */ + hdr_entry->hdr[1] = (uint8_t) mux_id; + IPAWANDBG("header (%s) with mux-id: (%d)\n", + hdr_name, + hdr_entry->hdr[1]); + if (ipa2_add_hdr(hdr)) { + IPAWANERR("fail to add IPA_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + if (hdr_entry->status) { + 
IPAWANERR("fail to add IPA_QMAP hdr\n"); + ret = -EPERM; + goto bail; + } + + ret = 0; + *hdr_hdl = hdr_entry->hdr_hdl; +bail: + kfree(hdr); + return ret; +} + +/** + * ipa_setup_dflt_wan_rt_tables() - Setup default wan routing tables + * + * Return codes: + * 0: success + * -ENOMEM: failed to allocate memory + * -EPERM: failed to add the tables + */ +static int ipa_setup_dflt_wan_rt_tables(void) +{ + struct ipa_ioc_add_rt_rule *rt_rule; + struct ipa_rt_rule_add *rt_rule_entry; + + rt_rule = + kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_add), GFP_KERNEL); + if (!rt_rule) { + IPAWANERR("fail to alloc mem\n"); + return -ENOMEM; + } + /* setup a default v4 route to point to Apps */ + rt_rule->num_rules = 1; + rt_rule->commit = 1; + rt_rule->ip = IPA_IP_v4; + strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME, + IPA_RESOURCE_NAME_MAX); + + rt_rule_entry = &rt_rule->rules[0]; + rt_rule_entry->at_rear = 1; + rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS; + rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl; + + if (ipa2_add_rt_rule(rt_rule)) { + IPAWANERR("fail to add dflt_wan v4 rule\n"); + kfree(rt_rule); + return -EPERM; + } + + IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl; + + /* setup a default v6 route to point to A5 */ + rt_rule->ip = IPA_IP_v6; + if (ipa2_add_rt_rule(rt_rule)) { + IPAWANERR("fail to add dflt_wan v6 rule\n"); + kfree(rt_rule); + return -EPERM; + } + IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl); + dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl; + + kfree(rt_rule); + return 0; +} + +static void ipa_del_dflt_wan_rt_tables(void) +{ + struct ipa_ioc_del_rt_rule *rt_rule; + struct ipa_rt_rule_del *rt_rule_entry; + int len; + + len = sizeof(struct ipa_ioc_del_rt_rule) + 1 * + sizeof(struct ipa_rt_rule_del); + rt_rule = kzalloc(len, GFP_KERNEL); + if (!rt_rule) { + IPAWANERR("unable to allocate memory for del route rule\n"); + 
return; + } + + memset(rt_rule, 0, len); + rt_rule->commit = 1; + rt_rule->num_hdls = 1; + rt_rule->ip = IPA_IP_v4; + + rt_rule_entry = &rt_rule->hdl[0]; + rt_rule_entry->status = -1; + rt_rule_entry->hdl = dflt_v4_wan_rt_hdl; + + IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n", + rt_rule_entry->hdl, IPA_IP_v4); + if (ipa2_del_rt_rule(rt_rule) || + (rt_rule_entry->status)) { + IPAWANERR("Routing rule deletion failed!\n"); + } + + rt_rule->ip = IPA_IP_v6; + rt_rule_entry->hdl = dflt_v6_wan_rt_hdl; + IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n", + rt_rule_entry->hdl, IPA_IP_v6); + if (ipa2_del_rt_rule(rt_rule) || + (rt_rule_entry->status)) { + IPAWANERR("Routing rule deletion failed!\n"); + } + + kfree(rt_rule); +} + +int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 + *rule_req, uint32_t *rule_hdl) +{ + int i, j; + + /* prevent multi-threads accessing num_q6_rule */ + mutex_lock(&add_mux_channel_lock); + if (rule_req->filter_spec_list_valid == true) { + num_q6_rule = rule_req->filter_spec_list_len; + IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule); + } else { + num_q6_rule = 0; + IPAWANERR("got no UL rules from modem\n"); + mutex_unlock(&add_mux_channel_lock); + return -EINVAL; + } + + /* copy UL filter rules from Modem*/ + for (i = 0; i < num_q6_rule; i++) { + /* check if rules overside the cache*/ + if (i == MAX_NUM_Q6_RULE) { + IPAWANERR("Reaching (%d) max cache ", + MAX_NUM_Q6_RULE); + IPAWANERR(" however total (%d)\n", + num_q6_rule); + goto failure; + } + /* construct UL_filter_rule handler QMI use-cas */ + ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl = + UL_FILTER_RULE_HANDLE_START + i; + rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl; + ipa_qmi_ctx->q6_ul_filter_rule[i].ip = + rule_req->filter_spec_list[i].ip_type; + ipa_qmi_ctx->q6_ul_filter_rule[i].action = + rule_req->filter_spec_list[i].filter_action; + if (rule_req->filter_spec_list[i].is_routing_table_index_valid + == true) + 
ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx = + rule_req->filter_spec_list[i].route_table_index; + if (rule_req->filter_spec_list[i].is_mux_id_valid == true) + ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id = + rule_req->filter_spec_list[i].mux_id; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap = + rule_req->filter_spec_list[i].filter_rule.rule_eq_bitmap; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present = + rule_req->filter_spec_list[i].filter_rule.tos_eq_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq = + rule_req->filter_spec_list[i].filter_rule.tos_eq; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq_present = + rule_req->filter_spec_list[i].filter_rule.protocol_eq_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq = + rule_req->filter_spec_list[i].filter_rule.protocol_eq; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_ihl_offset_range_16 = + rule_req->filter_spec_list[i].filter_rule.num_ihl_offset_range_16; + +for (j = 0; +j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_ihl_offset_range_16; j++) { + IPAWANDBG("copy_ul_filter_rule_to_ipa"); +ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_range_16[j].offset += rule_req->filter_spec_list[i].filter_rule.ihl_offset_range_16[j].offset; +ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_range_16[j].range_low += rule_req->filter_spec_list[i].filter_rule.ihl_offset_range_16[j].range_low; +ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_range_16[j].range_high += rule_req->filter_spec_list[i].filter_rule.ihl_offset_range_16[j].range_high; +} + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 = + rule_req->filter_spec_list[i].filter_rule.num_offset_meq_32; + +for (j = 0; +j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32; j++) { + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_32[j].offset += rule_req->filter_spec_list[i].filter_rule.offset_meq_32[j].offset; 
+ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_32[j].mask = +rule_req->filter_spec_list[i].filter_rule.offset_meq_32[j].mask; +ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_32[j].value = +rule_req->filter_spec_list[i].filter_rule.offset_meq_32[j].value; +} + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present = + rule_req->filter_spec_list[i].filter_rule.tc_eq_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq = + rule_req->filter_spec_list[i].filter_rule.tc_eq; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present = + rule_req->filter_spec_list[i].filter_rule.flow_eq_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq = + rule_req->filter_spec_list[i].filter_rule.flow_eq; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_16_present + = rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_16_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_16.offset = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_16.offset; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_16.value = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_16.value; + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_32_present = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_32_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_32.offset = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_32.offset; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_eq_32.value = + rule_req->filter_spec_list[i].filter_rule.ihl_offset_eq_32.value; + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_ihl_offset_meq_32 = + rule_req->filter_spec_list[i].filter_rule.num_ihl_offset_meq_32; + +for (j = 0; +j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_ihl_offset_meq_32; j++) { + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_meq_32[j].offset += rule_req->filter_spec_list[i].filter_rule.ihl_offset_meq_32[j].offset; 
+ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_meq_32[j].mask = +rule_req->filter_spec_list[i].filter_rule.ihl_offset_meq_32[j].mask; +ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ihl_offset_meq_32[j].value = +rule_req->filter_spec_list[i].filter_rule.ihl_offset_meq_32[j].value; +} + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 = + rule_req->filter_spec_list[i].filter_rule.num_offset_meq_128; + +for (j = 0; +j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128; j++) { + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_128[j].offset = +rule_req->filter_spec_list[i].filter_rule.offset_meq_128[j].offset; +memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_128[j].mask, +rule_req->filter_spec_list[i].filter_rule.offset_meq_128[j].mask, 16); +memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.offset_meq_128[j].value, +rule_req->filter_spec_list[i].filter_rule.offset_meq_128[j].value, 16); +} + + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32_present = + rule_req->filter_spec_list[i].filter_rule.metadata_meq32_present; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.offset = + rule_req->filter_spec_list[i].filter_rule.metadata_meq32.offset; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.mask = + rule_req->filter_spec_list[i].filter_rule.metadata_meq32.mask; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.value = + rule_req->filter_spec_list[i].filter_rule.metadata_meq32.value; + ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.ipv4_frag_eq_present = + rule_req->filter_spec_list[i].filter_rule.ipv4_frag_eq_present; + } + + if (rule_req->xlat_filter_indices_list_valid) { + if (rule_req->xlat_filter_indices_list_len > num_q6_rule) { + IPAWANERR("Number of xlat indices is not valid: %d\n", + rule_req->xlat_filter_indices_list_len); + goto failure; + } + IPAWANDBG("Receive %d XLAT indices: ", + rule_req->xlat_filter_indices_list_len); + for (i = 0; i < 
rule_req->xlat_filter_indices_list_len; i++) + IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]); + IPAWANDBG("\n"); + + for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) { + if (rule_req->xlat_filter_indices_list[i] + >= num_q6_rule) { + IPAWANERR("Xlat rule idx is wrong: %d\n", + rule_req->xlat_filter_indices_list[i]); + goto failure; + } else { + ipa_qmi_ctx->q6_ul_filter_rule + [rule_req->xlat_filter_indices_list[i]] + .is_xlat_rule = 1; + IPAWANDBG("Rule %d is xlat rule\n", + rule_req->xlat_filter_indices_list[i]); + } + } + } + goto success; + +failure: + num_q6_rule = 0; + memset(ipa_qmi_ctx->q6_ul_filter_rule, 0, + sizeof(ipa_qmi_ctx->q6_ul_filter_rule)); + mutex_unlock(&add_mux_channel_lock); + return -EINVAL; + +success: + mutex_unlock(&add_mux_channel_lock); + return 0; +} + +static int wwan_add_ul_flt_rule_to_ipa(void) +{ + u32 pyld_sz; + int i, retval = 0; + int num_v4_rule = 0, num_v6_rule = 0; + struct ipa_ioc_add_flt_rule *param; + struct ipa_flt_rule_add flt_rule_entry; + struct ipa_fltr_installed_notif_req_msg_v01 *req; + + if (ipa_qmi_ctx == NULL) { + IPAWANERR("ipa_qmi_ctx is NULL!\n"); + return -EFAULT; + } + + pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) + + sizeof(struct ipa_flt_rule_add); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) + return -ENOMEM; + + req = kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01), + GFP_KERNEL); + if (!req) { + kfree(param); + return -ENOMEM; + } + + memset(req, 0, sizeof(struct ipa_fltr_installed_notif_req_msg_v01)); + + param->commit = 1; + param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD; + param->global = false; + param->num_rules = (uint8_t)1; + + mutex_lock(&ipa_qmi_lock); + for (i = 0; i < num_q6_rule; i++) { + param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip; + memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add)); + flt_rule_entry.at_rear = true; + flt_rule_entry.rule.action = + ipa_qmi_ctx->q6_ul_filter_rule[i].action; + flt_rule_entry.rule.rt_tbl_idx + = 
ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx; + flt_rule_entry.rule.retain_hdr = true; + + /* debug rt-hdl*/ + IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n", + i, flt_rule_entry.rule.rt_tbl_idx); + flt_rule_entry.rule.eq_attrib_type = true; + memcpy(&(flt_rule_entry.rule.eq_attrib), + &ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib, + sizeof(struct ipa_ipfltri_rule_eq)); + memcpy(&(param->rules[0]), &flt_rule_entry, + sizeof(struct ipa_flt_rule_add)); + if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) { + retval = -EFAULT; + IPAWANERR("add A7 UL filter rule(%d) failed\n", i); + } else { + /* store the rule handler */ + ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] = + param->rules[0].flt_rule_hdl; + } + } + mutex_unlock(&ipa_qmi_lock); + + /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/ + req->source_pipe_index = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD); + req->install_status = QMI_RESULT_SUCCESS_V01; + req->filter_index_list_len = num_q6_rule; + mutex_lock(&ipa_qmi_lock); + for (i = 0; i < num_q6_rule; i++) { + if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) { + req->filter_index_list[i].filter_index = num_v4_rule; + num_v4_rule++; + } else { + req->filter_index_list[i].filter_index = num_v6_rule; + num_v6_rule++; + } + req->filter_index_list[i].filter_handle = + ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl; + } + mutex_unlock(&ipa_qmi_lock); + if (qmi_filter_notify_send(req)) { + IPAWANDBG("add filter rule index on A7-RX failed\n"); + retval = -EFAULT; + } + old_num_q6_rule = num_q6_rule; + IPAWANDBG("add (%d) filter rule index on A7-RX\n", + old_num_q6_rule); + kfree(param); + kfree(req); + return retval; +} + +static int wwan_del_ul_flt_rule_to_ipa(void) +{ + u32 pyld_sz; + int i, retval = 0; + struct ipa_ioc_del_flt_rule *param; + struct ipa_flt_rule_del flt_rule_entry; + + pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) + + sizeof(struct ipa_flt_rule_del); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + IPAWANERR("kzalloc 
failed\n"); + return -ENOMEM; + } + + param->commit = 1; + param->num_hdls = (uint8_t) 1; + + for (i = 0; i < old_num_q6_rule; i++) { + param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip; + memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del)); + flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i]; + /* debug rt-hdl*/ + IPAWANDBG("delete-IPA rule index(%d)\n", i); + memcpy(&(param->hdl[0]), &flt_rule_entry, + sizeof(struct ipa_flt_rule_del)); + if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) { + IPAWANERR("del A7 UL filter rule(%d) failed\n", i); + kfree(param); + return -EFAULT; + } + } + + /* set UL filter-rule add-indication */ + a7_ul_flt_set = false; + old_num_q6_rule = 0; + + kfree(param); + return retval; +} + +static int find_mux_channel_index(uint32_t mux_id) +{ + int i; + + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + if (mux_id == mux_channel[i].mux_id) + return i; + } + return MAX_NUM_OF_MUX_CHANNEL; +} + +static int find_vchannel_name_index(const char *vchannel_name) +{ + int i; + + for (i = 0; i < rmnet_index; i++) { + if (strcmp(mux_channel[i].vchannel_name, vchannel_name) == 0) + return i; + } + return MAX_NUM_OF_MUX_CHANNEL; +} + +static enum ipa_upstream_type find_upstream_type(const char *upstreamIface) +{ + int i; + + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) { + if (strcmp(mux_channel[i].vchannel_name, + upstreamIface) == 0) + return IPA_UPSTEAM_MODEM; + } + + if ((strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0) || + (strcmp(IPA_UPSTEAM_WLAN1_IFACE_NAME, upstreamIface) == 0)) + return IPA_UPSTEAM_WLAN; + else + return IPA_UPSTEAM_MAX; +} + +static int wwan_register_to_ipa(int index) +{ + struct ipa_tx_intf tx_properties = {0}; + struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} }; + struct ipa_ioc_tx_intf_prop *tx_ipv4_property; + struct ipa_ioc_tx_intf_prop *tx_ipv6_property; + struct ipa_rx_intf rx_properties = {0}; + struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} }; + struct 
ipa_ioc_rx_intf_prop *rx_ipv4_property; + struct ipa_ioc_rx_intf_prop *rx_ipv6_property; + struct ipa_ext_intf ext_properties = {0}; + struct ipa_ioc_ext_intf_prop *ext_ioc_properties; + u32 pyld_sz; + int ret = 0, i; + + IPAWANDBG("index(%d) device[%s]:\n", index, + mux_channel[index].vchannel_name); + if (!mux_channel[index].mux_hdr_set) { + ret = ipa_add_qmap_hdr(mux_channel[index].mux_id, + &mux_channel[index].hdr_hdl); + if (ret) { + IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index); + return ret; + } + mux_channel[index].mux_hdr_set = true; + } + tx_properties.prop = tx_ioc_properties; + tx_ipv4_property = &tx_properties.prop[0]; + tx_ipv4_property->ip = IPA_IP_v4; + tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS; + snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + mux_channel[index].mux_id); + tx_ipv6_property = &tx_properties.prop[1]; + tx_ipv6_property->ip = IPA_IP_v6; + tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS; + /* no need use A2_MUX_HDR_NAME_V6_PREF, same header */ + snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d", + A2_MUX_HDR_NAME_V4_PREF, + mux_channel[index].mux_id); + tx_properties.num_props = 2; + + rx_properties.prop = rx_ioc_properties; + rx_ipv4_property = &rx_properties.prop[0]; + rx_ipv4_property->ip = IPA_IP_v4; + rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_ipv4_property->attrib.meta_data = + mux_channel[index].mux_id << WWAN_METADATA_SHFT; + rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK; + rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD; + rx_ipv6_property = &rx_properties.prop[1]; + rx_ipv6_property->ip = IPA_IP_v6; + rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA; + rx_ipv6_property->attrib.meta_data = + mux_channel[index].mux_id << WWAN_METADATA_SHFT; + rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK; + rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD; + 
rx_properties.num_props = 2; + + pyld_sz = num_q6_rule * + sizeof(struct ipa_ioc_ext_intf_prop); + ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL); + if (!ext_ioc_properties) { + IPAWANERR("Error allocate memory\n"); + return -ENOMEM; + } + + ext_properties.prop = ext_ioc_properties; + ext_properties.excp_pipe_valid = true; + ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS; + ext_properties.num_props = num_q6_rule; + for (i = 0; i < num_q6_rule; i++) { + memcpy(&(ext_properties.prop[i]), + &(ipa_qmi_ctx->q6_ul_filter_rule[i]), + sizeof(struct ipa_ioc_ext_intf_prop)); + ext_properties.prop[i].mux_id = mux_channel[index].mux_id; + IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i, + ext_properties.prop[i].ip, + ext_properties.prop[i].rt_tbl_idx); + IPAWANDBG("action: %d mux:%d\n", + ext_properties.prop[i].action, + ext_properties.prop[i].mux_id); + } + ret = ipa2_register_intf_ext(mux_channel[index].vchannel_name, + &tx_properties, &rx_properties, &ext_properties); + if (ret) { + IPAWANERR("[%s]:ipa2_register_intf failed %d\n", + mux_channel[index].vchannel_name, ret); + goto fail; + } + mux_channel[index].ul_flt_reg = true; +fail: + kfree(ext_ioc_properties); + return ret; +} + +static void ipa_cleanup_deregister_intf(void) +{ + int i; + int ret; + + for (i = 0; i < rmnet_index; i++) { + if (mux_channel[i].ul_flt_reg) { + ret = ipa2_deregister_intf( + mux_channel[i].vchannel_name); + if (ret < 0) { + IPAWANERR("de-register device %s(%d) failed\n", + mux_channel[i].vchannel_name, + i); + return; + } + IPAWANDBG("de-register device %s(%d) success\n", + mux_channel[i].vchannel_name, + i); + } + mux_channel[i].ul_flt_reg = false; + } +} + +int wwan_update_mux_channel_prop(void) +{ + int ret = 0, i; + /* install UL filter rules */ + if (egress_set) { + if (ipa_qmi_ctx && + !ipa_qmi_ctx->modem_cfg_emb_pipe_flt) { + IPAWANDBG("setup UL filter rules\n"); + if (a7_ul_flt_set) { + IPAWANDBG("del previous UL filter rules\n"); + /* delete rule hdlers */ + ret = 
wwan_del_ul_flt_rule_to_ipa(); + if (ret) { + IPAWANERR("failed to del old rules\n"); + return -EINVAL; + } + IPAWANDBG("deleted old UL rules\n"); + } + ret = wwan_add_ul_flt_rule_to_ipa(); + } + if (ret) + IPAWANERR("failed to install UL rules\n"); + else + a7_ul_flt_set = true; + } + /* update Tx/Rx/Ext property */ + IPAWANDBG("update Tx/Rx/Ext property in IPA\n"); + if (rmnet_index == 0) { + IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n"); + return ret; + } + + ipa_cleanup_deregister_intf(); + + for (i = 0; i < rmnet_index; i++) { + ret = wwan_register_to_ipa(i); + if (ret < 0) { + IPAWANERR("failed to re-regist %s, mux %d, index %d\n", + mux_channel[i].vchannel_name, + mux_channel[i].mux_id, + i); + return -ENODEV; + } + IPAWANERR("dev(%s) has registered to IPA\n", + mux_channel[i].vchannel_name); + mux_channel[i].ul_flt_reg = true; + } + return ret; +} + +#ifdef INIT_COMPLETION +#define reinit_completion(x) INIT_COMPLETION(*(x)) +#endif /* INIT_COMPLETION */ + +static int __ipa_wwan_open(struct net_device *dev) +{ + struct wwan_private *wwan_ptr = netdev_priv(dev); + + IPAWANDBG("[%s] __wwan_open()\n", dev->name); + if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE) + reinit_completion(&wwan_ptr->resource_granted_completion); + wwan_ptr->device_status = WWAN_DEVICE_ACTIVE; + + if (ipa_rmnet_res.ipa_napi_enable) + napi_enable(&(wwan_ptr->napi)); + return 0; +} + +/** + * wwan_open() - Opens the wwan network interface. 
Opens logical + * channel on A2 MUX driver and starts the network stack queue + * + * @dev: network device + * + * Return codes: + * 0: success + * -ENODEV: Error while opening logical channel on A2 MUX driver + */ +static int ipa_wwan_open(struct net_device *dev) +{ + int rc = 0; + + IPAWANDBG("[%s] wwan_open()\n", dev->name); + rc = __ipa_wwan_open(dev); + if (rc == 0) + netif_start_queue(dev); + return rc; +} + +static int __ipa_wwan_close(struct net_device *dev) +{ + struct wwan_private *wwan_ptr = netdev_priv(dev); + int rc = 0; + + if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) { + wwan_ptr->device_status = WWAN_DEVICE_INACTIVE; + /* do not close wwan port once up, this causes + * remote side to hang if tried to open again + */ + reinit_completion(&wwan_ptr->resource_granted_completion); + if (ipa_rmnet_res.ipa_napi_enable) + napi_disable(&(wwan_ptr->napi)); + rc = ipa2_deregister_intf(dev->name); + if (rc) { + IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n", + dev->name, rc); + return rc; + } + return rc; + } else { + return -EBADF; + } +} + +/** + * ipa_wwan_stop() - Stops the wwan network interface. Closes + * logical channel on A2 MUX driver and stops the network stack + * queue + * + * @dev: network device + * + * Return codes: + * 0: success + * -ENODEV: Error while opening logical channel on A2 MUX driver + */ +static int ipa_wwan_stop(struct net_device *dev) +{ + IPAWANDBG("[%s]\n", dev->name); + __ipa_wwan_close(dev); + netif_stop_queue(dev); + return 0; +} + +static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu) +{ + if (0 > new_mtu || WWAN_DATA_LEN < new_mtu) + return -EINVAL; + IPAWANDBG("[%s] MTU change: old=%d new=%d\n", + dev->name, dev->mtu, new_mtu); + dev->mtu = new_mtu; + return 0; +} + +/** + * ipa_wwan_xmit() - Transmits an skb. + * + * @skb: skb to be transmitted + * @dev: network device + * + * Return codes: + * 0: success + * NETDEV_TX_BUSY: Error while transmitting the skb. 
Try again + * later + * -EFAULT: Error while transmitting the skb + */ +static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int ret = 0; + bool qmap_check; + struct wwan_private *wwan_ptr = netdev_priv(dev); + struct ipa_tx_meta meta; + + if (skb->protocol != htons(ETH_P_MAP)) { + IPAWANDBG_LOW + ("SW filtering out none QMAP packet received from %s", + current->comm); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + qmap_check = RMNET_MAP_GET_CD_BIT(skb); + if (netif_queue_stopped(dev)) { + if (qmap_check && + atomic_read(&wwan_ptr->outstanding_pkts) < + wwan_ptr->outstanding_high_ctl) { + pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name); + goto send; + } else { + pr_err("[%s]fatal: %s stopped\n", __func__, dev->name); + return NETDEV_TX_BUSY; + } + } + + /* checking High WM hit */ + if (atomic_read(&wwan_ptr->outstanding_pkts) >= + wwan_ptr->outstanding_high) { + if (!qmap_check) { + IPAWANDBG_LOW + ("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n", + atomic_read(&wwan_ptr->outstanding_pkts), + wwan_ptr->outstanding_high, + netif_queue_stopped(dev), + qmap_check); + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + } + +send: + /* IPA_RM checking start */ + ret = ipa_rm_inactivity_timer_request_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret == -EINPROGRESS) { + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + if (ret) { + pr_err("[%s] fatal: ipa rm timer request resource failed %d\n", + dev->name, ret); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return -EFAULT; + } + /* IPA_RM checking end */ + + if (qmap_check) { + memset(&meta, 0, sizeof(meta)); + meta.pkt_init_dst_ep_valid = true; + meta.pkt_init_dst_ep_remote = true; + ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta); + } else { + ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL); + } + + if (ret) { + ret = NETDEV_TX_BUSY; + goto out; + } + + atomic_inc(&wwan_ptr->outstanding_pkts); + dev->stats.tx_packets++; + 
dev->stats.tx_bytes += skb->len; + ret = NETDEV_TX_OK; +out: + ipa_rm_inactivity_timer_release_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); + return ret; +} + +static void ipa_wwan_tx_timeout(struct net_device *dev) +{ + IPAWANERR("[%s]:[%s] data stall in UL\n", __func__, dev->name); +} + +/** + * apps_ipa_tx_complete_notify() - Rx notify + * + * @priv: driver context + * @evt: event type + * @data: data provided with event + * + * Check that the packet is the one we sent and release it + * This function will be called in defered context in IPA wq. + */ +static void apps_ipa_tx_complete_notify(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + struct net_device *dev = (struct net_device *)priv; + struct wwan_private *wwan_ptr; + + if (dev != ipa_netdevs[0]) { + IPAWANDBG("Received pre-SSR packet completion\n"); + dev_kfree_skb_any(skb); + return; + } + + if (evt != IPA_WRITE_DONE) { + IPAWANERR("unsupported evt on Tx callback, Drop the packet\n"); + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return; + } + + wwan_ptr = netdev_priv(dev); + atomic_dec(&wwan_ptr->outstanding_pkts); + __netif_tx_lock_bh(netdev_get_tx_queue(dev, 0)); + if (!atomic_read(&is_ssr) && + netif_queue_stopped(wwan_ptr->net) && + atomic_read(&wwan_ptr->outstanding_pkts) < + (wwan_ptr->outstanding_low)) { + IPAWANDBG_LOW + ("Outstanding low (%d) - wake up queue\n", + wwan_ptr->outstanding_low); + netif_wake_queue(wwan_ptr->net); + } + __netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0)); + dev_kfree_skb_any(skb); + ipa_rm_inactivity_timer_release_resource( + IPA_RM_RESOURCE_WWAN_0_PROD); +} + +/** + * apps_ipa_packet_receive_notify() - Rx notify + * + * @priv: driver context + * @evt: event type + * @data: data provided with event + * + * IPA will pass a packet to the Linux network stack with skb->data + */ +static void apps_ipa_packet_receive_notify(void *priv, + enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct 
net_device *dev = (struct net_device *)priv; + + if (evt == IPA_RECEIVE) { + struct sk_buff *skb = (struct sk_buff *)data; + int result; + unsigned int packet_len = skb->len; + + IPAWANDBG_LOW("Rx packet was received"); + skb->dev = ipa_netdevs[0]; + skb->protocol = htons(ETH_P_MAP); + + if (ipa_rmnet_res.ipa_napi_enable) { + trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets); + result = netif_receive_skb(skb); + } else { + if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH + == 0) { + trace_rmnet_ipa_netifni(dev->stats.rx_packets); + result = netif_rx_ni(skb); + } else { + trace_rmnet_ipa_netifrx(dev->stats.rx_packets); + result = netif_rx(skb); + } + } + + if (result) { + pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n", + __func__, __LINE__); + dev->stats.rx_dropped++; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += packet_len; + } else if (evt == IPA_CLIENT_START_POLL) + ipa_rmnet_rx_cb(priv); + else if (evt == IPA_CLIENT_COMP_NAPI) { + struct wwan_private *wwan_ptr = netdev_priv(dev); + + if (ipa_rmnet_res.ipa_napi_enable) + napi_complete(&(wwan_ptr->napi)); + } else + IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt); + +} + +static int handle_ingress_format(struct net_device *dev, + struct rmnet_ioctl_extended_s *in) +{ + int ret = 0; + struct rmnet_phys_ep_conf_s *ep_cfg; + + IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n"); + if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM) + ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en = + IPA_ENABLE_CS_OFFLOAD_DL; + + if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) { + IPAWANERR("get AGG size %d count %d\n", + in->u.ingress_format.agg_size, + in->u.ingress_format.agg_count); + + ret = ipa_disable_apps_wan_cons_deaggr( + in->u.ingress_format.agg_size, + in->u.ingress_format.agg_count); + + if (!ret) { + ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit = + in->u.ingress_format.agg_size; + ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit = + 
in->u.ingress_format.agg_count; + + if (ipa_rmnet_res.ipa_napi_enable) { + ipa_to_apps_ep_cfg.recycle_enabled = true; + ep_cfg = (struct rmnet_phys_ep_conf_s *) + rcu_dereference(dev->rx_handler_data); + ep_cfg->recycle = ipa_recycle_wan_skb; + pr_info("Wan Recycle Enabled\n"); + } + } + } + + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2; + + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = + true; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0; + ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0; + ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000; + + ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS; + ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify; + ipa_to_apps_ep_cfg.priv = dev; + + ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable; + ipa_to_apps_ep_cfg.desc_fifo_sz = + ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec); + + mutex_lock(&ipa_to_apps_pipe_handle_guard); + if (atomic_read(&is_ssr)) { + IPAWANDBG("In SSR sequence/recovery\n"); + mutex_unlock(&ipa_to_apps_pipe_handle_guard); + return -EFAULT; + } + ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl); + mutex_unlock(&ipa_to_apps_pipe_handle_guard); + + if (ret) + IPAWANERR("failed to configure ingress\n"); + + return ret; +} + +/** + * ipa_wwan_ioctl() - I/O control for wwan network driver. + * + * @dev: network device + * @ifr: ignored + * @cmd: cmd to be excecuded. 
can be one of the following: + * IPA_WWAN_IOCTL_OPEN - Open the network interface + * IPA_WWAN_IOCTL_CLOSE - Close the network interface + * + * Return codes: + * 0: success + * NETDEV_TX_BUSY: Error while transmitting the skb. Try again + * later + * -EFAULT: Error while transmitting the skb + */ +static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int rc = 0; + int mru = 1000, epid = 1, mux_index, len; + struct ipa_msg_meta msg_meta; + struct ipa_wan_msg *wan_msg = NULL; + struct rmnet_ioctl_extended_s extend_ioctl_data; + struct rmnet_ioctl_data_s ioctl_data; + + IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd); + switch (cmd) { + /* Set Ethernet protocol */ + case RMNET_IOCTL_SET_LLP_ETHERNET: + break; + /* Set RAWIP protocol */ + case RMNET_IOCTL_SET_LLP_IP: + break; + /* Get link protocol */ + case RMNET_IOCTL_GET_LLP: + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Set QoS header enabled */ + case RMNET_IOCTL_SET_QOS_ENABLE: + return -EINVAL; + /* Set QoS header disabled */ + case RMNET_IOCTL_SET_QOS_DISABLE: + break; + /* Get QoS header state */ + case RMNET_IOCTL_GET_QOS: + ioctl_data.u.operation_mode = RMNET_MODE_NONE; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Get operation mode */ + case RMNET_IOCTL_GET_OPMODE: + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + /* Open transport port */ + case RMNET_IOCTL_OPEN: + break; + /* Close transport port */ + case RMNET_IOCTL_CLOSE: + break; + /* Flow enable */ + case RMNET_IOCTL_FLOW_ENABLE: + IPAWANDBG("Received flow enable\n"); + if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_data_s))) { + rc = -EFAULT; + break; 
+ } + ipa_flow_control(IPA_CLIENT_USB_PROD, true, + ioctl_data.u.tcm_handle); + break; + /* Flow disable */ + case RMNET_IOCTL_FLOW_DISABLE: + IPAWANDBG("Received flow disable\n"); + if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_data_s))) { + rc = -EFAULT; + break; + } + ipa_flow_control(IPA_CLIENT_USB_PROD, false, + ioctl_data.u.tcm_handle); + break; + /* Set flow handle */ + case RMNET_IOCTL_FLOW_SET_HNDL: + break; + + /* Extended IOCTLs */ + case RMNET_IOCTL_EXTENDED: + if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; + IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n"); + if (copy_from_user(&extend_ioctl_data, + (u8 *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("failed to copy extended ioctl data\n"); + rc = -EFAULT; + break; + } + switch (extend_ioctl_data.extended_ioctl) { + /* Get features */ + case RMNET_IOCTL_GET_SUPPORTED_FEATURES: + IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n"); + extend_ioctl_data.u.data = + (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL | + RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT | + RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT); + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Set MRU */ + case RMNET_IOCTL_SET_MRU: + mru = extend_ioctl_data.u.data; + IPAWANDBG("get MRU size %d\n", + extend_ioctl_data.u.data); + break; + /* Get MRU */ + case RMNET_IOCTL_GET_MRU: + extend_ioctl_data.u.data = mru; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* GET SG support */ + case RMNET_IOCTL_GET_SG_SUPPORT: + extend_ioctl_data.u.data = + ipa_rmnet_res.ipa_advertise_sg_support; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Get endpoint ID */ + case RMNET_IOCTL_GET_EPID: + 
IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n"); + extend_ioctl_data.u.data = epid; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + if (copy_from_user(&extend_ioctl_data, + (u8 *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("copy extended ioctl data failed\n"); + rc = -EFAULT; + break; + } + IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n", + extend_ioctl_data.u.data); + break; + /* Endpoint pair */ + case RMNET_IOCTL_GET_EP_PAIR: + IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n"); + extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD); + extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num = + ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + if (copy_from_user(&extend_ioctl_data, + (u8 *)ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s))) { + IPAWANERR("copy extended ioctl data failed\n"); + rc = -EFAULT; + break; + } + IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n", + extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num, + extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num); + break; + /* Get driver name */ + case RMNET_IOCTL_GET_DRIVER_NAME: + memcpy(&extend_ioctl_data.u.if_name, + ipa_netdevs[0]->name, IFNAMSIZ); + extend_ioctl_data.u.if_name[IFNAMSIZ - 1] = '\0'; + if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + &extend_ioctl_data, + sizeof(struct rmnet_ioctl_extended_s))) + rc = -EFAULT; + break; + /* Add MUX ID */ + case RMNET_IOCTL_ADD_MUX_CHANNEL: + mux_index = find_mux_channel_index( + extend_ioctl_data.u.rmnet_mux_val.mux_id); + if (mux_index < MAX_NUM_OF_MUX_CHANNEL) { + IPAWANDBG("already setup mux(%d)\n", + extend_ioctl_data.u.rmnet_mux_val.mux_id); + return rc; + } + mutex_lock(&add_mux_channel_lock); + if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) 
{ + IPAWANERR("Exceed mux_channel limit(%d)\n", + rmnet_index); + mutex_unlock(&add_mux_channel_lock); + return -EFAULT; + } + extend_ioctl_data.u.rmnet_mux_val.vchannel_name + [IFNAMSIZ-1] = '\0'; + IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n", + extend_ioctl_data.u.rmnet_mux_val.mux_id, + extend_ioctl_data.u.rmnet_mux_val.vchannel_name); + /* cache the mux name and id */ + mux_channel[rmnet_index].mux_id = + extend_ioctl_data.u.rmnet_mux_val.mux_id; + memcpy(mux_channel[rmnet_index].vchannel_name, + extend_ioctl_data.u.rmnet_mux_val.vchannel_name, + sizeof(mux_channel[rmnet_index].vchannel_name)); + mux_channel[rmnet_index].vchannel_name[ + IFNAMSIZ - 1] = '\0'; + + IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n", + mux_channel[rmnet_index].vchannel_name, + mux_channel[rmnet_index].mux_id, + rmnet_index); + /* check if UL filter rules coming*/ + if (num_q6_rule != 0) { + IPAWANERR("dev(%s) register to IPA\n", + extend_ioctl_data.u.rmnet_mux_val.vchannel_name); + rc = wwan_register_to_ipa(rmnet_index); + if (rc < 0) { + IPAWANERR("device %s reg IPA failed\n", + extend_ioctl_data.u.rmnet_mux_val.vchannel_name); + mutex_unlock(&add_mux_channel_lock); + return -ENODEV; + } + mux_channel[rmnet_index].mux_channel_set = true; + mux_channel[rmnet_index].ul_flt_reg = true; + } else { + IPAWANDBG("dev(%s) haven't registered to IPA\n", + extend_ioctl_data.u.rmnet_mux_val.vchannel_name); + mux_channel[rmnet_index].mux_channel_set = true; + mux_channel[rmnet_index].ul_flt_reg = false; + } + rmnet_index++; + mutex_unlock(&add_mux_channel_lock); + break; + case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT: + IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n"); + if ((extend_ioctl_data.u.data) & + RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) { + apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8; + apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en = + IPA_ENABLE_CS_OFFLOAD_UL; + apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.cs_metadata_hdr_offset = 1; + } else { + apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4; + } 
+ if ((extend_ioctl_data.u.data) & + RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION) + apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en = + IPA_ENABLE_AGGR; + else + apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en = + IPA_BYPASS_AGGR; + apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1; + /* modem want offset at 0! */ + apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0; + apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst = + IPA_CLIENT_APPS_LAN_WAN_PROD; + apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC; + + apps_to_ipa_ep_cfg.client = + IPA_CLIENT_APPS_LAN_WAN_PROD; + apps_to_ipa_ep_cfg.notify = + apps_ipa_tx_complete_notify; + apps_to_ipa_ep_cfg.desc_fifo_sz = + IPA_SYS_TX_DATA_DESC_FIFO_SZ; + apps_to_ipa_ep_cfg.priv = dev; + + rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg, + &apps_to_ipa_hdl); + if (rc) + IPAWANERR("failed to config egress endpoint\n"); + + if (num_q6_rule != 0) { + /* already got Q6 UL filter rules*/ + if (ipa_qmi_ctx && + !ipa_qmi_ctx->modem_cfg_emb_pipe_flt) { + /* protect num_q6_rule */ + mutex_lock(&add_mux_channel_lock); + rc = wwan_add_ul_flt_rule_to_ipa(); + mutex_unlock(&add_mux_channel_lock); + } else + rc = 0; + egress_set = true; + if (rc) + IPAWANERR("install UL rules failed\n"); + else + a7_ul_flt_set = true; + } else { + /* wait Q6 UL filter rules*/ + egress_set = true; + IPAWANDBG("no UL-rules, egress_set(%d)\n", + egress_set); + } + break; + case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */ + rc = handle_ingress_format(dev, &extend_ioctl_data); + break; + case RMNET_IOCTL_SET_XLAT_DEV_INFO: + wan_msg = kzalloc(sizeof(struct ipa_wan_msg), + GFP_KERNEL); + if (!wan_msg) { + IPAWANERR("Failed to allocate memory.\n"); + return -ENOMEM; + } + extend_ioctl_data.u.if_name[IFNAMSIZ-1] = '\0'; + len = sizeof(wan_msg->upstream_ifname) > + sizeof(extend_ioctl_data.u.if_name) ? 
+ sizeof(extend_ioctl_data.u.if_name) : + sizeof(wan_msg->upstream_ifname); + strlcpy(wan_msg->upstream_ifname, + extend_ioctl_data.u.if_name, len); + wan_msg->upstream_ifname[len - 1] = '\0'; + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = WAN_XLAT_CONNECT; + msg_meta.msg_len = sizeof(struct ipa_wan_msg); + rc = ipa2_send_msg(&msg_meta, wan_msg, + ipa_wwan_msg_free_cb); + if (rc) { + IPAWANERR("Failed to send XLAT_CONNECT msg\n"); + kfree(wan_msg); + } + break; + /* Get agg count */ + case RMNET_IOCTL_GET_AGGREGATION_COUNT: + break; + /* Set agg count */ + case RMNET_IOCTL_SET_AGGREGATION_COUNT: + break; + /* Get agg size */ + case RMNET_IOCTL_GET_AGGREGATION_SIZE: + break; + /* Set agg size */ + case RMNET_IOCTL_SET_AGGREGATION_SIZE: + break; + /* Do flow control */ + case RMNET_IOCTL_FLOW_CONTROL: + break; + /* For legacy use */ + case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL: + break; + /* Get HW/SW map */ + case RMNET_IOCTL_GET_HWSW_MAP: + break; + /* Set RX Headroom */ + case RMNET_IOCTL_SET_RX_HEADROOM: + break; + default: + IPAWANERR("[%s] unsupported extended cmd[%d]", + dev->name, + extend_ioctl_data.extended_ioctl); + rc = -EINVAL; + } + break; + default: + IPAWANERR("[%s] unsupported cmd[%d]", + dev->name, cmd); + rc = -EINVAL; + } + return rc; +} + +static const struct net_device_ops ipa_wwan_ops_ip = { + .ndo_open = ipa_wwan_open, + .ndo_stop = ipa_wwan_stop, + .ndo_start_xmit = ipa_wwan_xmit, + .ndo_tx_timeout = ipa_wwan_tx_timeout, + .ndo_do_ioctl = ipa_wwan_ioctl, + .ndo_change_mtu = ipa_wwan_change_mtu, + .ndo_set_mac_address = 0, + .ndo_validate_addr = 0, +}; + +/** + * wwan_setup() - Setups the wwan network driver. 
+ * + * @dev: network device + * + * Return codes: + * None + */ + +static void ipa_wwan_setup(struct net_device *dev) +{ + dev->netdev_ops = &ipa_wwan_ops_ip; + ether_setup(dev); + /* set this after calling ether_setup */ + dev->header_ops = 0; /* No header */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->mtu = WWAN_DATA_LEN; + dev->addr_len = 0; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->needed_headroom = HEADROOM_FOR_QMAP; + dev->needed_tailroom = TAILROOM; + dev->watchdog_timeo = 1000; +} + +/* IPA_RM related functions start*/ +static void q6_prod_rm_request_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource); +static void q6_prod_rm_release_resource(struct work_struct *work); +static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource); + +static void q6_prod_rm_request_resource(struct work_struct *work) +{ + int ret = 0; + + ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0 && ret != -EINPROGRESS) { + IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__, + ret); + return; + } +} + +static int q6_rm_request_resource(void) +{ + queue_delayed_work(ipa_rm_q6_workqueue, + &q6_con_rm_request, 0); + return 0; +} + +static void q6_prod_rm_release_resource(struct work_struct *work) +{ + int ret = 0; + + ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0 && ret != -EINPROGRESS) { + IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__, + ret); + return; + } +} + + +static int q6_rm_release_resource(void) +{ + queue_delayed_work(ipa_rm_q6_workqueue, + &q6_con_rm_release, 0); + return 0; +} + + +static void q6_rm_notify_cb(void *user_data, + enum ipa_rm_event event, + unsigned long data) +{ + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + IPAWANDBG_LOW("%s: Q6_PROD GRANTED CB\n", __func__); + break; + case IPA_RM_RESOURCE_RELEASED: + IPAWANDBG_LOW("%s: Q6_PROD RELEASED CB\n", __func__); + break; + default: 
+ return; + } +} +static int q6_initialize_rm(void) +{ + struct ipa_rm_create_params create_params; + struct ipa_rm_perf_profile profile; + int result; + + /* Initialize IPA_RM workqueue */ + ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req"); + if (!ipa_rm_q6_workqueue) + return -ENOMEM; + + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_Q6_PROD; + create_params.reg_params.notify_cb = &q6_rm_notify_cb; + result = ipa_rm_create_resource(&create_params); + if (result) + goto create_rsrc_err1; + memset(&create_params, 0, sizeof(create_params)); + create_params.name = IPA_RM_RESOURCE_Q6_CONS; + create_params.release_resource = &q6_rm_release_resource; + create_params.request_resource = &q6_rm_request_resource; + result = ipa_rm_create_resource(&create_params); + if (result) + goto create_rsrc_err2; + /* add dependency*/ + result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result) + goto add_dpnd_err; + /* setup Performance profile */ + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = 100; + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD, + &profile); + if (result) + goto set_perf_err; + result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS, + &profile); + if (result) + goto set_perf_err; + return result; + +set_perf_err: + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); +add_dpnd_err: + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + if (result < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_CONS, result); +create_rsrc_err2: + result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + if (result < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, result); +create_rsrc_err1: + destroy_workqueue(ipa_rm_q6_workqueue); + return result; +} + +void q6_deinitialize_rm(void) +{ + int ret; + + ret = 
ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (ret < 0) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS, + ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_CONS, ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_Q6_PROD, ret); + + if (ipa_rm_q6_workqueue) + destroy_workqueue(ipa_rm_q6_workqueue); +} + +static void wake_tx_queue(struct work_struct *work) +{ + if (ipa_netdevs[0]) { + __netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0)); + netif_wake_queue(ipa_netdevs[0]); + __netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0)); + } +} + +/** + * ipa_rm_resource_granted() - Called upon + * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped. + * + * @dev: network device, supplied as IPA RM user data + * + * Return codes: + * None + */ +static void ipa_rm_resource_granted(void *dev) +{ + IPAWANDBG_LOW("Resource Granted - starting queue\n"); + schedule_work(&ipa_tx_wakequeue_work); +} + +/** + * ipa_rm_notify() - Callback function for RM events. Handles + * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events. + * IPA_RM_RESOURCE_GRANTED is handled in the context of shared + * workqueue.
+ * + * @dev: network device + * @event: IPA RM event + * @data: Additional data provided by IPA RM + * + * Return codes: + * None + */ +static void ipa_rm_notify(void *dev, enum ipa_rm_event event, + unsigned long data) +{ + struct wwan_private *wwan_ptr = netdev_priv(dev); + + pr_debug("%s: event %d\n", __func__, event); + switch (event) { + case IPA_RM_RESOURCE_GRANTED: + if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) { + complete_all(&wwan_ptr->resource_granted_completion); + break; + } + ipa_rm_resource_granted(dev); + break; + case IPA_RM_RESOURCE_RELEASED: + break; + default: + pr_err("%s: unknown event %d\n", __func__, event); + break; + } +} + +/* IPA_RM related functions end*/ + +static int ssr_notifier_cb(struct notifier_block *this, + unsigned long code, + void *data); + +static struct notifier_block ssr_notifier = { + .notifier_call = ssr_notifier_cb, +}; + +static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev, + struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res) +{ + int result; + + ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ; + ipa_rmnet_drv_res->ipa_rmnet_ssr = + of_property_read_bool(pdev->dev.of_node, + "qcom,rmnet-ipa-ssr"); + pr_info("IPA SSR support = %s\n", + ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False"); + ipa_rmnet_drv_res->ipa_loaduC = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-loaduC"); + pr_info("IPA ipa-loaduC = %s\n", + ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False"); + + ipa_rmnet_drv_res->ipa_advertise_sg_support = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-advertise-sg-support"); + pr_info("IPA SG support = %s\n", + ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False"); + + ipa_rmnet_drv_res->ipa_napi_enable = + of_property_read_bool(pdev->dev.of_node, + "qcom,ipa-napi-enable"); + pr_info("IPA Napi Enable = %s\n", + ipa_rmnet_drv_res->ipa_napi_enable ? 
"True" : "False"); + + /* Get IPA WAN RX desc fifo size */ + result = of_property_read_u32(pdev->dev.of_node, + "qcom,wan-rx-desc-size", + &ipa_rmnet_drv_res->wan_rx_desc_size); + if (result) + pr_info("using default for wan-rx-desc-size = %u\n", + ipa_rmnet_drv_res->wan_rx_desc_size); + else + IPAWANDBG(": found ipa_drv_res->wan-rx-desc-size = %u\n", + ipa_rmnet_drv_res->wan_rx_desc_size); + + return 0; +} + +struct ipa_rmnet_context ipa_rmnet_ctx; + +/** + * ipa_wwan_probe() - Initialized the module and registers as a + * network interface to the network stack + * + * Return codes: + * 0: success + * -ENOMEM: No memory available + * -EFAULT: Internal error + * -ENODEV: IPA driver not loaded + */ +static int ipa_wwan_probe(struct platform_device *pdev) +{ + int ret, i; + struct net_device *dev; + struct wwan_private *wwan_ptr; + struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */ + struct ipa_rm_perf_profile profile; /* IPA_RM */ + + pr_info("rmnet_ipa started initialization\n"); + + if (!ipa2_is_ready()) { + IPAWANERR("IPA driver not loaded\n"); + return -ENODEV; + } + + ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res); + ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr; + + ret = ipa_init_q6_smem(); + if (ret) { + IPAWANERR("ipa_init_q6_smem failed!\n"); + return ret; + } + + /* initialize tx/rx enpoint setup */ + memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params)); + memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params)); + + /* initialize ex property setup */ + num_q6_rule = 0; + old_num_q6_rule = 0; + rmnet_index = 0; + egress_set = false; + a7_ul_flt_set = false; + for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) + memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val)); + + /* start A7 QMI service/client */ + if (ipa_rmnet_res.ipa_loaduC) + /* Android platform loads uC */ + ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01); + else + /* LE platform not loads uC */ + 
ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01); + + /* construct default WAN RT tbl for IPACM */ + ret = ipa_setup_a7_qmap_hdr(); + if (ret) + goto setup_a7_qmap_hdr_err; + ret = ipa_setup_dflt_wan_rt_tables(); + if (ret) + goto setup_dflt_wan_rt_tables_err; + + if (!atomic_read(&is_ssr)) { + /* Start transport-driver fd ioctl for ipacm for first init */ + ret = wan_ioctl_init(); + if (ret) + goto wan_ioctl_init_err; + } else { + /* Enable sending QMI messages after SSR */ + wan_ioctl_enable_qmi_messages(); + } + + /* initialize wan-driver netdev */ + dev = alloc_netdev(sizeof(struct wwan_private), + IPA_WWAN_DEV_NAME, + NET_NAME_UNKNOWN, + ipa_wwan_setup); + if (!dev) { + IPAWANERR("no memory for netdev\n"); + ret = -ENOMEM; + goto alloc_netdev_err; + } + ipa_netdevs[0] = dev; + wwan_ptr = netdev_priv(dev); + memset(wwan_ptr, 0, sizeof(*wwan_ptr)); + IPAWANDBG("wwan_ptr (private) = %p", wwan_ptr); + wwan_ptr->net = dev; + wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL; + wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH; + wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW; + atomic_set(&wwan_ptr->outstanding_pkts, 0); + spin_lock_init(&wwan_ptr->lock); + init_completion(&wwan_ptr->resource_granted_completion); + + if (!atomic_read(&is_ssr)) { + /* IPA_RM configuration starts */ + ret = q6_initialize_rm(); + if (ret) { + IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n", + __func__, ret); + goto q6_init_err; + } + } + + memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params)); + ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD; + ipa_rm_params.reg_params.user_data = dev; + ipa_rm_params.reg_params.notify_cb = ipa_rm_notify; + ret = ipa_rm_create_resource(&ipa_rm_params); + if (ret) { + pr_err("%s: unable to create resourse %d in IPA RM\n", + __func__, IPA_RM_RESOURCE_WWAN_0_PROD); + goto create_rsrc_err; + } + ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_INACTIVITY_TIMER); + if (ret) { + pr_err("%s: ipa rm 
timer init failed %d on resourse %d\n", + __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD); + goto timer_init_err; + } + /* add dependency */ + ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret) + goto add_dpnd_err; + /* setup Performance profile */ + memset(&profile, 0, sizeof(profile)); + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD, + &profile); + if (ret) + goto set_perf_err; + /* IPA_RM configuration ends */ + + /* Enable SG support in netdevice. */ + if (ipa_rmnet_res.ipa_advertise_sg_support) + dev->hw_features |= NETIF_F_SG; + + /* Enable NAPI support in netdevice. */ + if (ipa_rmnet_res.ipa_napi_enable) { + netif_napi_add(dev, &(wwan_ptr->napi), + ipa_rmnet_poll, NAPI_WEIGHT); + } + + ret = register_netdev(dev); + if (ret) { + IPAWANERR("unable to register ipa_netdev %d rc=%d\n", + 0, ret); + goto set_perf_err; + } + + IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n", + ipa_netdevs[0]->name); + if (ret) { + IPAWANERR("default configuration failed rc=%d\n", + ret); + goto config_err; + } + atomic_set(&is_initialized, 1); + if (!atomic_read(&is_ssr)) { + /* offline charging mode */ + ipa2_proxy_clk_unvote(); + } + atomic_set(&is_ssr, 0); + + pr_info("rmnet_ipa completed initialization\n"); + return 0; +config_err: + if (ipa_rmnet_res.ipa_napi_enable) + netif_napi_del(&(wwan_ptr->napi)); + unregister_netdev(ipa_netdevs[0]); +set_perf_err: + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, + ret); +add_dpnd_err: + ret = ipa_rm_inactivity_timer_destroy( + IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */ + if (ret) + IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); +timer_init_err: + ret = 
ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); +create_rsrc_err: + + if (!atomic_read(&is_ssr)) + q6_deinitialize_rm(); + +q6_init_err: + free_netdev(ipa_netdevs[0]); + ipa_netdevs[0] = NULL; +alloc_netdev_err: + wan_ioctl_deinit(); +wan_ioctl_init_err: + ipa_del_dflt_wan_rt_tables(); +setup_dflt_wan_rt_tables_err: + ipa_del_a7_qmap_hdr(); +setup_a7_qmap_hdr_err: + ipa_qmi_service_exit(); + atomic_set(&is_ssr, 0); + return ret; +} + +static int ipa_wwan_remove(struct platform_device *pdev) +{ + int ret; + struct wwan_private *wwan_ptr; + + wwan_ptr = netdev_priv(ipa_netdevs[0]); + + pr_info("rmnet_ipa started deinitialization\n"); + mutex_lock(&ipa_to_apps_pipe_handle_guard); + ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl); + if (ret < 0) + IPAWANERR("Failed to teardown IPA->APPS pipe\n"); + else + ipa_to_apps_hdl = -1; + if (ipa_rmnet_res.ipa_napi_enable) + netif_napi_del(&(wwan_ptr->napi)); + mutex_unlock(&ipa_to_apps_pipe_handle_guard); + unregister_netdev(ipa_netdevs[0]); + ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (ret < 0) + IPAWANERR("Error deleting dependency %d->%d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS, + ret); + ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret < 0) + IPAWANERR( + "Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); + ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + if (ret < 0) + IPAWANERR("Error deleting resource %d, ret=%d\n", + IPA_RM_RESOURCE_WWAN_0_PROD, ret); + cancel_work_sync(&ipa_tx_wakequeue_work); + cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); + free_netdev(ipa_netdevs[0]); + ipa_netdevs[0] = NULL; + /* No need to remove wwan_ioctl during SSR */ + if (!atomic_read(&is_ssr)) + wan_ioctl_deinit(); + ipa_del_dflt_wan_rt_tables(); + 
ipa_del_a7_qmap_hdr(); + ipa_del_mux_qmap_hdrs(); + if (ipa_qmi_ctx && !ipa_qmi_ctx->modem_cfg_emb_pipe_flt) + wwan_del_ul_flt_rule_to_ipa(); + ipa_cleanup_deregister_intf(); + atomic_set(&is_initialized, 0); + pr_info("rmnet_ipa completed deinitialization\n"); + return 0; +} + +/** + * rmnet_ipa_ap_suspend() - suspend callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP suspend + * operation is invoked, usually by pressing a suspend button. + * + * Returns -EAGAIN to runtime_pm framework in case there are pending packets + * in the Tx queue. This will postpone the suspend operation until all the + * pending packets have been transmitted. + * + * In case there are no packets to send, releases the WWAN0_PROD entity. + * As an outcome, the number of IPA active clients should be decremented + * until IPA clocks can be gated. + */ +static int rmnet_ipa_ap_suspend(struct device *dev) +{ + struct net_device *netdev = ipa_netdevs[0]; + struct wwan_private *wwan_ptr = netdev_priv(netdev); + + IPAWANDBG_LOW("Enter...\n"); + /* Do not allow A7 to suspend in case there are outstanding packets */ + if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) { + IPAWANDBG("Outstanding packets, postponing AP suspend.\n"); + return -EAGAIN; + } + + /* Make sure that there is no Tx operation ongoing */ + netif_tx_lock_bh(netdev); + netif_stop_queue(netdev); + + /* Stopping watchdog timer when the pipe enters suspend state */ + if (del_timer(&netdev->watchdog_timer)) + dev_put(netdev); + + ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD); + netif_tx_unlock_bh(netdev); + IPAWANDBG_LOW("Exit\n"); + + return 0; +} + +/** + * rmnet_ipa_ap_resume() - resume callback for runtime_pm + * @dev: pointer to device + * + * This callback will be invoked by the runtime_pm framework when an AP resume + * operation is invoked. + * + * Enables the network interface queue and returns success to the + * runtime_pm framework.
+ */ +static int rmnet_ipa_ap_resume(struct device *dev) +{ + struct net_device *netdev = ipa_netdevs[0]; + + IPAWANDBG_LOW("Enter...\n"); + if (netdev) { + netif_wake_queue(netdev); + /* Starting Watch dog timer, pipe was changes to resume state */ + if (netif_running(netdev) && netdev->watchdog_timeo <= 0) + __netdev_watchdog_up(netdev); + } + IPAWANDBG_LOW("Exit\n"); + + return 0; +} + +static void ipa_stop_polling_stats(void) +{ + cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); + ipa_rmnet_ctx.polling_interval = 0; +} + +static const struct of_device_id rmnet_ipa_dt_match[] = { + {.compatible = "qcom,rmnet-ipa"}, + {}, +}; +MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match); + +static const struct dev_pm_ops rmnet_ipa_pm_ops = { + .suspend_noirq = rmnet_ipa_ap_suspend, + .resume_noirq = rmnet_ipa_ap_resume, +}; + +static struct platform_driver rmnet_ipa_driver = { + .driver = { + .name = "rmnet_ipa", + .pm = &rmnet_ipa_pm_ops, + .of_match_table = rmnet_ipa_dt_match, + }, + .probe = ipa_wwan_probe, + .remove = ipa_wwan_remove, +}; + +/** + * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification + * + * This function sends the SSR notification before modem shutdown and + * after_powerup from SSR framework, to user-space module + */ +static void rmnet_ipa_send_ssr_notification(bool ssr_done) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + if (ssr_done) + msg_meta.msg_type = IPA_SSR_AFTER_POWERUP; + else + msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + +static int ssr_notifier_cb(struct notifier_block *this, + unsigned long code, + void *data) +{ + if (ipa_rmnet_ctx.ipa_rmnet_ssr) { + if (code == SUBSYS_BEFORE_SHUTDOWN) { + pr_info("IPA received MPSS BEFORE_SHUTDOWN\n"); + /* send SSR before-shutdown notification to IPACM */ + rmnet_ipa_send_ssr_notification(false); + 
atomic_set(&is_ssr, 1); + ipa_q6_pre_shutdown_cleanup(); + if (ipa_netdevs[0]) + netif_stop_queue(ipa_netdevs[0]); + ipa_qmi_stop_workqueues(); + wan_ioctl_stop_qmi_messages(); + ipa_stop_polling_stats(); + if (atomic_read(&is_initialized)) + platform_driver_unregister(&rmnet_ipa_driver); + pr_info("IPA BEFORE_SHUTDOWN handling is complete\n"); + return NOTIFY_DONE; + } + if (code == SUBSYS_AFTER_SHUTDOWN) { + pr_info("IPA received MPSS AFTER_SHUTDOWN\n"); + if (atomic_read(&is_ssr)) + ipa_q6_post_shutdown_cleanup(); + pr_info("IPA AFTER_SHUTDOWN handling is complete\n"); + return NOTIFY_DONE; + } + if (code == SUBSYS_AFTER_POWERUP) { + pr_info("IPA received MPSS AFTER_POWERUP\n"); + if (!atomic_read(&is_initialized) + && atomic_read(&is_ssr)) + platform_driver_register(&rmnet_ipa_driver); + pr_info("IPA AFTER_POWERUP handling is complete\n"); + return NOTIFY_DONE; + } + if (code == SUBSYS_BEFORE_POWERUP) { + pr_info("IPA received MPSS BEFORE_POWERUP\n"); + if (atomic_read(&is_ssr)) + /* clean up cached QMI msg/handlers */ + ipa_qmi_service_exit(); + ipa2_proxy_clk_vote(); + pr_info("IPA BEFORE_POWERUP handling is complete\n"); + return NOTIFY_DONE; + } + } + IPAWANDBG_LOW("Exit\n"); + return NOTIFY_DONE; +} + +/** + * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg + * @buff: pointer to buffer containing the message + * @len: message len + * @type: message type + * + * This function is invoked when ipa2_send_msg is complete (Provided as a + * free function pointer along with the message). + */ +static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAWANERR("Null buffer\n"); + return; + } + + if (type != IPA_TETHERING_STATS_UPDATE_STATS && + type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) { + IPAWANERR("Wrong type given. 
buff %p type %d\n", + buff, type); + } + kfree(buff); +} + +/** + * rmnet_ipa_get_stats_and_update(bool reset) - Gets pipe stats from Modem + * + * This function queries the IPA Modem driver for the pipe stats + * via QMI, and updates the user space IPA entity. + */ +static void rmnet_ipa_get_stats_and_update(bool reset) +{ + struct ipa_get_data_stats_req_msg_v01 req; + struct ipa_get_data_stats_resp_msg_v01 *resp; + struct ipa_msg_meta msg_meta; + int rc; + + resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + IPAWANERR("Can't allocate memory for stats message\n"); + return; + } + + memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01)); + + req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01; + if (reset) { + req.reset_stats_valid = true; + req.reset_stats = true; + IPAWANERR("Get the latest pipe-stats and reset it\n"); + } + + rc = ipa_qmi_get_data_stats(&req, resp); + if (rc) { + IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc); + kfree(resp); + return; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01); + rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa2_send_msg failed: %d\n", rc); + kfree(resp); + return; + } +} + +/** + * tethering_stats_poll_queue() - Stats polling function + * @work - Work entry + * + * This function is scheduled periodically (per the interval) in + * order to poll the IPA Modem driver for the pipe stats. 
+ */ +static void tethering_stats_poll_queue(struct work_struct *work) +{ + rmnet_ipa_get_stats_and_update(false); + + /* Schedule again only if there's an active polling interval */ + if (ipa_rmnet_ctx.polling_interval != 0) + schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, + msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000)); +} + +/** + * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem + * + * This function retrieves the data usage (used quota) from the IPA Modem driver + * via QMI, and updates IPA user space entity. + */ +static void rmnet_ipa_get_network_stats_and_update(void) +{ + struct ipa_get_apn_data_stats_req_msg_v01 req; + struct ipa_get_apn_data_stats_resp_msg_v01 *resp; + struct ipa_msg_meta msg_meta; + int rc; + + resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + IPAWANERR("Can't allocate memory for network stats message\n"); + return; + } + + memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01)); + + req.mux_id_list_valid = true; + req.mux_id_list_len = 1; + req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id; + + rc = ipa_qmi_get_network_stats(&req, resp); + if (rc) { + IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc); + kfree(resp); + return; + } + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS; + msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01); + rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg); + if (rc) { + IPAWANERR("ipa2_send_msg failed: %d\n", rc); + kfree(resp); + return; + } +} + +/** + * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from + * IPA Modem + * This function sends the quota_reach indication from the IPA Modem driver + * via QMI, to user-space module + */ +static void rmnet_ipa_send_quota_reach_ind(void) +{ + struct ipa_msg_meta 
msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_QUOTA_REACH; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + +/** + * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_POLL_TETHERING_STATS. + * In case polling interval received is 0, polling will stop + * (If there's a polling in progress, it will allow it to finish), and then will + * fetch network stats, and update the IPA user space. + * + * Return codes: + * 0: Success + */ +int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data) +{ + ipa_rmnet_ctx.polling_interval = data->polling_interval_secs; + + cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work); + + if (ipa_rmnet_ctx.polling_interval == 0) { + ipa_qmi_stop_data_qouta(); + rmnet_ipa_get_network_stats_and_update(); + rmnet_ipa_get_stats_and_update(true); + return 0; + } + + schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0); + return 0; +} + +/** + * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface. + * It translates the given interface name to the Modem MUX ID and + * sends the request of the quota to the IPA Modem driver via QMI. 
+ * + * Return codes: + * 0: Success + * -EFAULT: Invalid interface name provided + * other: See ipa_qmi_set_data_quota + */ +static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data) +{ + u32 mux_id; + int index; + struct ipa_set_data_usage_quota_req_msg_v01 req; + + /* stop quota */ + if (!data->set_quota) + ipa_qmi_stop_data_qouta(); + + /* prevent string buffer overflows */ + data->interface_name[IFNAMSIZ-1] = '\0'; + + index = find_vchannel_name_index(data->interface_name); + IPAWANERR("iface name %s, quota %lu\n", + data->interface_name, + (unsigned long) data->quota_mbytes); + + if (index == MAX_NUM_OF_MUX_CHANNEL) { + IPAWANERR("%s is an invalid iface name\n", + data->interface_name); + return -ENODEV; + } + + mux_id = mux_channel[index].mux_id; + + ipa_rmnet_ctx.metered_mux_id = mux_id; + + memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01)); + req.apn_quota_list_valid = true; + req.apn_quota_list_len = 1; + req.apn_quota_list[0].mux_id = mux_id; + req.apn_quota_list[0].num_Mbytes = data->quota_mbytes; + + return ipa_qmi_set_data_quota(&req); +} + +static int rmnet_ipa_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data) +{ + struct ipa_set_wifi_quota wifi_quota; + int rc = 0; + + memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota)); + wifi_quota.set_quota = data->set_quota; + wifi_quota.quota_bytes = data->quota_mbytes; + IPAWANDBG("iface name %s, quota %lu\n", + data->interface_name, + (unsigned long) data->quota_mbytes); + + rc = ipa2_set_wlan_quota(&wifi_quota); + /* check if wlan-fw takes this quota-set */ + if (!wifi_quota.set_valid) + rc = -EFAULT; + return rc; +} + +/** + * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_DATA_QUOTA. + * It translates the given interface name to the Modem MUX ID and + * sends the request of the quota to the IPA Modem driver via QMI. 
+ * + * Return codes: + * 0: Success + * -EFAULT: Invalid interface name provided + * other: See ipa_qmi_set_data_quota + */ +int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* prevent string buffer overflows */ + data->interface_name[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->interface_name); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR("upstream iface %s not supported\n", + data->interface_name); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + rc = rmnet_ipa_set_data_quota_wifi(data); + if (rc) { + IPAWANERR("set quota on wifi failed\n"); + return rc; + } + } else { + rc = rmnet_ipa_set_data_quota_modem(data); + if (rc) { + IPAWANERR("set quota on modem failed\n"); + return rc; + } + } + return rc; +} + + /* rmnet_ipa_set_tether_client_pipe() - + * @data - IOCTL data + * + * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE. + * It records (or resets) the UL source and DL destination pipes + * associated with the given tethering client.
+ * + * Return codes: + * 0: Success + * -EFAULT: Invalid src/dst pipes provided + * other: See ipa_qmi_set_data_quota + */ +int rmnet_ipa_set_tether_client_pipe( + struct wan_ioctl_set_tether_client_pipe *data) +{ + int number, i; + + /* error checking if ul_src_pipe_len valid or not*/ + if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 || + data->ul_src_pipe_len < 0) { + IPAWANERR("UL src pipes %d exceeding max %d\n", + data->ul_src_pipe_len, + QMI_IPA_MAX_PIPES_V01); + return -EFAULT; + } + /* error checking if dl_dst_pipe_len valid or not*/ + if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 || + data->dl_dst_pipe_len < 0) { + IPAWANERR("DL dst pipes %d exceeding max %d\n", + data->dl_dst_pipe_len, + QMI_IPA_MAX_PIPES_V01); + return -EFAULT; + } + + IPAWANDBG("client %d, UL %d, DL %d, reset %d\n", + data->ipa_client, + data->ul_src_pipe_len, + data->dl_dst_pipe_len, + data->reset_client); + number = data->ul_src_pipe_len; + for (i = 0; i < number; i++) { + IPAWANDBG("UL index-%d pipe %d\n", i, + data->ul_src_pipe_list[i]); + if (data->reset_client) + ipa_set_client(data->ul_src_pipe_list[i], + 0, false); + else + ipa_set_client(data->ul_src_pipe_list[i], + data->ipa_client, true); + } + number = data->dl_dst_pipe_len; + for (i = 0; i < number; i++) { + IPAWANDBG("DL index-%d pipe %d\n", i, + data->dl_dst_pipe_list[i]); + if (data->reset_client) + ipa_set_client(data->dl_dst_pipe_list[i], + 0, false); + else + ipa_set_client(data->dl_dst_pipe_list[i], + data->ipa_client, false); + } + return 0; +} + +static int rmnet_ipa_query_tethering_stats_wifi( + struct wan_ioctl_query_tether_stats *data, bool reset) +{ + struct ipa_get_wdi_sap_stats *sap_stats; + int rc; + + sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats), + GFP_KERNEL); + if (!sap_stats) + return -ENOMEM; + + sap_stats->reset_stats = reset; + IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats); + + rc = ipa2_get_wlan_stats(sap_stats); + if (rc) { + kfree(sap_stats); + return rc; + } 
else if (data == NULL) { + IPAWANDBG("only reset wlan stats\n"); + kfree(sap_stats); + return 0; + } + + if (sap_stats->stats_valid) { + data->ipv4_tx_packets = sap_stats->ipv4_tx_packets; + data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes; + data->ipv4_rx_packets = sap_stats->ipv4_rx_packets; + data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes; + data->ipv6_tx_packets = sap_stats->ipv6_tx_packets; + data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes; + data->ipv6_rx_packets = sap_stats->ipv6_rx_packets; + data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes; + } + + IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n", + (unsigned long) data->ipv4_rx_packets, + (unsigned long) data->ipv6_rx_packets, + (unsigned long) data->ipv4_rx_bytes, + (unsigned long) data->ipv6_rx_bytes); + IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n", + (unsigned long) data->ipv4_tx_packets, + (unsigned long) data->ipv6_tx_packets, + (unsigned long) data->ipv4_tx_bytes, + (unsigned long) data->ipv6_tx_bytes); + + kfree(sap_stats); + return rc; +} + +int rmnet_ipa_query_tethering_stats_modem( + struct wan_ioctl_query_tether_stats *data, + bool reset +) +{ + struct ipa_get_data_stats_req_msg_v01 *req; + struct ipa_get_data_stats_resp_msg_v01 *resp; + int pipe_len, rc; + + req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01), + GFP_KERNEL); + if (!req) { + IPAWANERR("failed to allocate memory for stats message\n"); + return -ENOMEM; + } + resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01), + GFP_KERNEL); + if (!resp) { + IPAWANERR("failed to allocate memory for stats message\n"); + kfree(req); + return -ENOMEM; + } + memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01)); + memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01)); + + req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01; + if (reset) { + req->reset_stats_valid = true; + req->reset_stats = true; + IPAWANDBG("reset the pipe stats\n"); + } else { + /* print tethered-client enum */ + 
IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client); + } + + rc = ipa_qmi_get_data_stats(req, resp); + if (rc) { + IPAWANERR("can't get ipa_qmi_get_data_stats\n"); + kfree(req); + kfree(resp); + return rc; + } else if (data == NULL) { + IPAWANDBG("only reset modem stats\n"); + kfree(req); + kfree(resp); + return 0; + } + + if (resp->dl_dst_pipe_stats_list_valid) { + for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len; + pipe_len++) { + IPAWANDBG_LOW("Check entry(%d) dl_dst_pipe(%d)\n", + pipe_len, + resp->dl_dst_pipe_stats_list[pipe_len].pipe_index); + IPAWANDBG_LOW + ("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n", + (unsigned long) + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv4_packets, + (unsigned long) + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv6_packets, + (unsigned long) + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv4_bytes, + (unsigned long) + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv6_bytes); + if (ipa_get_client_uplink( + resp->dl_dst_pipe_stats_list[pipe_len].pipe_index) + == false) { + if (data->ipa_client == ipa_get_client( + resp->dl_dst_pipe_stats_list[pipe_len].pipe_index)) { + /* update the DL stats */ + data->ipv4_rx_packets += + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv4_packets; + data->ipv6_rx_packets += + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv6_packets; + data->ipv4_rx_bytes += + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv4_bytes; + data->ipv6_rx_bytes += + resp->dl_dst_pipe_stats_list[pipe_len].num_ipv6_bytes; + } + } + } + } + IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n", + (unsigned long) data->ipv4_rx_packets, + (unsigned long) data->ipv6_rx_packets, + (unsigned long) data->ipv4_rx_bytes, + (unsigned long) data->ipv6_rx_bytes); + + if (resp->ul_src_pipe_stats_list_valid) { + for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len; + pipe_len++) { + IPAWANDBG_LOW("Check entry(%d) ul_dst_pipe(%d)\n", + pipe_len, + 
resp->ul_src_pipe_stats_list[pipe_len].pipe_index); + IPAWANDBG_LOW + ("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n", + (unsigned long) + resp->ul_src_pipe_stats_list[pipe_len].num_ipv4_packets, + (unsigned long) + resp->ul_src_pipe_stats_list[pipe_len].num_ipv6_packets, + (unsigned long) + resp->ul_src_pipe_stats_list[pipe_len].num_ipv4_bytes, + (unsigned long) + resp->ul_src_pipe_stats_list[pipe_len].num_ipv6_bytes); + if (ipa_get_client_uplink( + resp->ul_src_pipe_stats_list[pipe_len].pipe_index) + == true) { + if (data->ipa_client == ipa_get_client( + resp->ul_src_pipe_stats_list[pipe_len].pipe_index)) { + /* update the DL stats */ + data->ipv4_tx_packets += + resp->ul_src_pipe_stats_list[pipe_len].num_ipv4_packets; + data->ipv6_tx_packets += + resp->ul_src_pipe_stats_list[pipe_len].num_ipv6_packets; + data->ipv4_tx_bytes += + resp->ul_src_pipe_stats_list[pipe_len].num_ipv4_bytes; + data->ipv6_tx_bytes += + resp->ul_src_pipe_stats_list[pipe_len].num_ipv6_bytes; + } + } + } + } + IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n", + (unsigned long) data->ipv4_tx_packets, + (unsigned long) data->ipv6_tx_packets, + (unsigned long) data->ipv4_tx_bytes, + (unsigned long) data->ipv6_tx_bytes); + kfree(req); + kfree(resp); + return 0; +} + +int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, + bool reset) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + data->tetherIface[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR("upstreamIface %s not supported\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_wifi( + data, false); + if (rc) { + IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + } else { 
+ IPAWANDBG_LOW(" query modem-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_modem( + data, false); + if (rc) { + IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + } + return rc; +} + +int rmnet_ipa_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data) +{ + struct wan_ioctl_query_tether_stats tether_stats; + enum ipa_upstream_type upstream_type; + int rc = 0; + + memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_wifi( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR_RL( + "wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } else { + IPAWANDBG_LOW(" query modem-backhaul stats\n"); + tether_stats.ipa_client = data->ipa_client; + rc = rmnet_ipa_query_tethering_stats_modem( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } + return rc; +} + +int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) +{ + enum ipa_upstream_type upstream_type; + int rc = 0; + + /* prevent string buffer overflows */ + data->upstreamIface[IFNAMSIZ-1] = '\0'; + + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if 
(upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR("upstream iface %s not supported\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG(" reset wifi-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_wifi( + NULL, true); + if (rc) { + IPAWANERR("reset WLAN stats failed\n"); + return rc; + } + } else { + IPAWANDBG(" reset modem-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_modem( + NULL, true); + if (rc) { + IPAWANERR("reset MODEM stats failed\n"); + return rc; + } + } + return rc; +} + + +/** + * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota + * @mux_id - The MUX ID on which the quota has been reached + * + * This function broadcasts a Netlink event using the kobject of the + * rmnet_ipa interface in order to alert the user space that the quota + * on the specific interface which matches the mux_id has been reached. + * + */ +void ipa_broadcast_quota_reach_ind(u32 mux_id, + enum ipa_upstream_type upstream_type) +{ + char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE]; + char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE]; + char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE]; + char *envp[IPA_UEVENT_NUM_EVNP] = { + alert_msg, iface_name_l, iface_name_m, NULL }; + int res; + int index; + + /* check upstream_type*/ + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR("upstreamIface type %d not supported\n", + upstream_type); + return; + } else if (upstream_type == IPA_UPSTEAM_MODEM) { + index = find_mux_channel_index(mux_id); + if (index == MAX_NUM_OF_MUX_CHANNEL) { + IPAWANERR("%u is an invalid mux ID\n", mux_id); + return; + } + } + + res = scnprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE, + "ALERT_NAME=%s", "quotaReachedAlert"); + if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + + /* posting msg for L-release for CNE */ + if (upstream_type == IPA_UPSTEAM_MODEM) { + res = scnprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + 
"UPSTREAM=%s", mux_channel[index].vchannel_name); + } else { + res = scnprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME); + } + if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + + /* posting msg for M-release for CNE */ + if (upstream_type == IPA_UPSTEAM_MODEM) { + res = scnprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "INTERFACE=%s", mux_channel[index].vchannel_name); + } else { + res = scnprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE, + "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME); + } + if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) { + IPAWANERR("message too long (%d)", res); + return; + } + + IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n", + alert_msg, iface_name_l, iface_name_m); + kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp); + + rmnet_ipa_send_quota_reach_ind(); +} + +/** + * ipa_q6_handshake_complete() - Perform operations once Q6 is up + * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR. + * + * This function is invoked once the handshake between the IPA AP driver + * and IPA Q6 driver is complete. At this point, it is possible to perform + * operations which can't be performed until IPA Q6 driver is up. + * + */ +void ipa_q6_handshake_complete(bool ssr_bootup) +{ + /* It is required to recover the network stats after SSR recovery */ + if (ssr_bootup) { + /* + * In case the uC is required to be loaded by the Modem, + * the proxy vote will be removed only when uC loading is + * complete and indication is received by the AP. After SSR, + * uC is already loaded. Therefore, proxy vote can be removed + * once Modem init is complete. 
+ */ + ipa2_proxy_clk_unvote(); + + /* send SSR power-up notification to IPACM */ + rmnet_ipa_send_ssr_notification(true); + + /* + * It is required to recover the network stats after + * SSR recovery + */ + rmnet_ipa_get_network_stats_and_update(); + + /* Enable holb monitoring on Q6 pipes. */ + ipa_q6_monitor_holb_mitigation(true); + } +} + +static int __init ipa_wwan_init(void) +{ + atomic_set(&is_initialized, 0); + atomic_set(&is_ssr, 0); + + mutex_init(&ipa_to_apps_pipe_handle_guard); + mutex_init(&add_mux_channel_lock); + ipa_to_apps_hdl = -1; + + ipa_qmi_init(); + + /* Register for Modem SSR */ + subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM, + &ssr_notifier); + if (!IS_ERR(subsys_notify_handle)) + return platform_driver_register(&rmnet_ipa_driver); + else + return (int)PTR_ERR(subsys_notify_handle); +} + +static void __exit ipa_wwan_cleanup(void) +{ + int ret; + + ipa_qmi_cleanup(); + mutex_destroy(&ipa_to_apps_pipe_handle_guard); + mutex_destroy(&add_mux_channel_lock); + ret = subsys_notif_unregister_notifier(subsys_notify_handle, + &ssr_notifier); + if (ret) + IPAWANERR( + "Error subsys_notif_unregister_notifier system %s, ret=%d\n", + SUBSYS_MODEM, ret); + platform_driver_unregister(&rmnet_ipa_driver); +} + +static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) + IPAWANERR("Null buffer.\n"); + kfree(buff); +} + +static void ipa_rmnet_rx_cb(void *priv) +{ + struct net_device *dev = priv; + struct wwan_private *wwan_ptr; + + IPAWANDBG("\n"); + + if (dev != ipa_netdevs[0]) { + IPAWANERR("Not matching with netdev\n"); + return; + } + + wwan_ptr = netdev_priv(dev); + napi_schedule(&(wwan_ptr->napi)); +} + +static int ipa_rmnet_poll(struct napi_struct *napi, int budget) +{ + int rcvd_pkts = 0; + + rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT); + IPAWANDBG("rcvd packets: %d\n", rcvd_pkts); + return rcvd_pkts; +} + +late_initcall(ipa_wwan_init); +module_exit(ipa_wwan_cleanup); +MODULE_DESCRIPTION("WWAN 
Network Interface"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c new file mode 100644 index 000000000000..d6fbfa86cae3 --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2015, 2018, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_qmi_service.h" + +#define DRIVER_NAME "wwan_ioctl" + +#ifdef CONFIG_COMPAT +#define WAN_IOC_ADD_FLT_RULE32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_RULE, \ + compat_uptr_t) +#define WAN_IOC_ADD_FLT_RULE_INDEX32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_ADD_FLT_INDEX, \ + compat_uptr_t) +#define WAN_IOC_POLL_TETHERING_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_POLL_TETHERING_STATS, \ + compat_uptr_t) +#define WAN_IOC_SET_DATA_QUOTA32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_DATA_QUOTA, \ + compat_uptr_t) +#define WAN_IOC_SET_TETHER_CLIENT_PIPE32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \ + compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS, \ + compat_uptr_t) +#define WAN_IOC_RESET_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_RESET_TETHER_STATS, \ + compat_uptr_t) +#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_DL_FILTER_STATS, \ + compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ + compat_uptr_t) + +#endif + +static unsigned int dev_num = 1; +static struct cdev wan_ioctl_cdev; +static unsigned int process_ioctl = 1; +static struct class *class; +static dev_t device; + +static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int retval = 0, rc = 0; + u32 pyld_sz; + u8 *param = NULL; + + IPAWANDBG("device %s got ioctl events :>>>\n", 
+ DRIVER_NAME); + + if (!process_ioctl) { + IPAWANDBG("modem is in SSR, ignoring ioctl\n"); + return -EAGAIN; + } + + switch (cmd) { + case WAN_IOC_ADD_FLT_RULE: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (qmi_filter_request_send( + (struct ipa_install_fltr_rule_req_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 add filter rule failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_ADD_FLT_RULE_INDEX: + IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (qmi_filter_notify_send( + (struct ipa_fltr_installed_notif_req_msg_v01 *)param)) { + IPAWANDBG("IPACM->Q6 rule index fail\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_VOTE_FOR_BW_MBPS: + IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n", + DRIVER_NAME); + pyld_sz = sizeof(uint32_t); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (vote_for_bus_bw((uint32_t *)param)) { + IPAWANERR("Failed to vote for bus BW\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_POLL_TETHERING_STATS: + IPAWANDBG_LOW("got 
WAN_IOCTL_POLL_TETHERING_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa_poll_tethering_stats( + (struct wan_ioctl_poll_tethering_stats *)param)) { + IPAWANERR_RL("WAN_IOCTL_POLL_TETHERING_STATS failed\n"); + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_DATA_QUOTA: + IPAWANDBG_LOW("got WAN_IOCTL_SET_DATA_QUOTA :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_set_data_quota); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + rc = rmnet_ipa_set_data_quota( + (struct wan_ioctl_set_data_quota *)param); + if (rc != 0) { + IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n"); + if (rc == -ENODEV) + retval = -ENODEV; + else + retval = -EFAULT; + break; + } + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_SET_TETHER_CLIENT_PIPE: + IPAWANDBG_LOW("got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_set_tether_client_pipe); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + if (rmnet_ipa_set_tether_client_pipe( + (struct wan_ioctl_set_tether_client_pipe *)param)) { + IPAWANERR("WAN_IOC_SET_TETHER_CLIENT_PIPE failed\n"); + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_QUERY_TETHER_STATS: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval 
= -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa_query_tethering_stats( + (struct wan_ioctl_query_tether_stats *)param, false)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_QUERY_TETHER_STATS_ALL: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS_ALL :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats_all); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa_query_tethering_stats_all( + (struct wan_ioctl_query_tether_stats_all *)param)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + + case WAN_IOC_RESET_TETHER_STATS: + IPAWANDBG_LOW("got WAN_IOC_RESET_TETHER_STATS :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats); + param = vmemdup_user((const void __user *)arg, pyld_sz); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa_reset_tethering_stats( + (struct wan_ioctl_reset_tether_stats *)param)) { + IPAWANERR("WAN_IOC_RESET_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + break; + + default: + retval = -ENOTTY; + } + kfree(param); + return retval; +} + +#ifdef CONFIG_COMPAT +long compat_wan_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case WAN_IOC_ADD_FLT_RULE32: + cmd = WAN_IOC_ADD_FLT_RULE; + break; + case WAN_IOC_ADD_FLT_RULE_INDEX32: + cmd = WAN_IOC_ADD_FLT_RULE_INDEX; + break; + case WAN_IOC_POLL_TETHERING_STATS32: + cmd = WAN_IOC_POLL_TETHERING_STATS; + break; + case 
WAN_IOC_SET_DATA_QUOTA32: + cmd = WAN_IOC_SET_DATA_QUOTA; + break; + case WAN_IOC_SET_TETHER_CLIENT_PIPE32: + cmd = WAN_IOC_SET_TETHER_CLIENT_PIPE; + break; + case WAN_IOC_QUERY_TETHER_STATS32: + cmd = WAN_IOC_QUERY_TETHER_STATS; + break; + case WAN_IOC_RESET_TETHER_STATS32: + cmd = WAN_IOC_RESET_TETHER_STATS; + break; + case WAN_IOC_QUERY_DL_FILTER_STATS32: + cmd = WAN_IOC_QUERY_DL_FILTER_STATS; + break; + default: + return -ENOIOCTLCMD; + } + return wan_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + +static int wan_ioctl_open(struct inode *inode, struct file *filp) +{ + IPAWANDBG("\n IPA A7 wan_ioctl open OK :>>>> "); + return 0; +} + +const struct file_operations fops = { + .owner = THIS_MODULE, + .open = wan_ioctl_open, + .read = NULL, + .unlocked_ioctl = wan_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_wan_ioctl, +#endif +}; + +int wan_ioctl_init(void) +{ + unsigned int wan_ioctl_major = 0; + int ret; + struct device *dev; + + device = MKDEV(wan_ioctl_major, 0); + + ret = alloc_chrdev_region(&device, 0, dev_num, DRIVER_NAME); + if (ret) { + IPAWANERR(":device_alloc err.\n"); + goto dev_alloc_err; + } + wan_ioctl_major = MAJOR(device); + + class = class_create(THIS_MODULE, DRIVER_NAME); + if (IS_ERR(class)) { + IPAWANERR(":class_create err.\n"); + goto class_err; + } + + dev = device_create(class, NULL, device, + NULL, DRIVER_NAME); + if (IS_ERR(dev)) { + IPAWANERR(":device_create err.\n"); + goto device_err; + } + + cdev_init(&wan_ioctl_cdev, &fops); + ret = cdev_add(&wan_ioctl_cdev, device, dev_num); + if (ret) { + IPAWANERR(":cdev_add err.\n"); + goto cdev_add_err; + } + + process_ioctl = 1; + + IPAWANDBG("IPA %s major(%d) initial ok :>>>>\n", + DRIVER_NAME, wan_ioctl_major); + return 0; + +cdev_add_err: + device_destroy(class, device); +device_err: + class_destroy(class); +class_err: + unregister_chrdev_region(device, dev_num); +dev_alloc_err: + return -ENODEV; +} + +void wan_ioctl_stop_qmi_messages(void) +{ + process_ioctl = 0; +} 
+ +void wan_ioctl_enable_qmi_messages(void) +{ + process_ioctl = 1; +} + +void wan_ioctl_deinit(void) +{ + cdev_del(&wan_ioctl_cdev); + device_destroy(class, device); + class_destroy(class); + unregister_chrdev_region(device, dev_num); +} diff --git a/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c new file mode 100644 index 000000000000..8bd1acb4215c --- /dev/null +++ b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2016, 2020, The Linux Foundation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipa_i.h" + +#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge" + +#define TETH_DBG(fmt, args...) \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \ + __func__, __LINE__, ## args) +#define TETH_DBG_FUNC_ENTRY() \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__) +#define TETH_DBG_FUNC_EXIT() \ + pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__) +#define TETH_ERR(fmt, args...) \ + pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args) + +/** + * struct teth_bridge_ctx - Tethering bridge driver context information + * @class: kernel class pointer + * @dev_num: kernel device number + * @dev: kernel device struct pointer + * @cdev: kernel character device struct + */ +struct teth_bridge_ctx { + struct class *class; + dev_t dev_num; + struct device *dev; + struct cdev cdev; +}; +static struct teth_bridge_ctx *teth_ctx; + +/** + * teth_bridge_ipa_cb() - Callback to handle IPA data path events + * @priv - private data + * @evt - event type + * @data - event specific data (usually skb) + * + * This callback is called by IPA driver for exception packets from USB. + * All exception packets are handled by Q6 and should not reach this function. 
+ * Packets will arrive to AP exception pipe only in case where packets are + * sent from USB before Q6 has setup the call. + */ +static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt, + unsigned long data) +{ + struct sk_buff *skb = (struct sk_buff *)data; + + TETH_DBG_FUNC_ENTRY(); + if (evt != IPA_RECEIVE) { + TETH_ERR("unexpected event %d\n", evt); + WARN_ON(1); + return; + } + + TETH_ERR("Unexpected exception packet from USB, dropping packet\n"); + dev_kfree_skb_any(skb); + TETH_DBG_FUNC_EXIT(); +} + +/** + * ipa2_teth_bridge_init() - Initialize the Tethering bridge driver + * @params - in/out params for USB initialization API (please look at struct + * definition for more info) + * + * USB driver gets a pointer to a callback function (usb_notify_cb) and an + * associated data. USB driver installs this callback function in the call to + * ipa_connect(). + * + * Builds IPA resource manager dependency graph. + * + * Return codes: 0: success, + * -EINVAL - Bad parameter + * Other negative value - Failure + */ +int ipa2_teth_bridge_init(struct teth_bridge_init_params *params) +{ + int res = 0; + + TETH_DBG_FUNC_ENTRY(); + + if (!params) { + TETH_ERR("Bad parameter\n"); + TETH_DBG_FUNC_EXIT(); + return -EINVAL; + } + + params->usb_notify_cb = teth_bridge_ipa_cb; + params->private_data = NULL; + params->skip_ep_cfg = true; + + /* Build dependency graph */ + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + if (res < 0 && res != -EINPROGRESS) { + TETH_ERR("ipa_rm_add_dependency() failed.\n"); + goto bail; + } + res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_CONS); + if (res < 0 && res != -EINPROGRESS) { + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + TETH_ERR("ipa_rm_add_dependency() failed.\n"); + goto bail; + } + + res = 0; + goto bail; + +bail: + TETH_DBG_FUNC_EXIT(); + return res; +} + +/** + * ipa2_teth_bridge_disconnect() - Disconnect tethering bridge 
module + */ +int ipa2_teth_bridge_disconnect(enum ipa_client_type client) +{ + TETH_DBG_FUNC_ENTRY(); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_Q6_CONS); + ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD, + IPA_RM_RESOURCE_USB_CONS); + TETH_DBG_FUNC_EXIT(); + + return 0; +} + +/** + * ipa2_teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call + * @connect_params: Connection info + * + * Return codes: 0: success + * -EINVAL: invalid parameters + * -EPERM: Operation not permitted as the bridge is already + * connected + */ +int ipa2_teth_bridge_connect(struct teth_bridge_connect_params *connect_params) +{ + return 0; +} + +static long teth_bridge_ioctl(struct file *filp, + unsigned int cmd, + unsigned long arg) +{ + IPAERR("No ioctls are supported !\n"); + return -ENOIOCTLCMD; +} + +static const struct file_operations teth_bridge_drv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = teth_bridge_ioctl, +}; + +/** + * teth_bridge_driver_init() - Initialize tethering bridge driver + * + */ +int teth_bridge_driver_init(void) +{ + int res; + + TETH_DBG("Tethering bridge driver init\n"); + teth_ctx = kzalloc(sizeof(*teth_ctx), GFP_KERNEL); + if (!teth_ctx) { + TETH_ERR("kzalloc err.\n"); + return -ENOMEM; + } + + teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME); + + res = alloc_chrdev_region(&teth_ctx->dev_num, 0, 1, + TETH_BRIDGE_DRV_NAME); + if (res) { + TETH_ERR("alloc_chrdev_region err.\n"); + res = -ENODEV; + goto fail_alloc_chrdev_region; + } + + teth_ctx->dev = device_create(teth_ctx->class, NULL, teth_ctx->dev_num, + teth_ctx, TETH_BRIDGE_DRV_NAME); + if (IS_ERR(teth_ctx->dev)) { + TETH_ERR(":device_create err.\n"); + res = -ENODEV; + goto fail_device_create; + } + + cdev_init(&teth_ctx->cdev, &teth_bridge_drv_fops); + teth_ctx->cdev.owner = THIS_MODULE; + teth_ctx->cdev.ops = &teth_bridge_drv_fops; + + res = cdev_add(&teth_ctx->cdev, teth_ctx->dev_num, 1); + if (res) { + TETH_ERR(":cdev_add 
err=%d\n", -res); + res = -ENODEV; + goto fail_cdev_add; + } + TETH_DBG("Tethering bridge driver init OK\n"); + + return 0; +fail_cdev_add: + device_destroy(teth_ctx->class, teth_ctx->dev_num); +fail_device_create: + unregister_chrdev_region(teth_ctx->dev_num, 1); +fail_alloc_chrdev_region: + kfree(teth_ctx); + teth_ctx = NULL; + + return res; +} +EXPORT_SYMBOL(teth_bridge_driver_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Tethering bridge driver"); -- GitLab From 6f99e8cf0a40fa55ad9971ff67dce2d409ba2ab5 Mon Sep 17 00:00:00 2001 From: Mukesh Kumar Savaliya Date: Sun, 13 Sep 2020 21:07:58 +0530 Subject: [PATCH 1262/1304] i3c: i3c-master-qcom-geni: Fix DMA and FIFO mode timeout scenario This change primarily fixes timeout issue observed during command send as part of read and write operations. This will do proper cleanup of the GENI in command timeout with cancel and abort operations while waiting for the done event. Change-Id: Icd1a546fd6f54046640910e2d1225da855b2ea42 Signed-off-by: Mukesh Kumar Savaliya --- drivers/i3c/master/i3c-master-qcom-geni.c | 72 +++++++++++++++-------- 1 file changed, 49 insertions(+), 23 deletions(-) diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c index b45dff609b35..87cf369d7185 100644 --- a/drivers/i3c/master/i3c-master-qcom-geni.c +++ b/drivers/i3c/master/i3c-master-qcom-geni.c @@ -580,6 +580,26 @@ static irqreturn_t geni_i3c_ibi_irq(int irq, void *dev) return IRQ_HANDLED; } +static void geni_i3c_handle_err(struct geni_i3c_dev *gi3c, u32 status) +{ + if (status & M_GP_IRQ_0_EN) + geni_i3c_err(gi3c, RD_TERM); + if (status & M_GP_IRQ_1_EN) + geni_i3c_err(gi3c, NACK); + if (status & M_GP_IRQ_2_EN) + geni_i3c_err(gi3c, CRC_ERR); + if (status & M_GP_IRQ_3_EN) + geni_i3c_err(gi3c, BUS_PROTO); + if (status & M_GP_IRQ_4_EN) + geni_i3c_err(gi3c, NACK_7E); + if (status & M_CMD_OVERRUN_EN) + geni_i3c_err(gi3c, GENI_OVERRUN); + if (status & M_ILLEGAL_CMD_EN) + geni_i3c_err(gi3c, 
GENI_ILLEGAL_CMD); + if (status & M_CMD_ABORT_EN) + geni_i3c_err(gi3c, GENI_ABORT_DONE); +} + static irqreturn_t geni_i3c_irq(int irq, void *dev) { struct geni_i3c_dev *gi3c = dev; @@ -597,24 +617,8 @@ static irqreturn_t geni_i3c_irq(int irq, void *dev) dm_rx_st = readl_relaxed(gi3c->se.base + SE_DMA_RX_IRQ_STAT); dma = readl_relaxed(gi3c->se.base + SE_GENI_DMA_MODE_EN); - if ((m_stat & SE_I3C_ERR) || - (dm_rx_st & DM_I3C_CB_ERR)) { - if (m_stat & M_GP_IRQ_0_EN) - geni_i3c_err(gi3c, RD_TERM); - if (m_stat & M_GP_IRQ_1_EN) - geni_i3c_err(gi3c, NACK); - if (m_stat & M_GP_IRQ_2_EN) - geni_i3c_err(gi3c, CRC_ERR); - if (m_stat & M_GP_IRQ_3_EN) - geni_i3c_err(gi3c, BUS_PROTO); - if (m_stat & M_GP_IRQ_4_EN) - geni_i3c_err(gi3c, NACK_7E); - if (m_stat & M_CMD_OVERRUN_EN) - geni_i3c_err(gi3c, GENI_OVERRUN); - if (m_stat & M_ILLEGAL_CMD_EN) - geni_i3c_err(gi3c, GENI_ILLEGAL_CMD); - if (m_stat & M_CMD_ABORT_EN) - geni_i3c_err(gi3c, GENI_ABORT_DONE); + if ((m_stat & SE_I3C_ERR) || (dm_rx_st & DM_I3C_CB_ERR)) { + geni_i3c_handle_err(gi3c, m_stat); /* Disable the TX Watermark interrupt to stop TX */ if (!dma) @@ -688,8 +692,11 @@ static irqreturn_t geni_i3c_irq(int irq, void *dev) complete(&gi3c->done); } else if ((dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE) || - (dm_rx_st & RX_RESET_DONE)) + (dm_rx_st & RX_RESET_DONE) || + (dm_tx_st & TX_RESET_DONE)) { + complete(&gi3c->done); + } spin_unlock_irqrestore(&gi3c->spinlock, flags); return IRQ_HANDLED; @@ -792,18 +799,37 @@ static int _i3c_geni_execute_command GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, "got wait_for_completion timeout\n"); - spin_lock_irqsave(&gi3c->spinlock, flags); geni_i3c_err(gi3c, GENI_TIMEOUT); gi3c->cur_buf = NULL; gi3c->cur_len = gi3c->cur_idx = 0; gi3c->cur_rnw = 0; - geni_abort_m_cmd(gi3c->se.base); + + reinit_completion(&gi3c->done); + + spin_lock_irqsave(&gi3c->spinlock, flags); + geni_cancel_m_cmd(gi3c->se.base); spin_unlock_irqrestore(&gi3c->spinlock, flags); - time_remaining = 
wait_for_completion_timeout(&gi3c->done, - XFER_TIMEOUT); + + time_remaining = wait_for_completion_timeout(&gi3c->done, HZ); + if (!time_remaining) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s:Cancel cmd failed : Aborting\n", __func__); + + reinit_completion(&gi3c->done); + spin_lock_irqsave(&gi3c->spinlock, flags); + geni_abort_m_cmd(gi3c->se.base); + spin_unlock_irqrestore(&gi3c->spinlock, flags); + time_remaining = + wait_for_completion_timeout(&gi3c->done, XFER_TIMEOUT); + if (!time_remaining) + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "%s:Abort Failed\n", __func__); + } } + if (xfer->mode == SE_DMA) { if (gi3c->err) { + reinit_completion(&gi3c->done); if (rnw == READ_TRANSACTION) writel_relaxed(1, gi3c->se.base + SE_DMA_RX_FSM_RST); -- GitLab From 8be05ea1c14d511f25d8d6e192a3dca4190dfcdb Mon Sep 17 00:00:00 2001 From: Ashok Raj Deenadayalan Date: Fri, 28 Aug 2020 22:46:43 +0530 Subject: [PATCH 1263/1304] msm: ipa2: Add changes compatible to kernel-4.14 Add following changes, - Include dependent files. - Handle buffer recycling in new kernel. - Add WLAN endpoints for IPA_v2_6. - Fix indentation errors. - Add structure, api changes in QMI files compatible with kernel 4.14. - Add changes to support late clk initialization. - Add IPA_v2_6 support for platform probing. - Add locking mechanism as per the new kernel. 
Change-Id: I6f153398f9a11026744c076a67a609cf7af807ba Signed-off-by: Praveen Kurapati Signed-off-by: Ashok Raj Deenadayalan Signed-off-by: Swetha Chikkaboraiah --- drivers/platform/msm/ipa/ipa_api.c | 4 + drivers/platform/msm/ipa/ipa_api.h | 21 +- drivers/platform/msm/ipa/ipa_common_i.h | 2 +- drivers/platform/msm/ipa/ipa_v2/ipa.c | 252 +++--- drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 106 +-- drivers/platform/msm/ipa/ipa_v2/ipa_dma.c | 6 +- drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 42 +- drivers/platform/msm/ipa/ipa_v2/ipa_flt.c | 2 +- drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 10 +- drivers/platform/msm/ipa/ipa_v2/ipa_intf.c | 1 + drivers/platform/msm/ipa/ipa_v2/ipa_nat.c | 19 +- .../platform/msm/ipa/ipa_v2/ipa_qmi_service.c | 814 +++++++++--------- .../platform/msm/ipa/ipa_v2/ipa_qmi_service.h | 56 +- .../msm/ipa/ipa_v2/ipa_qmi_service_v01.c | 113 +-- drivers/platform/msm/ipa/ipa_v2/ipa_uc.c | 12 +- drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c | 2 +- drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c | 2 +- drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 2 +- drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 75 +- drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 81 +- .../msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c | 37 +- drivers/platform/msm/ipa/ipa_v3/ipa_pm.h | 4 +- .../platform/msm/ipa/ipa_v3/ipa_qmi_service.h | 2 + include/linux/ipa.h | 70 ++ 24 files changed, 904 insertions(+), 831 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 639231528cf1..1f67c3953646 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -3187,6 +3187,10 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p) /* call probe based on IPA HW version */ switch (ipa_api_hw_type) { + case IPA_HW_v2_6L: + result = ipa_plat_drv_probe(pdev_p, ipa_api_ctrl, + ipa_plat_drv_match); + break; case IPA_HW_v3_0: case IPA_HW_v3_1: case IPA_HW_v3_5: diff --git a/drivers/platform/msm/ipa/ipa_api.h 
b/drivers/platform/msm/ipa/ipa_api.h index 216b0520fc8a..dfd72504f911 100644 --- a/drivers/platform/msm/ipa/ipa_api.h +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. */ #include @@ -12,6 +12,12 @@ #define _IPA_API_H_ struct ipa_api_controller { + + int (*ipa_connect)(const struct ipa_connect_params *in, + struct ipa_sps_params *sps, u32 *clnt_hdl); + + int (*ipa_disconnect)(u32 clnt_hdl); + int (*ipa_reset_endpoint)(u32 clnt_hdl); int (*ipa_clear_endpoint_delay)(u32 clnt_hdl); @@ -484,6 +490,19 @@ struct ipa_api_controller { int (*ipa_get_prot_id)(enum ipa_client_type client); }; +#ifdef CONFIG_IPA +int ipa_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match); +#else +static inline int ipa_plat_drv_probe(struct platform_device *pdev_p, + struct ipa_api_controller *api_ctrl, + const struct of_device_id *pdrv_match) +{ + return -ENODEV; +} +#endif /* (CONFIG_IPA) */ + #ifdef CONFIG_IPA3 int ipa3_plat_drv_probe(struct platform_device *pdev_p, struct ipa_api_controller *api_ctrl, diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h index 3d71e390a829..f765bda3a83e 100644 --- a/drivers/platform/msm/ipa/ipa_common_i.h +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved. 
*/ #include diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index 5425d1d57a6a..7c5779297b47 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -184,7 +184,7 @@ static DECLARE_DELAYED_WORK(ipa_sps_release_resource_work, static struct ipa_plat_drv_res ipa_res = {0, }; -struct msm_bus_scale_pdata *bus_scale_table; +static struct msm_bus_scale_pdata *bus_scale_table; static struct clk *ipa_clk_src; static struct clk *ipa_clk; @@ -195,7 +195,7 @@ static struct clk *ipa_inactivity_clk; struct ipa_context *ipa_ctx; static struct device *master_dev; -struct platform_device *ipa_pdev; +static struct platform_device *ipa_pdev; static struct { bool present; bool arm_smmu; @@ -207,6 +207,8 @@ static struct { static char *active_clients_table_buf; +static u32 register_ipa_bus_hdl; + int ipa2_active_clients_log_print_buffer(char *buf, int size) { int i; @@ -561,7 +563,7 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, return -ENOMEM; } - if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param, + if (copy_from_user(wan_msg, (const void __user *)usr_param, sizeof(struct ipa_wan_msg))) { kfree(wan_msg); return -EFAULT; @@ -606,7 +608,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int retval = 0; u32 pyld_sz; - u8 header[128] = { 0 }; + u8 header[192] = { 0 }; u8 *param = NULL; struct ipa_ioc_nat_alloc_mem nat_mem; struct ipa_ioc_v4_nat_init nat_init; @@ -624,7 +626,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case IPA_IOC_ALLOC_NAT_MEM: - if (copy_from_user((u8 *)&nat_mem, (u8 *)arg, + if (copy_from_user(&nat_mem, (const void __user *)arg, sizeof(struct ipa_ioc_nat_alloc_mem))) { retval = -EFAULT; break; @@ -636,14 +638,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, (u8 *)&nat_mem, + if 
(copy_to_user((void __user *)arg, &nat_mem, sizeof(struct ipa_ioc_nat_alloc_mem))) { retval = -EFAULT; break; } break; case IPA_IOC_V4_INIT_NAT: - if (copy_from_user((u8 *)&nat_init, (u8 *)arg, + if (copy_from_user(&nat_init, (const void __user *)arg, sizeof(struct ipa_ioc_v4_nat_init))) { retval = -EFAULT; break; @@ -655,7 +657,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; case IPA_IOC_NAT_DMA: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_nat_dma_cmd))) { retval = -EFAULT; break; @@ -671,7 +673,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -691,7 +693,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; case IPA_IOC_V4_DEL_NAT: - if (copy_from_user((u8 *)&nat_del, (u8 *)arg, + if (copy_from_user(&nat_del, (const void __user *)arg, sizeof(struct ipa_ioc_v4_nat_del))) { retval = -EFAULT; break; @@ -703,7 +705,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; case IPA_IOC_ADD_HDR: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_add_hdr))) { retval = -EFAULT; break; @@ -718,7 +720,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -736,14 +738,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_DEL_HDR: - if 
(copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_del_hdr))) { retval = -EFAULT; break; @@ -758,7 +760,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -776,14 +778,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_ADD_RT_RULE: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_add_rt_rule))) { retval = -EFAULT; break; @@ -798,7 +800,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -817,14 +819,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_MDFY_RT_RULE: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_mdfy_rt_rule))) { retval = -EFAULT; break; @@ -839,7 +841,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -857,14 +859,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if 
(copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_DEL_RT_RULE: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_del_rt_rule))) { retval = -EFAULT; break; @@ -879,7 +881,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -896,14 +898,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_ADD_FLT_RULE: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_add_flt_rule))) { retval = -EFAULT; break; @@ -918,7 +920,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -937,14 +939,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_DEL_FLT_RULE: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_del_flt_rule))) { retval = -EFAULT; break; @@ -959,7 +961,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, 
pyld_sz)) { retval = -EFAULT; break; } @@ -977,14 +979,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_MDFY_FLT_RULE: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_mdfy_flt_rule))) { retval = -EFAULT; break; @@ -999,7 +1001,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -1017,7 +1019,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -1042,7 +1044,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = ipa2_reset_flt(arg, false); break; case IPA_IOC_GET_RT_TBL: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_get_rt_tbl))) { retval = -EFAULT; break; @@ -1051,7 +1053,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, header, + if (copy_to_user((void __user *)arg, header, sizeof(struct ipa_ioc_get_rt_tbl))) { retval = -EFAULT; break; @@ -1061,7 +1063,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = ipa2_put_rt_tbl(arg); break; case IPA_IOC_GET_HDR: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_get_hdr))) { retval = -EFAULT; break; @@ -1070,7 +1072,7 @@ static long ipa_ioctl(struct file 
*filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, header, + if (copy_to_user((void __user *)arg, header, sizeof(struct ipa_ioc_get_hdr))) { retval = -EFAULT; break; @@ -1083,7 +1085,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = ipa_cfg_filter(arg); break; case IPA_IOC_COPY_HDR: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_copy_hdr))) { retval = -EFAULT; break; @@ -1092,14 +1094,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, header, + if (copy_to_user((void __user *)arg, header, sizeof(struct ipa_ioc_copy_hdr))) { retval = -EFAULT; break; } break; case IPA_IOC_QUERY_INTF: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_query_intf))) { retval = -EFAULT; break; @@ -1108,7 +1110,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -1; break; } - if (copy_to_user((u8 *)arg, header, + if (copy_to_user((void __user *)arg, header, sizeof(struct ipa_ioc_query_intf))) { retval = -EFAULT; break; @@ -1116,7 +1118,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; case IPA_IOC_QUERY_INTF_TX_PROPS: sz = sizeof(struct ipa_ioc_query_intf_tx_props); - if (copy_from_user(header, (u8 *)arg, sz)) { + if (copy_from_user(header, (const void __user *)arg, sz)) { retval = -EFAULT; break; } @@ -1136,7 +1138,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -1155,14 +1157,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -1; break; } - if 
(copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_QUERY_INTF_RX_PROPS: sz = sizeof(struct ipa_ioc_query_intf_rx_props); - if (copy_from_user(header, (u8 *)arg, sz)) { + if (copy_from_user(header, (const void __user *)arg, sz)) { retval = -EFAULT; break; } @@ -1182,7 +1184,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -1200,14 +1202,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -1; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_QUERY_INTF_EXT_PROPS: sz = sizeof(struct ipa_ioc_query_intf_ext_props); - if (copy_from_user(header, (u8 *)arg, sz)) { + if (copy_from_user(header, (const void __user *)arg, sz)) { retval = -EFAULT; break; } @@ -1227,7 +1229,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -1245,13 +1247,13 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -1; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_PULL_MSG: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_msg_meta))) { retval = -EFAULT; break; @@ -1265,7 +1267,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) 
{ + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -1285,13 +1287,13 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -1; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_RM_ADD_DEPENDENCY: - if (copy_from_user((u8 *)&rm_depend, (u8 *)arg, + if (copy_from_user(&rm_depend, (const void __user *)arg, sizeof(struct ipa_ioc_rm_dependency))) { retval = -EFAULT; break; @@ -1300,7 +1302,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) rm_depend.resource_name, rm_depend.depends_on_name); break; case IPA_IOC_RM_DEL_DEPENDENCY: - if (copy_from_user((u8 *)&rm_depend, (u8 *)arg, + if (copy_from_user(&rm_depend, (const void __user *)arg, sizeof(struct ipa_ioc_rm_dependency))) { retval = -EFAULT; break; @@ -1312,7 +1314,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ipa_ioc_generate_flt_eq flt_eq; - if (copy_from_user(&flt_eq, (u8 *)arg, + if (copy_from_user(&flt_eq, (const void __user *)arg, sizeof(struct ipa_ioc_generate_flt_eq))) { retval = -EFAULT; break; @@ -1322,7 +1324,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, &flt_eq, + if (copy_to_user((void __user *)arg, &flt_eq, sizeof(struct ipa_ioc_generate_flt_eq))) { retval = -EFAULT; break; @@ -1335,7 +1337,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } case IPA_IOC_QUERY_RT_TBL_INDEX: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_get_rt_tbl_indx))) { retval = -EFAULT; break; @@ -1345,14 +1347,14 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, header, + if 
(copy_to_user((void __user *)arg, header, sizeof(struct ipa_ioc_get_rt_tbl_indx))) { retval = -EFAULT; break; } break; case IPA_IOC_WRITE_QMAPID: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_write_qmapid))) { retval = -EFAULT; break; @@ -1361,7 +1363,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, header, + if (copy_to_user((void __user *)arg, header, sizeof(struct ipa_ioc_write_qmapid))) { retval = -EFAULT; break; @@ -1389,7 +1391,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; case IPA_IOC_ADD_HDR_PROC_CTX: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_add_hdr_proc_ctx))) { retval = -EFAULT; break; @@ -1405,7 +1407,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -1423,13 +1425,13 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } break; case IPA_IOC_DEL_HDR_PROC_CTX: - if (copy_from_user(header, (u8 *)arg, + if (copy_from_user(header, (const void __user *)arg, sizeof(struct ipa_ioc_del_hdr_proc_ctx))) { retval = -EFAULT; break; @@ -1444,7 +1446,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -1463,7 +1465,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 
retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -1477,7 +1479,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } memcpy(param, &ipa_ctx->ipa_hw_type, pyld_sz); - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -1517,7 +1519,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) * -ENOMEM: failed to allocate memory * -EPERM: failed to add the tables */ -int ipa_setup_dflt_rt_tables(void) +static int ipa_setup_dflt_rt_tables(void) { struct ipa_ioc_add_rt_rule *rt_rule; struct ipa_rt_rule_add *rt_rule_entry; @@ -1750,39 +1752,6 @@ static void ipa_free_buffer(void *user1, int user2) kfree(user1); } -int ipa_q6_pipe_delay(bool zip_pipes) -{ - u32 reg_val = 0; - int client_idx; - int ep_idx; - - /* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */ - for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) { - /* Skip the processing for non Q6 pipes. */ - if (!IPA_CLIENT_IS_Q6_PROD(client_idx)) - continue; - /* Skip the processing for NON-ZIP pipes. */ - else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx)) - continue; - /* Skip the processing for ZIP pipes. 
*/ - else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) - continue; - - ep_idx = ipa2_get_ep_mapping(client_idx); - if (ep_idx == -1) - continue; - - IPA_SETFIELD_IN_REG(reg_val, 1, - IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT, - IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK); - - ipa_write_reg(ipa_ctx->mmio, - IPA_ENDP_INIT_CTRL_N_OFST(ep_idx), reg_val); - } - - return 0; -} - int ipa_q6_monitor_holb_mitigation(bool enable) { int ep_idx; @@ -1886,7 +1855,7 @@ static int ipa_q6_clean_q6_tables(void) int num_cmds = 0; int index; int retval; - struct ipa_mem_buffer mem = { 0 }; + struct ipa_mem_buffer mem = { NULL }; u32 *entry; u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes); gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); @@ -2217,7 +2186,7 @@ int ipa_q6_post_shutdown_cleanup(void) int _ipa_init_sram_v2(void) { - u32 *ipa_sram_mmio; + void __iomem *ipa_sram_mmio; unsigned long phys_addr; struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; struct ipa_desc desc = {0}; @@ -2237,7 +2206,7 @@ int _ipa_init_sram_v2(void) return -ENOMEM; } -#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val) +#define IPA_SRAM_SET(ofst, val) iowrite32(val, ipa_sram_mmio + (ofst - 4)) IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL); IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL); @@ -2249,6 +2218,7 @@ int _ipa_init_sram_v2(void) IPA_SRAM_SET(IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_CANARY_VAL); IPA_SRAM_SET(IPA_MEM_PART(uc_info_ofst), IPA_MEM_CANARY_VAL); + iounmap(ipa_sram_mmio); mem.size = IPA_STATUS_CLEAR_SIZE; @@ -2287,7 +2257,7 @@ int _ipa_init_sram_v2(void) int _ipa_init_sram_v2_5(void) { - u32 *ipa_sram_mmio; + void __iomem *ipa_sram_mmio; unsigned long phys_addr; phys_addr = ipa_ctx->ipa_wrapper_base + @@ -2301,7 +2271,7 @@ int _ipa_init_sram_v2_5(void) return -ENOMEM; } -#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val) +#define IPA_SRAM_SET(ofst, val) iowrite32(val, ipa_sram_mmio + (ofst - 
4)) IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst) - 4, IPA_MEM_CANARY_VAL); IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst), IPA_MEM_CANARY_VAL); @@ -2322,15 +2292,15 @@ int _ipa_init_sram_v2_5(void) return 0; } -static inline void ipa_sram_set_canary(u32 *sram_mmio, int offset) +static inline void ipa_sram_set_canary(void __iomem *sram_mmio, int offset) { /* Set 4 bytes of CANARY before the offset */ - sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL; + iowrite32(IPA_MEM_CANARY_VAL, sram_mmio + (offset - 4)); } int _ipa_init_sram_v2_6L(void) { - u32 *ipa_sram_mmio; + void __iomem *ipa_sram_mmio; unsigned long phys_addr; phys_addr = ipa_ctx->ipa_wrapper_base + @@ -2354,9 +2324,9 @@ int _ipa_init_sram_v2_6L(void) ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_ofst)); ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst)); ipa_sram_set_canary(ipa_sram_mmio, - IPA_MEM_PART(modem_comp_decomp_ofst) - 4); + IPA_MEM_PART(modem_comp_decomp_ofst) - 4); ipa_sram_set_canary(ipa_sram_mmio, - IPA_MEM_PART(modem_comp_decomp_ofst)); + IPA_MEM_PART(modem_comp_decomp_ofst)); ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst)); ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst)); @@ -2877,7 +2847,8 @@ static void ipa_teardown_apps_pipes(void) } #ifdef CONFIG_COMPAT -long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long compat_ipa_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { int retval = 0; struct ipa_ioc_nat_alloc_mem32 nat_mem32; @@ -2924,7 +2895,7 @@ long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg) cmd = IPA_IOC_GET_HDR; break; case IPA_IOC_ALLOC_NAT_MEM32: - if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg, + if (copy_from_user(&nat_mem32, (const void __user *)arg, sizeof(struct ipa_ioc_nat_alloc_mem32))) { retval = -EFAULT; goto ret; @@ -2942,7 +2913,7 @@ long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg) goto ret; } nat_mem32.offset = 
(compat_off_t)nat_mem.offset; - if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32, + if (copy_to_user((void __user *)arg, (u8 *)&nat_mem32, sizeof(struct ipa_ioc_nat_alloc_mem32))) { retval = -EFAULT; } @@ -3297,7 +3268,8 @@ static void ipa_start_tag_process(struct work_struct *work) * - Remove and deallocate unneeded data structure * - Log the call in the circular history buffer (unless it is a simple call) */ -void ipa2_active_clients_log_mod(struct ipa_active_client_logging_info *id, +static void ipa2_active_clients_log_mod( + struct ipa_active_client_logging_info *id, bool inc, bool int_ctx) { char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]; @@ -3512,7 +3484,7 @@ void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client) static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res) { - void *ipa_bam_mmio; + void __iomem *ipa_bam_mmio; int reg_val; int retval = 0; @@ -3842,7 +3814,7 @@ static void ipa_sps_release_resource(struct work_struct *work) mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock); } -int ipa_create_apps_resource(void) +static int ipa_create_apps_resource(void) { struct ipa_rm_create_params apps_cons_create_params; struct ipa_rm_perf_profile profile; @@ -3996,9 +3968,14 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) { /* get BUS handle */ - ipa_ctx->ipa_bus_hdl = - msm_bus_scale_register_client( - ipa_ctx->ctrl->msm_bus_data_ptr); + /* Check if bus handle is already registered */ + if (!register_ipa_bus_hdl) + ipa_ctx->ipa_bus_hdl = + msm_bus_scale_register_client( + ipa_ctx->ctrl->msm_bus_data_ptr); + else + ipa_ctx->ipa_bus_hdl = register_ipa_bus_hdl; + if (!ipa_ctx->ipa_bus_hdl) { IPAERR("fail to register with bus mgr!\n"); result = -EPROBE_DEFER; @@ -4464,6 +4441,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, if (bus_scale_table) { msm_bus_cl_clear_pdata(bus_scale_table); bus_scale_table = NULL; + register_ipa_bus_hdl = 0; } fail_bus_reg: fail_bind: @@ 
-4973,6 +4951,36 @@ int ipa_plat_drv_probe(struct platform_device *pdev_p, IPADBG("IPA driver probing started\n"); + /* + * Due to late initialization of msm_bus in kernel >= 4.14, add + * mechanism to defer IPA probing until msm_bus is initialized + * successfully. + */ + if (of_device_is_compatible(dev->of_node, "qcom,ipa")) { + if (!ipa_pdev) + ipa_pdev = pdev_p; + if (!bus_scale_table) + bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev); + } + if (bus_scale_table != NULL) { + if (of_device_is_compatible(dev->of_node, "qcom,ipa")) { + /* + * Register with bus client to check if msm_bus + * is completely initialized. + */ + register_ipa_bus_hdl = + msm_bus_scale_register_client( + bus_scale_table); + if (!register_ipa_bus_hdl) { + IPAERR("fail to register with bus mgr!\n"); + bus_scale_table = NULL; + return -EPROBE_DEFER; + } + } + } else { + return -EPROBE_DEFER; + } + if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb")) return ipa_smmu_ap_cb_probe(dev); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c index c40cbeff140f..11be67b8e8ef 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c @@ -27,7 +27,7 @@ #define IPA_DUMP_STATUS_FIELD(f) \ pr_err(#f "=0x%x\n", status->f) -const char *ipa_excp_name[] = { +static const char * const ipa_excp_name[] = { __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0), __stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1), __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL), @@ -38,7 +38,7 @@ const char *ipa_excp_name[] = { __stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP), }; -const char *ipa_status_excp_name[] = { +static const char * const ipa_status_excp_name[] = { __stringify_1(IPA_EXCP_DEAGGR), __stringify_1(IPA_EXCP_REPLICATION), __stringify_1(IPA_EXCP_IP), @@ -49,7 +49,7 @@ const char *ipa_status_excp_name[] = { __stringify_1(IPA_EXCP_NONE), }; -const char *ipa_event_name[] = { +static const char * const ipa_event_name[] = { 
__stringify(WLAN_CLIENT_CONNECT), __stringify(WLAN_CLIENT_DISCONNECT), __stringify(WLAN_CLIENT_POWER_SAVE_MODE), @@ -89,13 +89,13 @@ const char *ipa_event_name[] = { __stringify(IPA_GSB_DISCONNECT), }; -const char *ipa_hdr_l2_type_name[] = { +static const char * const ipa_hdr_l2_type_name[] = { __stringify(IPA_HDR_L2_NONE), __stringify(IPA_HDR_L2_ETHERNET_II), __stringify(IPA_HDR_L2_802_3), }; -const char *ipa_hdr_proc_type_name[] = { +static const char *const ipa_hdr_proc_type_name[] = { __stringify(IPA_HDR_PROC_NONE), __stringify(IPA_HDR_PROC_ETHII_TO_ETHII), __stringify(IPA_HDR_PROC_ETHII_TO_802_3), @@ -473,8 +473,8 @@ static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count, static int ipa_attrib_dump(struct ipa_rule_attrib *attrib, enum ipa_ip_type ip) { - uint32_t addr[4]; - uint32_t mask[4]; + __be32 addr[4]; + __be32 mask[4]; int nbytes = 0; int i; @@ -1973,94 +1973,94 @@ static ssize_t ipa_enable_ipc_low(struct file *file, return count; } -const struct file_operations ipa_gen_reg_ops = { +static const struct file_operations ipa_gen_reg_ops = { .read = ipa_read_gen_reg, }; -const struct file_operations ipa_ep_reg_ops = { +static const struct file_operations ipa_ep_reg_ops = { .read = ipa_read_ep_reg, .write = ipa_write_ep_reg, }; -const struct file_operations ipa_keep_awake_ops = { +static const struct file_operations ipa_keep_awake_ops = { .read = ipa_read_keep_awake, .write = ipa_write_keep_awake, }; -const struct file_operations ipa_ep_holb_ops = { +static const struct file_operations ipa_ep_holb_ops = { .write = ipa_write_ep_holb, }; -const struct file_operations ipa_hdr_ops = { +static const struct file_operations ipa_hdr_ops = { .read = ipa_read_hdr, }; -const struct file_operations ipa_rt_ops = { +static const struct file_operations ipa_rt_ops = { .read = ipa_read_rt, .open = simple_open, }; -const struct file_operations ipa_proc_ctx_ops = { +static const struct file_operations ipa_proc_ctx_ops = { .read = ipa_read_proc_ctx, }; 
-const struct file_operations ipa_flt_ops = { +static const struct file_operations ipa_flt_ops = { .read = ipa_read_flt, .open = simple_open, }; -const struct file_operations ipa_stats_ops = { +static const struct file_operations ipa_stats_ops = { .read = ipa_read_stats, }; -const struct file_operations ipa_wstats_ops = { +static const struct file_operations ipa_wstats_ops = { .read = ipa_read_wstats, }; -const struct file_operations ipa_wdi_ops = { +static const struct file_operations ipa_wdi_ops = { .read = ipa_read_wdi, }; -const struct file_operations ipa_ntn_ops = { +static const struct file_operations ipa_ntn_ops = { .read = ipa_read_ntn, }; -const struct file_operations ipa_msg_ops = { +static const struct file_operations ipa_msg_ops = { .read = ipa_read_msg, }; -const struct file_operations ipa_dbg_cnt_ops = { +static const struct file_operations ipa_dbg_cnt_ops = { .read = ipa_read_dbg_cnt, .write = ipa_write_dbg_cnt, }; -const struct file_operations ipa_nat4_ops = { +static const struct file_operations ipa_nat4_ops = { .read = ipa_read_nat4, }; -const struct file_operations ipa_rm_stats = { +static const struct file_operations ipa_rm_stats = { .read = ipa_rm_read_stats, }; -const struct file_operations ipa_status_stats_ops = { +static const struct file_operations ipa_status_stats_ops = { .read = ipa_status_stats_read, }; -const struct file_operations ipa2_active_clients = { +static const struct file_operations ipa2_active_clients = { .read = ipa2_print_active_clients_log, .write = ipa2_clear_active_clients_log, }; -const struct file_operations ipa_ipc_low_ops = { +static const struct file_operations ipa_ipc_low_ops = { .write = ipa_enable_ipc_low, }; -const struct file_operations ipa_rx_poll_time_ops = { +static const struct file_operations ipa_rx_poll_time_ops = { .read = ipa_read_rx_polling_timeout, .write = ipa_write_rx_polling_timeout, }; -const struct file_operations ipa_poll_iteration_ops = { +static const struct file_operations 
ipa_poll_iteration_ops = { .read = ipa_read_polling_iteration, .write = ipa_write_polling_iteration, }; @@ -2072,7 +2072,7 @@ void ipa_debugfs_init(void) const mode_t write_only_mode = 0220; struct dentry *file; - dent = debugfs_create_dir("ipa", 0); + dent = debugfs_create_dir("ipa", NULL); if (IS_ERR(dent)) { IPAERR("fail to create folder in debug_fs.\n"); return; @@ -2086,15 +2086,15 @@ void ipa_debugfs_init(void) } - dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0, - &ipa_gen_reg_ops); + dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, + NULL, &ipa_gen_reg_ops); if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) { IPAERR("fail to create file for debug_fs gen_reg\n"); goto fail; } dfile_active_clients = debugfs_create_file("active_clients", - read_write_mode, dent, 0, &ipa2_active_clients); + read_write_mode, dent, NULL, &ipa2_active_clients); if (!dfile_active_clients || IS_ERR(dfile_active_clients)) { IPAERR("fail to create file for debug_fs active_clients\n"); goto fail; @@ -2106,28 +2106,28 @@ void ipa_debugfs_init(void) if (active_clients_buf == NULL) IPAERR("fail to allocate active clients memory buffer"); - dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0, - &ipa_ep_reg_ops); + dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, + NULL, &ipa_ep_reg_ops); if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) { IPAERR("fail to create file for debug_fs ep_reg\n"); goto fail; } dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode, - dent, 0, &ipa_keep_awake_ops); + dent, NULL, &ipa_keep_awake_ops); if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) { IPAERR("fail to create file for debug_fs dfile_keep_awake\n"); goto fail; } dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent, - 0, &ipa_ep_holb_ops); + NULL, &ipa_ep_holb_ops); if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) { IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n"); goto fail; } - dfile_hdr = 
debugfs_create_file("hdr", read_only_mode, dent, 0, + dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, NULL, &ipa_hdr_ops); if (!dfile_hdr || IS_ERR(dfile_hdr)) { IPAERR("fail to create file for debug_fs hdr\n"); @@ -2135,7 +2135,7 @@ void ipa_debugfs_init(void) } dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent, - 0, &ipa_proc_ctx_ops); + NULL, &ipa_proc_ctx_ops); if (!dfile_hdr || IS_ERR(dfile_hdr)) { IPAERR("fail to create file for debug_fs proc_ctx\n"); goto fail; @@ -2169,7 +2169,7 @@ void ipa_debugfs_init(void) goto fail; } - dfile_stats = debugfs_create_file("stats", read_only_mode, dent, 0, + dfile_stats = debugfs_create_file("stats", read_only_mode, dent, NULL, &ipa_stats_ops); if (!dfile_stats || IS_ERR(dfile_stats)) { IPAERR("fail to create file for debug_fs stats\n"); @@ -2177,34 +2177,34 @@ void ipa_debugfs_init(void) } dfile_wstats = debugfs_create_file("wstats", read_only_mode, - dent, 0, &ipa_wstats_ops); + dent, NULL, &ipa_wstats_ops); if (!dfile_wstats || IS_ERR(dfile_wstats)) { IPAERR("fail to create file for debug_fs wstats\n"); goto fail; } - dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, 0, - &ipa_wdi_ops); + dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, + NULL, &ipa_wdi_ops); if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) { IPAERR("fail to create file for debug_fs wdi stats\n"); goto fail; } - dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0, - &ipa_ntn_ops); + dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, + NULL, &ipa_ntn_ops); if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) { IPAERR("fail to create file for debug_fs ntn stats\n"); goto fail; } - dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0, - &ipa_dbg_cnt_ops); + dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, + NULL, &ipa_dbg_cnt_ops); if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) { IPAERR("fail to create file for debug_fs 
dbg_cnt\n"); goto fail; } - dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0, + dfile_msg = debugfs_create_file("msg", read_only_mode, dent, NULL, &ipa_msg_ops); if (!dfile_msg || IS_ERR(dfile_msg)) { IPAERR("fail to create file for debug_fs msg\n"); @@ -2212,35 +2212,35 @@ void ipa_debugfs_init(void) } dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent, - 0, &ipa_nat4_ops); + NULL, &ipa_nat4_ops); if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) { IPAERR("fail to create file for debug_fs ip4 nat\n"); goto fail; } dfile_rm_stats = debugfs_create_file("rm_stats", - read_only_mode, dent, 0, &ipa_rm_stats); + read_only_mode, dent, NULL, &ipa_rm_stats); if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) { IPAERR("fail to create file for debug_fs rm_stats\n"); goto fail; } dfile_status_stats = debugfs_create_file("status_stats", - read_only_mode, dent, 0, &ipa_status_stats_ops); + read_only_mode, dent, NULL, &ipa_status_stats_ops); if (!dfile_status_stats || IS_ERR(dfile_status_stats)) { IPAERR("fail to create file for debug_fs status_stats\n"); goto fail; } dfile_ipa_rx_poll_timeout = debugfs_create_file("ipa_rx_poll_time", - read_write_mode, dent, 0, &ipa_rx_poll_time_ops); + read_write_mode, dent, NULL, &ipa_rx_poll_time_ops); if (!dfile_ipa_rx_poll_timeout || IS_ERR(dfile_ipa_rx_poll_timeout)) { IPAERR("fail to create file for debug_fs rx poll timeout\n"); goto fail; } dfile_ipa_poll_iteration = debugfs_create_file("ipa_poll_iteration", - read_write_mode, dent, 0, &ipa_poll_iteration_ops); + read_write_mode, dent, NULL, &ipa_poll_iteration_ops); if (!dfile_ipa_poll_iteration || IS_ERR(dfile_ipa_poll_iteration)) { IPAERR("fail to create file for debug_fs poll iteration\n"); goto fail; @@ -2270,7 +2270,7 @@ void ipa_debugfs_init(void) } file = debugfs_create_file("enable_low_prio_print", write_only_mode, - dent, 0, &ipa_ipc_low_ops); + dent, NULL, &ipa_ipc_low_ops); if (!file) { IPAERR("could not create enable_low_prio_print file\n"); goto fail; 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c index 232d9216d1bf..0313b19b4141 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c @@ -867,7 +867,7 @@ static ssize_t ipa_dma_debugfs_reset_statistics(struct file *file, return count; } -const struct file_operations ipadma_stats_ops = { +static const struct file_operations ipadma_stats_ops = { .read = ipa_dma_debugfs_read, .write = ipa_dma_debugfs_reset_statistics, }; @@ -876,7 +876,7 @@ static void ipa_dma_debugfs_init(void) { const mode_t read_write_mode = 0666; - dent = debugfs_create_dir("ipa_dma", 0); + dent = debugfs_create_dir("ipa_dma", NULL); if (IS_ERR(dent)) { IPADMA_ERR("fail to create folder ipa_dma\n"); return; @@ -884,7 +884,7 @@ static void ipa_dma_debugfs_init(void) dfile_info = debugfs_create_file("info", read_write_mode, dent, - 0, &ipadma_stats_ops); + NULL, &ipadma_stats_ops); if (!dfile_info || IS_ERR(dfile_info)) { IPADMA_ERR("fail to create file stats\n"); goto fail; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c index b6bbc37f178a..b3f66161f263 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c @@ -295,7 +295,7 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc, u16 sps_flags = SPS_IOVEC_FLAG_EOT; dma_addr_t dma_address; u16 len; - u32 mem_flag = GFP_ATOMIC; + gfp_t mem_flag = GFP_ATOMIC; struct sps_iovec iov; int ret; @@ -948,7 +948,7 @@ void ipa_sps_irq_control_all(bool enable) ipa_ep_idx = ipa_get_ep_mapping(client_num); if (ipa_ep_idx == -1) { - IPAERR("Invalid client.\n"); + IPADBG_LOW("Invalid client.\n"); continue; } ep = &ipa_ctx->ep[ipa_ep_idx]; @@ -2151,10 +2151,10 @@ static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys) rx_len_cached = sys->len; while (rx_len_cached < sys->rx_pool_sz) { - spin_lock_bh(&sys->spinlock); if 
(list_empty(&sys->rcycl_list)) goto fail_kmem_cache_alloc; + spin_lock_bh(&sys->spinlock); rx_pkt = list_first_entry(&sys->rcycl_list, struct ipa_rx_pkt_wrapper, link); list_del(&rx_pkt->link); @@ -2197,7 +2197,6 @@ static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys) INIT_LIST_HEAD(&rx_pkt->link); spin_unlock_bh(&sys->spinlock); fail_kmem_cache_alloc: - spin_unlock_bh(&sys->spinlock); if (rx_len_cached == 0) queue_delayed_work(sys->wq, &sys->replenish_rx_work, msecs_to_jiffies(1)); @@ -2664,7 +2663,7 @@ static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb, { struct ipa_hw_pkt_status *status; struct sk_buff *skb2; - u16 pkt_len_with_pad; + __be16 pkt_len_with_pad; u32 qmap_hdr; int checksum_trailer_exists; int frame_len; @@ -2831,10 +2830,6 @@ static int ipa_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys) src_pipe = mux_hdr->src_pipe_index; - IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n", - rx_skb->len, ntohs(mux_hdr->interface_id), - src_pipe, mux_hdr->flags, ntohl(mux_hdr->metadata)); - IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len); IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts); @@ -3075,35 +3070,6 @@ static void ipa_wq_rx_avail(struct work_struct *work) ipa_wq_rx_common(sys, 0); } -/** - * ipa_sps_irq_rx_no_aggr_notify() - Callback function which will be called by - * the SPS driver after a Rx operation is complete. - * Called in an interrupt context. - * @notify: SPS driver supplied notification struct - * - * This function defer the work for this event to a workqueue. 
- */ -void ipa_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify) -{ - struct ipa_rx_pkt_wrapper *rx_pkt; - - switch (notify->event_id) { - case SPS_EVENT_EOT: - rx_pkt = notify->data.transfer.user; - if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client)) - atomic_set(&ipa_ctx->sps_pm.eot_activity, 1); - rx_pkt->len = notify->data.transfer.iovec.size; - IPADBG_LOW - ("event %d notified sys=%p len=%u\n", notify->event_id, - notify->user, rx_pkt->len); - queue_work(rx_pkt->sys->wq, &rx_pkt->work); - break; - default: - IPAERR("received unexpected event id %d sys=%p\n", - notify->event_id, notify->user); - } -} - static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys) { diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c index da0304845deb..7de00414e3db 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c @@ -566,7 +566,7 @@ static int ipa_generate_flt_hw_tbl_v1_1(enum ipa_ip_type ip, /* write a dummy header to move cursor */ hdr = ipa_write_32(hdr_top, hdr); - if (ipa_generate_flt_hw_tbl_common(ip, body, hdr, hdr_sz, 0, + if (ipa_generate_flt_hw_tbl_common(ip, body, hdr, hdr_sz, NULL, &hdr_top)) { IPAERR("fail to generate FLT HW table\n"); goto proc_err; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index 66399f6f0a66..81fdeb778a8d 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -1806,19 +1806,22 @@ void _ipa_enable_clks_v1_1(void); void _ipa_enable_clks_v2_0(void); void _ipa_disable_clks_v1_1(void); void _ipa_disable_clks_v2_0(void); +void ipa_suspend_handler(enum ipa_irq_type interrupt, + void *private_data, + void *interrupt_data); -static inline u32 ipa_read_reg(void *base, u32 offset) +static inline u32 ipa_read_reg(void __iomem *base, u32 offset) { return ioread32(base + offset); } -static inline u32 ipa_read_reg_field(void 
*base, u32 offset, +static inline u32 ipa_read_reg_field(void __iomem *base, u32 offset, u32 mask, u32 shift) { return (ipa_read_reg(base, offset) & mask) >> shift; } -static inline void ipa_write_reg(void *base, u32 offset, u32 val) +static inline void ipa_write_reg(void __iomem *base, u32 offset, u32 val) { iowrite32(val, base + offset); } @@ -1950,6 +1953,7 @@ struct iommu_domain *ipa2_get_wlan_smmu_domain(void); int ipa2_ap_suspend(struct device *dev); int ipa2_ap_resume(struct device *dev); struct iommu_domain *ipa2_get_smmu_domain(void); +struct iommu_domain *ipa2_get_uc_smmu_domain(void); struct device *ipa2_get_dma_dev(void); int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c index 18b6e2ffa578..437e212f102f 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c @@ -5,6 +5,7 @@ #include #include +#include #include "ipa_i.h" #include diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c index 862943461ac9..eafec5978243 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c @@ -35,8 +35,7 @@ enum nat_table_type { #define IPA_TABLE_MAX_ENTRIES 1000 #define MAX_ALLOC_NAT_SIZE (IPA_TABLE_MAX_ENTRIES * NAT_TABLE_ENTRY_SIZE_BYTE) -static int ipa_nat_vma_fault_remap( - struct vm_area_struct *vma, struct vm_fault *vmf) +static int ipa_nat_vma_fault_remap(struct vm_fault *vmf) { IPADBG("\n"); vmf->page = NULL; @@ -131,10 +130,10 @@ static const struct file_operations ipa_nat_fops = { * * Called during nat table delete */ -void allocate_temp_nat_memory(void) +static void allocate_temp_nat_memory(void) { struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem); - int gfp_flags = GFP_KERNEL | __GFP_ZERO; + gfp_t gfp_flags = 
GFP_KERNEL | __GFP_ZERO; nat_ctx->tmp_vaddr = dma_alloc_coherent(ipa_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE, @@ -245,7 +244,7 @@ int create_nat_device(void) int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem) { struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem); - int gfp_flags = GFP_KERNEL | __GFP_ZERO; + gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO; int result; IPADBG("passed memory size %zu\n", mem->size); @@ -749,7 +748,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma) * * Called by NAT client driver to free the NAT memory and remove the device */ -void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx) +static void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx) { IPADBG("\n"); mutex_lock(&nat_ctx->lock); @@ -861,10 +860,10 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) ipa_ctx->nat_mem.size_base_tables = 0; ipa_ctx->nat_mem.size_expansion_tables = 0; ipa_ctx->nat_mem.public_ip_addr = 0; - ipa_ctx->nat_mem.ipv4_rules_addr = 0; - ipa_ctx->nat_mem.ipv4_expansion_rules_addr = 0; - ipa_ctx->nat_mem.index_table_addr = 0; - ipa_ctx->nat_mem.index_table_expansion_addr = 0; + ipa_ctx->nat_mem.ipv4_rules_addr = NULL; + ipa_ctx->nat_mem.ipv4_expansion_rules_addr = NULL; + ipa_ctx->nat_mem.index_table_addr = NULL; + ipa_ctx->nat_mem.index_table_expansion_addr = NULL; ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem); IPADBG("return\n"); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c index 47325d7ef056..d322deebda4f 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include @@ -34,12 +33,7 @@ #define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000 static struct qmi_handle *ipa_svc_handle; -static void ipa_a5_svc_recv_msg(struct work_struct *work); -static DECLARE_DELAYED_WORK(work_recv_msg, ipa_a5_svc_recv_msg); -static struct 
workqueue_struct *ipa_svc_workqueue; static struct workqueue_struct *ipa_clnt_req_workqueue; -static struct workqueue_struct *ipa_clnt_resp_workqueue; -static void *curr_conn; static bool qmi_modem_init_fin, qmi_indication_fin; static uint32_t ipa_wan_platform; struct ipa_qmi_context *ipa_qmi_ctx; @@ -48,68 +42,43 @@ static atomic_t workqueues_stopped; static atomic_t ipa_qmi_initialized; struct mutex ipa_qmi_lock; -/* QMI A5 service */ - -static struct msg_desc ipa_indication_reg_req_desc = { - .max_msg_len = QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01, - .msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01, - .ei_array = ipa_indication_reg_req_msg_data_v01_ei, -}; -static struct msg_desc ipa_indication_reg_resp_desc = { - .max_msg_len = QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01, - .msg_id = QMI_IPA_INDICATION_REGISTER_RESP_V01, - .ei_array = ipa_indication_reg_resp_msg_data_v01_ei, -}; -static struct msg_desc ipa_master_driver_complete_indication_desc = { - .max_msg_len = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, - .msg_id = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, - .ei_array = ipa_master_driver_init_complt_ind_msg_data_v01_ei, -}; -static struct msg_desc ipa_install_fltr_rule_req_desc = { - .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01, - .msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, - .ei_array = ipa_install_fltr_rule_req_msg_data_v01_ei, -}; -static struct msg_desc ipa_install_fltr_rule_resp_desc = { - .max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01, - .msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01, - .ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei, -}; -static struct msg_desc ipa_filter_installed_notif_req_desc = { - .max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01, - .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, - .ei_array = ipa_fltr_installed_notif_req_msg_data_v01_ei, -}; -static struct msg_desc ipa_filter_installed_notif_resp_desc = { - .max_msg_len = 
QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01, - .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01, - .ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei, -}; -static struct msg_desc ipa_config_req_desc = { - .max_msg_len = QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01, - .msg_id = QMI_IPA_CONFIG_REQ_V01, - .ei_array = ipa_config_req_msg_data_v01_ei, -}; -static struct msg_desc ipa_config_resp_desc = { - .max_msg_len = QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01, - .msg_id = QMI_IPA_CONFIG_RESP_V01, - .ei_array = ipa_config_resp_msg_data_v01_ei, +struct ipa_msg_desc { + uint16_t msg_id; + int max_msg_len; + struct qmi_elem_info *ei_array; }; -static int handle_indication_req(void *req_h, void *req) +/* QMI A5 service */ + +static void handle_indication_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) { struct ipa_indication_reg_req_msg_v01 *indication_req; struct ipa_indication_reg_resp_msg_v01 resp; struct ipa_master_driver_init_complt_ind_msg_v01 ind; int rc; - indication_req = (struct ipa_indication_reg_req_msg_v01 *)req; + indication_req = (struct ipa_indication_reg_req_msg_v01 *)decoded_msg; IPAWANDBG("Received INDICATION Request\n"); + /* cache the client sq */ + memcpy(&ipa_qmi_ctx->client_sq, sq, sizeof(*sq)); + memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01)); resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; - rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h, - &ipa_indication_reg_resp_desc, &resp, sizeof(resp)); + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_INDICATION_REGISTER_RESP_V01, + QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01, + ipa_indication_reg_resp_msg_data_v01_ei, + &resp); + + if (rc < 0) { + IPAWANERR("send response for Indication register failed\n"); + return; + } + qmi_indication_fin = true; /* check if need sending indication to modem */ if (qmi_modem_init_fin) { @@ -119,33 +88,44 @@ static int handle_indication_req(void *req_h, void *req) 
ipa_master_driver_init_complt_ind_msg_v01)); ind.master_driver_init_status.result = IPA_QMI_RESULT_SUCCESS_V01; - rc = qmi_send_ind_from_cb(ipa_svc_handle, curr_conn, - &ipa_master_driver_complete_indication_desc, - &ind, - sizeof(ind)); + rc = qmi_send_indication(qmi_handle, + &(ipa_qmi_ctx->client_sq), + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, + ipa_master_driver_init_complt_ind_msg_data_v01_ei, + &ind); + + if (rc < 0) { + IPAWANERR("send indication failed\n"); + qmi_indication_fin = false; + } } else { IPAWANERR("not send indication\n"); } - return rc; } -static int handle_install_filter_rule_req(void *req_h, void *req) +static void handle_install_filter_rule_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) { struct ipa_install_fltr_rule_req_msg_v01 *rule_req; struct ipa_install_fltr_rule_resp_msg_v01 resp; uint32_t rule_hdl[MAX_NUM_Q6_RULE]; int rc = 0, i; - rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)req; + rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)decoded_msg; memset(rule_hdl, 0, sizeof(rule_hdl)); memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01)); IPAWANDBG("Received install filter Request\n"); rc = copy_ul_filter_rule_to_ipa((struct - ipa_install_fltr_rule_req_msg_v01*)req, rule_hdl); - if (rc) + ipa_install_fltr_rule_req_msg_v01*)decoded_msg, rule_hdl); + if (rc) { IPAWANERR("copy UL rules from modem is failed\n"); + return; + } resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; if (rule_req->filter_spec_list_valid == true) { @@ -171,14 +151,23 @@ static int handle_install_filter_rule_req(void *req_h, void *req) resp.filter_handle_list[i].filter_handle = rule_hdl[i]; } - rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h, - &ipa_install_fltr_rule_resp_desc, &resp, sizeof(resp)); + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_INSTALL_FILTER_RULE_RESP_V01, + 
QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01, + ipa_install_fltr_rule_resp_msg_data_v01_ei, + &resp); - IPAWANDBG("Replied to install filter request\n"); - return rc; + if (rc < 0) + IPAWANERR("install filter rules failed\n"); + else + IPAWANDBG("Replied to install filter request\n"); } -static int handle_filter_installed_notify_req(void *req_h, void *req) +static void handle_filter_installed_notify_req( + struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) { struct ipa_fltr_installed_notif_resp_msg_v01 resp; int rc = 0; @@ -187,15 +176,22 @@ static int handle_filter_installed_notify_req(void *req_h, void *req) IPAWANDBG("Received filter_install_notify Request\n"); resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; - rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h, - &ipa_filter_installed_notif_resp_desc, - &resp, sizeof(resp)); + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01, + QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01, + ipa_fltr_installed_notif_resp_msg_data_v01_ei, + &resp); - IPAWANDBG("Responsed filter_install_notify Request\n"); - return rc; + if (rc < 0) + IPAWANERR("handle filter rules failed\n"); + else + IPAWANDBG("Responsed filter_install_notify Request\n"); } -static int handle_ipa_config_req(void *req_h, void *req) +static void handle_ipa_config_req(struct qmi_handle *qmi_handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded_msg) { struct ipa_config_resp_msg_v01 resp; int rc; @@ -204,151 +200,40 @@ static int handle_ipa_config_req(void *req_h, void *req) resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01; IPAWANDBG("Received IPA CONFIG Request\n"); rc = ipa_mhi_handle_ipa_config_req( - (struct ipa_config_req_msg_v01 *)req); + (struct ipa_config_req_msg_v01 *)decoded_msg); if (rc) { IPAERR("ipa_mhi_handle_ipa_config_req failed %d\n", rc); resp.resp.result = IPA_QMI_RESULT_FAILURE_V01; } - rc = 
qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h, - &ipa_config_resp_desc, - &resp, sizeof(resp)); - IPAWANDBG("Responsed IPA CONFIG Request\n"); - return rc; -} - -static int ipa_a5_svc_connect_cb(struct qmi_handle *handle, - void *conn_h) -{ - if (ipa_svc_handle != handle || !conn_h) - return -EINVAL; - - if (curr_conn) { - IPAWANERR("Service is busy\n"); - return -ECONNREFUSED; - } - curr_conn = conn_h; - return 0; -} - -static int ipa_a5_svc_disconnect_cb(struct qmi_handle *handle, - void *conn_h) -{ - if (ipa_svc_handle != handle || curr_conn != conn_h) - return -EINVAL; - - curr_conn = NULL; - return 0; -} - -static int ipa_a5_svc_req_desc_cb(unsigned int msg_id, - struct msg_desc **req_desc) -{ - int rc; - - switch (msg_id) { - case QMI_IPA_INDICATION_REGISTER_REQ_V01: - *req_desc = &ipa_indication_reg_req_desc; - rc = sizeof(struct ipa_indication_reg_req_msg_v01); - break; - - case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01: - *req_desc = &ipa_install_fltr_rule_req_desc; - rc = sizeof(struct ipa_install_fltr_rule_req_msg_v01); - break; - case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01: - *req_desc = &ipa_filter_installed_notif_req_desc; - rc = sizeof(struct ipa_fltr_installed_notif_req_msg_v01); - break; - case QMI_IPA_CONFIG_REQ_V01: - *req_desc = &ipa_config_req_desc; - rc = sizeof(struct ipa_config_req_msg_v01); - break; - default: - rc = -ENOTSUPP; - break; - } - return rc; -} - -static int ipa_a5_svc_req_cb(struct qmi_handle *handle, void *conn_h, - void *req_h, unsigned int msg_id, void *req) -{ - int rc; - - if (ipa_svc_handle != handle || curr_conn != conn_h) - return -EINVAL; - - switch (msg_id) { - case QMI_IPA_INDICATION_REGISTER_REQ_V01: - rc = handle_indication_req(req_h, req); - break; - case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01: - rc = handle_install_filter_rule_req(req_h, req); - rc = wwan_update_mux_channel_prop(); - break; - case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01: - rc = handle_filter_installed_notify_req(req_h, req); - break; - case 
QMI_IPA_CONFIG_REQ_V01: - rc = handle_ipa_config_req(req_h, req); - break; - default: - rc = -ENOTSUPP; - break; - } - return rc; -} - -static void ipa_a5_svc_recv_msg(struct work_struct *work) -{ - int rc; + IPAWANDBG("qmi_snd_rsp: result %d, err %d\n", + resp.resp.result, resp.resp.error); + rc = qmi_send_response(qmi_handle, sq, txn, + QMI_IPA_CONFIG_RESP_V01, + QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01, + ipa_config_resp_msg_data_v01_ei, + &resp); - do { - IPAWANDBG_LOW("Notified about a Receive Event"); - rc = qmi_recv_msg(ipa_svc_handle); - } while (rc == 0); - if (rc != -ENOMSG) - IPAWANERR("Error receiving message\n"); + if (rc < 0) + IPAWANERR("QMI_IPA_CONFIG_RESP_V01 failed\n"); + else + IPAWANDBG("Responsed QMI_IPA_CONFIG_RESP_V01\n"); } -static void qmi_ipa_a5_svc_ntfy(struct qmi_handle *handle, - enum qmi_event_type event, void *priv) +static void ipa_a5_svc_disconnect_cb(struct qmi_handle *qmi, + unsigned int node, unsigned int port) { - switch (event) { - case QMI_RECV_MSG: - if (!atomic_read(&workqueues_stopped)) - queue_delayed_work(ipa_svc_workqueue, - &work_recv_msg, 0); - break; - default: - break; - } + IPAWANDBG_LOW("Received QMI client disconnect\n"); } -static struct qmi_svc_ops_options ipa_a5_svc_ops_options = { - .version = 1, - .service_id = IPA_A5_SERVICE_SVC_ID, - .service_vers = IPA_A5_SVC_VERS, - .service_ins = IPA_A5_SERVICE_INS_ID, - .connect_cb = ipa_a5_svc_connect_cb, - .disconnect_cb = ipa_a5_svc_disconnect_cb, - .req_desc_cb = ipa_a5_svc_req_desc_cb, - .req_cb = ipa_a5_svc_req_cb, -}; - - /****************************************************/ /* QMI A5 client ->Q6 */ /****************************************************/ -static void ipa_q6_clnt_recv_msg(struct work_struct *work); -static DECLARE_DELAYED_WORK(work_recv_msg_client, ipa_q6_clnt_recv_msg); static void ipa_q6_clnt_svc_arrive(struct work_struct *work); static DECLARE_DELAYED_WORK(work_svc_arrive, ipa_q6_clnt_svc_arrive); static void ipa_q6_clnt_svc_exit(struct work_struct 
*work); static DECLARE_DELAYED_WORK(work_svc_exit, ipa_q6_clnt_svc_exit); /* Test client port for IPC Router */ static struct qmi_handle *ipa_q6_clnt; -static int ipa_q6_clnt_reset; static int ipa_check_qmi_response(int rc, int req_id, @@ -382,11 +267,43 @@ static int ipa_check_qmi_response(int rc, return 0; } +static int ipa_qmi_send_req_wait(struct qmi_handle *client_handle, + struct ipa_msg_desc *req_desc, void *req, + struct ipa_msg_desc *resp_desc, void *resp, + unsigned long timeout_ms) +{ + struct qmi_txn txn; + int ret; + + ret = qmi_txn_init(client_handle, &txn, resp_desc->ei_array, resp); + + if (ret < 0) { + IPAWANERR("QMI txn init failed, ret= %d\n", ret); + return ret; + } + + ret = qmi_send_request(client_handle, + &ipa_qmi_ctx->server_sq, + &txn, + req_desc->msg_id, + req_desc->max_msg_len, + req_desc->ei_array, + req); + + if (ret < 0) { + qmi_txn_cancel(&txn); + return ret; + } + ret = qmi_txn_wait(&txn, msecs_to_jiffies(timeout_ms)); + + return ret; +} + static int qmi_init_modem_send_sync_msg(void) { struct ipa_init_modem_driver_req_msg_v01 req; struct ipa_init_modem_driver_resp_msg_v01 resp; - struct msg_desc req_desc, resp_desc; + struct ipa_msg_desc req_desc, resp_desc; int rc; u16 smem_restr_bytes = ipa2_get_smem_restr_bytes(); @@ -489,9 +406,17 @@ static int qmi_init_modem_send_sync_msg(void) pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n"); if (unlikely(!ipa_q6_clnt)) return -ETIMEDOUT; - rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req), - &resp_desc, &resp, sizeof(resp), + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, + &resp_desc, &resp, QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, + rc); + return rc; + } + pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n"); return ipa_check_qmi_response(rc, QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result, @@ -502,7 +427,7 @@ static int qmi_init_modem_send_sync_msg(void) 
int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) { struct ipa_install_fltr_rule_resp_msg_v01 resp; - struct msg_desc req_desc, resp_desc; + struct ipa_msg_desc req_desc, resp_desc; int rc; int i; @@ -577,11 +502,18 @@ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req) resp_desc.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei; if (unlikely(!ipa_q6_clnt)) return -ETIMEDOUT; - rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, - sizeof(struct ipa_install_fltr_rule_req_msg_v01), - &resp_desc, &resp, sizeof(resp), + &resp_desc, &resp, QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, + rc); + return rc; + } + return ipa_check_qmi_response(rc, QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result, resp.resp.error, "ipa_install_filter"); @@ -592,7 +524,7 @@ int qmi_enable_force_clear_datapath_send( struct ipa_enable_force_clear_datapath_req_msg_v01 *req) { struct ipa_enable_force_clear_datapath_resp_msg_v01 resp; - struct msg_desc req_desc, resp_desc; + struct ipa_msg_desc req_desc, resp_desc; int rc = 0; @@ -614,14 +546,15 @@ int qmi_enable_force_clear_datapath_send( ipa_enable_force_clear_datapath_resp_msg_data_v01_ei; if (unlikely(!ipa_q6_clnt)) return -ETIMEDOUT; - rc = qmi_send_req_wait(ipa_q6_clnt, - &req_desc, - req, - sizeof(*req), - &resp_desc, &resp, sizeof(resp), - QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + &resp_desc, &resp, + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); if (rc < 0) { - IPAWANERR("send req failed %d\n", rc); + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01, + rc); return rc; } if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { @@ -637,7 +570,7 @@ int qmi_disable_force_clear_datapath_send( struct ipa_disable_force_clear_datapath_req_msg_v01 *req) { struct 
ipa_disable_force_clear_datapath_resp_msg_v01 resp; - struct msg_desc req_desc, resp_desc; + struct ipa_msg_desc req_desc, resp_desc; int rc = 0; @@ -660,14 +593,15 @@ int qmi_disable_force_clear_datapath_send( ipa_disable_force_clear_datapath_resp_msg_data_v01_ei; if (unlikely(!ipa_q6_clnt)) return -ETIMEDOUT; - rc = qmi_send_req_wait(ipa_q6_clnt, - &req_desc, - req, - sizeof(*req), - &resp_desc, &resp, sizeof(resp), - QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + &resp_desc, &resp, + QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS); if (rc < 0) { - IPAWANERR("send req failed %d\n", rc); + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01, + rc); return rc; } if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) { @@ -683,7 +617,7 @@ int qmi_disable_force_clear_datapath_send( int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req) { struct ipa_fltr_installed_notif_resp_msg_v01 resp; - struct msg_desc req_desc, resp_desc; + struct ipa_msg_desc req_desc, resp_desc; int rc = 0, i = 0; /* check if the filter rules from IPACM is valid */ @@ -752,77 +686,43 @@ int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req) resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei; if (unlikely(!ipa_q6_clnt)) return -ETIMEDOUT; - rc = qmi_send_req_wait(ipa_q6_clnt, - &req_desc, - req, - sizeof(struct ipa_fltr_installed_notif_req_msg_v01), - &resp_desc, &resp, sizeof(resp), - QMI_SEND_REQ_TIMEOUT_MS); + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, + &req_desc, + req, + &resp_desc, &resp, + QMI_SEND_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("send Req %d failed, rc= %d\n", + QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, + rc); + return rc; + } + return ipa_check_qmi_response(rc, QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result, resp.resp.error, "ipa_fltr_installed_notif_resp"); } -static void ipa_q6_clnt_recv_msg(struct work_struct *work) -{ 
- int rc; - - do { - IPAWANDBG_LOW("Notified about a Receive Event"); - rc = qmi_recv_msg(ipa_q6_clnt); - } while (rc == 0); - if (rc != -ENOMSG) - IPAWANERR("Error receiving message\n"); -} - -static void ipa_q6_clnt_notify(struct qmi_handle *handle, - enum qmi_event_type event, void *notify_priv) -{ - switch (event) { - case QMI_RECV_MSG: - IPAWANDBG_LOW("client qmi recv message called"); - if (!atomic_read(&workqueues_stopped)) - queue_delayed_work(ipa_clnt_resp_workqueue, - &work_recv_msg_client, 0); - break; - default: - break; - } -} - -static void ipa_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id, - void *msg, unsigned int msg_len, - void *ind_cb_priv) +static void ipa_q6_clnt_quota_reached_ind_cb(struct qmi_handle *handle, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) { - struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind; - struct msg_desc qmi_ind_desc; - int rc = 0; + struct ipa_data_usage_quota_reached_ind_msg_v01 *qmi_ind; if (handle != ipa_q6_clnt) { IPAWANERR("Wrong client\n"); return; } - if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) { - memset(&qmi_ind, 0, sizeof( - struct ipa_data_usage_quota_reached_ind_msg_v01)); - qmi_ind_desc.max_msg_len = - QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01; - qmi_ind_desc.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01; - qmi_ind_desc.ei_array = - ipa_data_usage_quota_reached_ind_msg_data_v01_ei; + qmi_ind = (struct ipa_data_usage_quota_reached_ind_msg_v01 *) data; - rc = qmi_kernel_decode(&qmi_ind_desc, &qmi_ind, msg, msg_len); - if (rc < 0) { - IPAWANERR("Error decoding msg_id %d\n", msg_id); - return; - } - IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n", - qmi_ind.apn.mux_id, - (unsigned long) qmi_ind.apn.num_Mbytes); - ipa_broadcast_quota_reach_ind(qmi_ind.apn.mux_id, - IPA_UPSTEAM_MODEM); - } + IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n", + qmi_ind->apn.mux_id, + (unsigned long) qmi_ind->apn.num_Mbytes); + 
ipa_broadcast_quota_reach_ind(qmi_ind->apn.mux_id, + IPA_UPSTEAM_MODEM); } static void ipa_q6_clnt_svc_arrive(struct work_struct *work) @@ -830,30 +730,16 @@ static void ipa_q6_clnt_svc_arrive(struct work_struct *work) int rc; struct ipa_master_driver_init_complt_ind_msg_v01 ind; - /* Create a Local client port for QMI communication */ - ipa_q6_clnt = qmi_handle_create(ipa_q6_clnt_notify, NULL); - if (!ipa_q6_clnt) { - IPAWANERR("QMI client handle alloc failed\n"); - return; - } + rc = kernel_connect(ipa_q6_clnt->sock, + (struct sockaddr *) &ipa_qmi_ctx->server_sq, + sizeof(ipa_qmi_ctx->server_sq), + 0); - IPAWANDBG("Lookup server name, get client-hdl(%p)\n", - ipa_q6_clnt); - rc = qmi_connect_to_service(ipa_q6_clnt, - IPA_Q6_SERVICE_SVC_ID, - IPA_Q6_SVC_VERS, - IPA_Q6_SERVICE_INS_ID); if (rc < 0) { - IPAWANERR("Server not found\n"); - ipa_q6_clnt_svc_exit(0); + IPAWANERR("Couldnt connect Server\n"); return; } - rc = qmi_register_ind_cb(ipa_q6_clnt, ipa_q6_clnt_ind_cb, NULL); - if (rc < 0) - IPAWANERR("Unable to register for indications\n"); - - ipa_q6_clnt_reset = 0; IPAWANDBG("Q6 QMI service available now\n"); /* Initialize modem IPA-driver */ IPAWANDBG("send qmi_init_modem_send_sync_msg to modem\n"); @@ -861,6 +747,8 @@ static void ipa_q6_clnt_svc_arrive(struct work_struct *work) if ((rc == -ENETRESET) || (rc == -ENODEV)) { IPAWANERR("qmi_init_modem_send_sync_msg failed due to SSR!\n"); /* Cleanup will take place when ipa_wwan_remove is called */ + vfree(ipa_q6_clnt); + ipa_q6_clnt = NULL; return; } if (rc != 0) { @@ -888,10 +776,13 @@ static void ipa_q6_clnt_svc_arrive(struct work_struct *work) ipa_master_driver_init_complt_ind_msg_v01)); ind.master_driver_init_status.result = IPA_QMI_RESULT_SUCCESS_V01; - rc = qmi_send_ind(ipa_svc_handle, curr_conn, - &ipa_master_driver_complete_indication_desc, - &ind, - sizeof(ind)); + rc = qmi_send_indication(ipa_svc_handle, + &ipa_qmi_ctx->client_sq, + QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01, + 
QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01, + ipa_master_driver_init_complt_ind_msg_data_v01_ei, + &ind); + IPAWANDBG("ipa_qmi_service_client good\n"); } else { IPAWANERR("not send indication (%d)\n", @@ -902,37 +793,103 @@ static void ipa_q6_clnt_svc_arrive(struct work_struct *work) static void ipa_q6_clnt_svc_exit(struct work_struct *work) { - mutex_lock(&ipa_qmi_lock); - - if (ipa_q6_clnt) - qmi_handle_destroy(ipa_q6_clnt); - ipa_q6_clnt_reset = 1; - ipa_q6_clnt = NULL; - mutex_unlock(&ipa_qmi_lock); + if (ipa_qmi_ctx != NULL) { + ipa_qmi_ctx->server_sq.sq_family = 0; + ipa_qmi_ctx->server_sq.sq_node = 0; + ipa_qmi_ctx->server_sq.sq_port = 0; + } } - -static int ipa_q6_clnt_svc_event_notify(struct notifier_block *this, - unsigned long code, - void *_cmd) +static int ipa_q6_clnt_svc_event_notify_svc_new(struct qmi_handle *qmi, + struct qmi_service *service) { - IPAWANDBG("event %ld\n", code); - switch (code) { - case QMI_SERVER_ARRIVE: - if (!atomic_read(&workqueues_stopped)) - queue_delayed_work(ipa_clnt_req_workqueue, - &work_svc_arrive, 0); - break; - default: - break; + IPAWANDBG("QMI svc:%d vers:%d ins:%d node:%d port:%d\n", + service->service, service->version, service->instance, + service->node, service->port); + + if (ipa_qmi_ctx != NULL) { + ipa_qmi_ctx->server_sq.sq_family = AF_QIPCRTR; + ipa_qmi_ctx->server_sq.sq_node = service->node; + ipa_qmi_ctx->server_sq.sq_port = service->port; + } + if (!atomic_read(&workqueues_stopped)) { + queue_delayed_work(ipa_clnt_req_workqueue, + &work_svc_arrive, 0); } return 0; } +static void ipa_q6_clnt_svc_event_notify_net_reset(struct qmi_handle *qmi) +{ + if (!atomic_read(&workqueues_stopped)) + queue_delayed_work(ipa_clnt_req_workqueue, + &work_svc_exit, 0); +} + +static void ipa_q6_clnt_svc_event_notify_svc_exit(struct qmi_handle *qmi, + struct qmi_service *svc) +{ + IPAWANDBG("QMI svc:%d vers:%d ins:%d node:%d port:%d\n", svc->service, + svc->version, svc->instance, svc->node, svc->port); + + if 
(!atomic_read(&workqueues_stopped)) + queue_delayed_work(ipa_clnt_req_workqueue, + &work_svc_exit, 0); +} + +static struct qmi_ops server_ops = { + .del_client = ipa_a5_svc_disconnect_cb, +}; + +static struct qmi_ops client_ops = { + .new_server = ipa_q6_clnt_svc_event_notify_svc_new, + .del_server = ipa_q6_clnt_svc_event_notify_svc_exit, + .net_reset = ipa_q6_clnt_svc_event_notify_net_reset, +}; + +static struct qmi_msg_handler server_handlers[] = { + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01, + .ei = ipa_indication_reg_req_msg_data_v01_ei, + .decoded_size = sizeof(struct ipa_indication_reg_req_msg_v01), + .fn = handle_indication_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, + .ei = ipa_install_fltr_rule_req_msg_data_v01_ei, + .decoded_size = sizeof( + struct ipa_install_fltr_rule_req_msg_v01), + .fn = handle_install_filter_rule_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, + .ei = ipa_fltr_installed_notif_req_msg_data_v01_ei, + .decoded_size = sizeof( + struct ipa_fltr_installed_notif_req_msg_v01), + .fn = handle_filter_installed_notify_req, + }, + { + .type = QMI_REQUEST, + .msg_id = QMI_IPA_CONFIG_REQ_V01, + .ei = ipa_config_req_msg_data_v01_ei, + .decoded_size = sizeof(struct ipa_config_req_msg_v01), + .fn = handle_ipa_config_req, + }, +}; -static struct notifier_block ipa_q6_clnt_nb = { - .notifier_call = ipa_q6_clnt_svc_event_notify, +static struct qmi_msg_handler client_handlers[] = { + { + .type = QMI_INDICATION, + .msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01, + .ei = ipa_data_usage_quota_reached_ind_msg_data_v01_ei, + .decoded_size = sizeof( + struct ipa_data_usage_quota_reached_ind_msg_v01), + .fn = ipa_q6_clnt_quota_reached_ind_cb, + }, }; static void ipa_qmi_service_init_worker(void) @@ -951,34 +908,30 @@ static void ipa_qmi_service_init_worker(void) ipa_qmi_ctx->modem_cfg_emb_pipe_flt = ipa2_get_modem_cfg_emb_pipe_flt(); - 
ipa_svc_workqueue = create_singlethread_workqueue("ipa_A7_svc"); - if (!ipa_svc_workqueue) { - IPAWANERR("Creating ipa_A7_svc workqueue failed\n"); - vfree(ipa_qmi_ctx); - ipa_qmi_ctx = NULL; - return; - } - - ipa_svc_handle = qmi_handle_create(qmi_ipa_a5_svc_ntfy, NULL); - if (!ipa_svc_handle) { - IPAWANERR("Creating ipa_A7_svc qmi handle failed\n"); + ipa_svc_handle = vzalloc(sizeof(*ipa_svc_handle)); + if (!ipa_svc_handle) goto destroy_ipa_A7_svc_wq; + + rc = qmi_handle_init(ipa_svc_handle, + QMI_IPA_MAX_MSG_LEN, + &server_ops, + server_handlers); + + if (rc < 0) { + IPAWANERR("Initializing ipa_a5 svc failed %d\n", rc); + goto destroy_qmi_handle; } - /* - * Setting the current connection to NULL, as due to a race between - * server and client clean-up in SSR, the disconnect_cb might not - * have necessarily been called - */ - curr_conn = NULL; + rc = qmi_add_server(ipa_svc_handle, + IPA_A5_SERVICE_SVC_ID, + IPA_A5_SVC_VERS, + IPA_A5_SERVICE_INS_ID); - rc = qmi_svc_register(ipa_svc_handle, &ipa_a5_svc_ops_options); if (rc < 0) { IPAWANERR("Registering ipa_a5 svc failed %d\n", rc); - goto destroy_qmi_handle; + goto deregister_qmi_srv; } - /* Initialize QMI-client */ ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req"); @@ -987,40 +940,50 @@ static void ipa_qmi_service_init_worker(void) goto deregister_qmi_srv; } - ipa_clnt_resp_workqueue = create_singlethread_workqueue("clnt_resp"); - if (!ipa_clnt_resp_workqueue) { - IPAWANERR("Creating clnt_resp workqueue failed\n"); + /* Create a Local client port for QMI communication */ + ipa_q6_clnt = vzalloc(sizeof(*ipa_q6_clnt)); + + if (!ipa_q6_clnt) goto destroy_clnt_req_wq; - } - rc = qmi_svc_event_notifier_register(IPA_Q6_SERVICE_SVC_ID, - IPA_Q6_SVC_VERS, - IPA_Q6_SERVICE_INS_ID, &ipa_q6_clnt_nb); + rc = qmi_handle_init(ipa_q6_clnt, + QMI_IPA_MAX_MSG_LEN, + &client_ops, + client_handlers); + if (rc < 0) { - IPAWANERR("notifier register failed\n"); - goto destroy_clnt_resp_wq; + IPAWANERR("Creating clnt 
handle failed\n"); + goto destroy_qmi_client_handle; } - atomic_set(&ipa_qmi_initialized, 1); + rc = qmi_add_lookup(ipa_q6_clnt, + IPA_Q6_SERVICE_SVC_ID, + IPA_Q6_SVC_VERS, + IPA_Q6_SERVICE_INS_ID); + + if (rc < 0) { + IPAWANERR("Adding Q6 Svc failed\n"); + goto deregister_qmi_client; + } /* get Q6 service and start send modem-initial to Q6 */ IPAWANDBG("wait service available\n"); return; -destroy_clnt_resp_wq: - destroy_workqueue(ipa_clnt_resp_workqueue); - ipa_clnt_resp_workqueue = NULL; +deregister_qmi_client: + qmi_handle_release(ipa_q6_clnt); +destroy_qmi_client_handle: + vfree(ipa_q6_clnt); + ipa_q6_clnt = NULL; destroy_clnt_req_wq: destroy_workqueue(ipa_clnt_req_workqueue); ipa_clnt_req_workqueue = NULL; deregister_qmi_srv: - qmi_svc_unregister(ipa_svc_handle); + qmi_handle_release(ipa_svc_handle); destroy_qmi_handle: - qmi_handle_destroy(ipa_svc_handle); - ipa_svc_handle = 0; -destroy_ipa_A7_svc_wq: - destroy_workqueue(ipa_svc_workqueue); - ipa_svc_workqueue = NULL; vfree(ipa_qmi_ctx); +destroy_ipa_A7_svc_wq: + vfree(ipa_svc_handle); + ipa_svc_handle = NULL; ipa_qmi_ctx = NULL; } @@ -1038,56 +1001,32 @@ int ipa_qmi_service_init(uint32_t wan_platform_type) void ipa_qmi_service_exit(void) { - int ret = 0; atomic_set(&workqueues_stopped, 1); /* qmi-service */ - if (ipa_svc_handle) { - ret = qmi_svc_unregister(ipa_svc_handle); - if (ret < 0) - IPAWANERR("unregister qmi handle %p failed, ret=%d\n", - ipa_svc_handle, ret); - } - if (ipa_svc_workqueue) { - flush_workqueue(ipa_svc_workqueue); - destroy_workqueue(ipa_svc_workqueue); - ipa_svc_workqueue = NULL; + if (ipa_svc_handle != NULL) { + qmi_handle_release(ipa_svc_handle); + vfree(ipa_svc_handle); + ipa_svc_handle = NULL; } - if (ipa_svc_handle) { - ret = qmi_handle_destroy(ipa_svc_handle); - if (ret < 0) - IPAWANERR("Error destroying qmi handle %p, ret=%d\n", - ipa_svc_handle, ret); - } - ipa_svc_handle = 0; - /* qmi-client */ - /* Unregister from events */ - ret = 
qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID, - IPA_Q6_SVC_VERS, - IPA_Q6_SERVICE_INS_ID, &ipa_q6_clnt_nb); - if (ret < 0) - IPAWANERR( - "Error qmi_svc_event_notifier_unregister service %d, ret=%d\n", - IPA_Q6_SERVICE_SVC_ID, ret); /* Release client handle */ - ipa_q6_clnt_svc_exit(0); - - if (ipa_clnt_req_workqueue) { - destroy_workqueue(ipa_clnt_req_workqueue); - ipa_clnt_req_workqueue = NULL; - } - if (ipa_clnt_resp_workqueue) { - destroy_workqueue(ipa_clnt_resp_workqueue); - ipa_clnt_resp_workqueue = NULL; + if (ipa_q6_clnt != NULL) { + qmi_handle_release(ipa_q6_clnt); + vfree(ipa_q6_clnt); + ipa_q6_clnt = NULL; + if (ipa_clnt_req_workqueue) { + destroy_workqueue(ipa_clnt_req_workqueue); + ipa_clnt_req_workqueue = NULL; + } } - mutex_lock(&ipa_qmi_lock); /* clean the QMI msg cache */ + mutex_lock(&ipa_qmi_lock); if (ipa_qmi_ctx != NULL) { vfree(ipa_qmi_ctx); ipa_qmi_ctx = NULL; @@ -1106,8 +1045,6 @@ void ipa_qmi_stop_workqueues(void) atomic_set(&workqueues_stopped, 1); /* Making sure that the current scheduled work won't be executed */ - cancel_delayed_work(&work_recv_msg); - cancel_delayed_work(&work_recv_msg_client); cancel_delayed_work(&work_svc_arrive); cancel_delayed_work(&work_svc_exit); } @@ -1140,7 +1077,7 @@ int vote_for_bus_bw(uint32_t *bw_mbps) int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, struct ipa_get_data_stats_resp_msg_v01 *resp) { - struct msg_desc req_desc, resp_desc; + struct ipa_msg_desc req_desc, resp_desc; int rc; req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01; @@ -1154,11 +1091,16 @@ int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n"); if (unlikely(!ipa_q6_clnt)) return -ETIMEDOUT; - rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, - sizeof(struct ipa_get_data_stats_req_msg_v01), - &resp_desc, resp, - sizeof(struct ipa_get_data_stats_resp_msg_v01), - QMI_SEND_STATS_REQ_TIMEOUT_MS); + rc = 
ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + &resp_desc, resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_GET_DATA_STATS_REQ_V01, + rc); + return rc; + } IPAWANDBG_LOW("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n"); @@ -1170,7 +1112,7 @@ int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, struct ipa_get_apn_data_stats_resp_msg_v01 *resp) { - struct msg_desc req_desc, resp_desc; + struct ipa_msg_desc req_desc, resp_desc; int rc; req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01; @@ -1184,11 +1126,16 @@ int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n"); if (unlikely(!ipa_q6_clnt)) return -ETIMEDOUT; - rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, - sizeof(struct ipa_get_apn_data_stats_req_msg_v01), - &resp_desc, resp, - sizeof(struct ipa_get_apn_data_stats_resp_msg_v01), - QMI_SEND_STATS_REQ_TIMEOUT_MS); + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + &resp_desc, resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_GET_APN_DATA_STATS_REQ_V01, + rc); + return rc; + } IPAWANDBG_LOW("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n"); @@ -1200,7 +1147,7 @@ int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req, int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req) { struct ipa_set_data_usage_quota_resp_msg_v01 resp; - struct msg_desc req_desc, resp_desc; + struct ipa_msg_desc req_desc, resp_desc; int rc; memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01)); @@ -1217,10 +1164,16 @@ int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req) IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n"); if (unlikely(!ipa_q6_clnt)) return 
-ETIMEDOUT; - rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, - sizeof(struct ipa_set_data_usage_quota_req_msg_v01), - &resp_desc, &resp, sizeof(resp), - QMI_SEND_STATS_REQ_TIMEOUT_MS); + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, req, + &resp_desc, &resp, + QMI_SEND_STATS_REQ_TIMEOUT_MS); + + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, + rc); + return rc; + } IPAWANDBG_LOW("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n"); @@ -1233,7 +1186,7 @@ int ipa_qmi_stop_data_qouta(void) { struct ipa_stop_data_usage_quota_req_msg_v01 req; struct ipa_stop_data_usage_quota_resp_msg_v01 resp; - struct msg_desc req_desc, resp_desc; + struct ipa_msg_desc req_desc, resp_desc; int rc; memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01)); @@ -1252,10 +1205,17 @@ int ipa_qmi_stop_data_qouta(void) IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n"); if (unlikely(!ipa_q6_clnt)) return -ETIMEDOUT; - rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req), - &resp_desc, &resp, sizeof(resp), + rc = ipa_qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, + &resp_desc, &resp, QMI_SEND_STATS_REQ_TIMEOUT_MS); + if (rc < 0) { + IPAWANERR("QMI send Req %d failed, rc= %d\n", + QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, + rc); + return rc; + } + IPAWANDBG_LOW("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n"); return ipa_check_qmi_response(rc, diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h index 3ce809f8d317..8a4c9797ecd5 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include "ipa_i.h" #include @@ -81,6 +81,8 @@ int num_ipa_fltr_installed_notif_req_msg; struct ipa_fltr_installed_notif_req_msg_v01 ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE]; bool modem_cfg_emb_pipe_flt; 
+struct sockaddr_qrtr client_sq; +struct sockaddr_qrtr server_sq; }; struct rmnet_mux_val { @@ -92,30 +94,34 @@ struct rmnet_mux_val { uint32_t hdr_hdl; }; -extern struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[]; -extern struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_indication_reg_req_msg_data_v01_ei[]; -extern struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[]; -extern struct elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[]; -extern struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[]; -extern struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[]; -extern struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[]; -extern struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_config_req_msg_data_v01_ei[]; -extern struct elem_info ipa_config_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[]; -extern struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[]; -extern struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[]; -extern struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[]; -extern struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[]; -extern struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[]; -extern struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[]; +extern struct qmi_elem_info 
ipa_init_modem_driver_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_indication_reg_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_indication_reg_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[]; +extern struct + qmi_elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct + qmi_elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct + qmi_elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[]; +extern struct + qmi_elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_config_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_config_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_get_data_stats_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[]; +extern struct qmi_elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[]; /** * struct ipa_rmnet_context - IPA rmnet context diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c index 8e09937002fc..1c1b309077cb 100644 --- 
a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c @@ -3,13 +3,14 @@ * Copyright (c) 2013-2017, 2020, The Linux Foundation. All rights reserved. */ -#include #include -#include +#include + +#include "ipa_qmi_service.h" /* Type Definitions */ -static struct elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -35,7 +36,7 @@ static struct elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_route_tbl_info_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_route_tbl_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -61,7 +62,7 @@ static struct elem_info ipa_route_tbl_info_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_modem_mem_info_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_modem_mem_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -87,7 +88,7 @@ static struct elem_info ipa_modem_mem_info_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -115,7 +116,7 @@ static struct elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_zip_tbl_info_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_zip_tbl_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -141,7 +142,7 @@ static struct elem_info ipa_zip_tbl_info_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, @@ -179,7 +180,7 @@ static struct elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = 
{ }, }; -static struct elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, @@ -217,7 +218,7 @@ static struct elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, @@ -244,7 +245,7 @@ static struct elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, @@ -270,7 +271,7 @@ static struct elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, @@ -308,7 +309,7 @@ static struct elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_filter_rule_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, @@ -545,7 +546,7 @@ static struct elem_info ipa_filter_rule_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_filter_spec_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -626,7 +627,7 @@ static struct elem_info ipa_filter_spec_type_data_v01_ei[] = { }, }; -static struct elem_info +static struct qmi_elem_info ipa_filter_rule_identifier_to_handle_map_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, @@ -655,7 +656,7 @@ static struct elem_info }, }; -static struct elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = { +static struct qmi_elem_info 
ipa_filter_handle_to_index_map_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -683,7 +684,7 @@ static struct elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = { }, }; -struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -918,7 +919,7 @@ struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -928,7 +929,7 @@ struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, @@ -977,7 +978,7 @@ struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -1025,7 +1026,7 @@ struct elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1035,7 +1036,7 @@ struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_indication_reg_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, @@ -1044,7 +1045,7 @@ struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len 
= 1, @@ -1054,7 +1055,7 @@ struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = { .offset = offsetof(struct ipa_master_driver_init_complt_ind_msg_v01, master_driver_init_status), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, @@ -1063,7 +1064,7 @@ struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -1192,7 +1193,7 @@ struct elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1202,7 +1203,7 @@ struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_install_fltr_rule_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, @@ -1244,7 +1245,7 @@ struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1494,7 +1495,7 @@ struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1504,7 +1505,7 @@ struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_fltr_installed_notif_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, @@ -1513,7 
+1514,7 @@ struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1561,7 +1562,7 @@ struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1571,7 +1572,7 @@ struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_enable_force_clear_datapath_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, @@ -1580,7 +1581,7 @@ struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1598,7 +1599,7 @@ struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1608,7 +1609,7 @@ struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_disable_force_clear_datapath_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, @@ -1617,7 +1618,7 @@ struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_config_req_msg_data_v01_ei[] = { +struct qmi_elem_info 
ipa_config_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -1865,7 +1866,7 @@ struct elem_info ipa_config_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_config_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_config_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -1875,7 +1876,7 @@ struct elem_info ipa_config_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_config_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, @@ -1884,7 +1885,7 @@ struct elem_info ipa_config_resp_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, @@ -1922,7 +1923,7 @@ struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { }, }; -static struct elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -1975,7 +1976,7 @@ static struct elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { }, }; -static struct elem_info ipa_stats_type_filter_rule_data_v01_ei[] = { +static struct qmi_elem_info ipa_stats_type_filter_rule_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -2003,7 +2004,7 @@ static struct elem_info ipa_stats_type_filter_rule_data_v01_ei[] = { }, }; -struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -2013,7 +2014,7 @@ struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, @@ -2135,7 +2136,7 @@ struct elem_info 
ipa_get_data_stats_resp_msg_data_v01_ei[] = { }, }; -static struct elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -2193,7 +2194,7 @@ static struct elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { }, }; -struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -2231,7 +2232,7 @@ struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -2241,7 +2242,7 @@ struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_get_apn_data_stats_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_OPT_FLAG, @@ -2282,7 +2283,7 @@ struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { }, }; -static struct elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = { +static struct qmi_elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = { { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, @@ -2310,7 +2311,7 @@ static struct elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = { }, }; -struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { { .data_type = QMI_OPT_FLAG, .elem_len = 1, @@ -2350,7 +2351,7 @@ struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -2360,7 +2361,7 @@ struct elem_info 
ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_set_data_usage_quota_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, @@ -2369,7 +2370,7 @@ struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -2389,7 +2390,7 @@ struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[] = { /* ipa_stop_data_usage_quota_req_msg is empty */ { .data_type = QMI_EOTI, @@ -2398,7 +2399,7 @@ struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[] = { }, }; -struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = { +struct qmi_elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = { { .data_type = QMI_STRUCT, .elem_len = 1, @@ -2408,7 +2409,7 @@ struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = { .offset = offsetof( struct ipa_stop_data_usage_quota_resp_msg_v01, resp), - .ei_array = get_qmi_response_type_v01_ei(), + .ei_array = qmi_response_type_v01_ei, }, { .data_type = QMI_EOTI, diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c index 74b4fe632475..6a268ca091e1 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c @@ -200,7 +200,7 @@ union IpaHwUpdateFlagsCmdData_t { u32 raw32b; }; -struct ipa_uc_hdlrs uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } }; +static struct ipa_uc_hdlrs uc_hdlrs[IPA_HW_NUM_FEATURES] = { { NULL } }; static inline const char *ipa_hw_error_str(enum ipa_hw_errors err_type) { @@ -269,7 +269,6 @@ static void ipa_log_evt_hdlr(void) 
ipa_ctx->uc_ctx.uc_event_top_ofst); } } - return; bad_uc_top_ofst: @@ -522,9 +521,8 @@ int ipa_uc_interface_init(void) IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0( ipa_ctx->smem_restricted_bytes / 4); } - ipa_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr, - IPA_RAM_UC_SMEM_SIZE); + IPA_RAM_UC_SMEM_SIZE); if (!ipa_ctx->uc_ctx.uc_sram_mmio) { IPAERR("Fail to ioremap IPA uC SRAM\n"); result = -ENOMEM; @@ -642,9 +640,9 @@ int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status, timeout_jiffies) == 0) { IPAERR("uC timed out\n"); if (ipa_ctx->uc_ctx.uc_failed) { - IPAERR("uC reported on Error, - errorType = %s\n", ipa_hw_error_str( - ipa_ctx->uc_ctx.uc_error_type)); + IPAERR("uC reported on Error,errorType = %s\n", + ipa_hw_error_str( + ipa_ctx->uc_ctx.uc_error_type)); } mutex_unlock(&ipa_ctx->uc_ctx.uc_lock); ipa_assert(); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c index c77694d12e9b..2a7cada8cce1 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c @@ -596,7 +596,7 @@ int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void)) void ipa2_uc_mhi_cleanup(void) { - struct ipa_uc_hdlrs null_hdlrs = { 0 }; + struct ipa_uc_hdlrs null_hdlrs = { NULL }; IPADBG("Enter\n"); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c index bf4c8c57d5e9..d49acc534f69 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c @@ -189,7 +189,7 @@ static void ipa_uc_ntn_loaded_handler(void) int ipa_ntn_init(void) { - struct ipa_uc_hdlrs uc_ntn_cbs = { 0 }; + struct ipa_uc_hdlrs uc_ntn_cbs = { NULL }; uc_ntn_cbs.ipa_uc_event_hdlr = ipa_uc_ntn_event_handler; uc_ntn_cbs.ipa_uc_event_log_info_hdlr = diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c index e76657e5ac20..1fe1ed37fc9d 100644 --- 
a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c @@ -435,7 +435,7 @@ int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats) int ipa2_wdi_init(void) { - struct ipa_uc_hdlrs uc_wdi_cbs = { 0 }; + struct ipa_uc_hdlrs uc_wdi_cbs = { NULL}; uc_wdi_cbs.ipa_uc_event_hdlr = ipa_uc_wdi_event_handler; uc_wdi_cbs.ipa_uc_event_log_info_hdlr = diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c index 95cfc4a28862..fd85997c7f82 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c @@ -159,6 +159,11 @@ static const struct ipa_ep_confing ep_mapping[3][IPA_CLIENT_MAX] = { [IPA_2_6L][IPA_CLIENT_USB_PROD] = {true, 1}, + [IPA_2_6L][IPA_CLIENT_WLAN1_PROD] = {true, 18}, + [IPA_2_6L][IPA_CLIENT_WLAN1_CONS] = {true, 17}, + [IPA_2_6L][IPA_CLIENT_WLAN2_CONS] = {true, 16}, + [IPA_2_6L][IPA_CLIENT_WLAN3_CONS] = {true, 15}, + [IPA_2_6L][IPA_CLIENT_WLAN4_CONS] = {true, 19}, [IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD] = {true, 4}, [IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD] = {true, 3}, [IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD] = {true, 6}, @@ -343,7 +348,7 @@ void ipa_active_clients_unlock(void) * * Return codes: 0 on success, negative on failure. 
*/ -int ipa_get_clients_from_rm_resource( +static int ipa_get_clients_from_rm_resource( enum ipa_rm_resource_name resource, struct ipa_client_names *clients) { @@ -625,7 +630,7 @@ int ipa2_resume_resource(enum ipa_rm_resource_name resource) * In case of IPAv2.0 this will also supply an offset from * which we can start write */ -void _ipa_sram_settings_read_v1_1(void) +static void _ipa_sram_settings_read_v1_1(void) { ipa_ctx->smem_restricted_bytes = 0; ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio, @@ -638,7 +643,7 @@ void _ipa_sram_settings_read_v1_1(void) ipa_ctx->ip6_flt_tbl_lcl = true; } -void _ipa_sram_settings_read_v2_0(void) +static void _ipa_sram_settings_read_v2_0(void) { ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2_0, @@ -656,7 +661,7 @@ void _ipa_sram_settings_read_v2_0(void) ipa_ctx->ip6_flt_tbl_lcl = false; } -void _ipa_sram_settings_read_v2_5(void) +static void _ipa_sram_settings_read_v2_5(void) { ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2_0, @@ -684,7 +689,7 @@ void _ipa_sram_settings_read_v2_5(void) ipa_ctx->ip6_flt_tbl_lcl = false; } -void _ipa_sram_settings_read_v2_6L(void) +static void _ipa_sram_settings_read_v2_6L(void) { ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio, IPA_SHARED_MEM_SIZE_OFST_v2_0, @@ -702,7 +707,7 @@ void _ipa_sram_settings_read_v2_6L(void) ipa_ctx->ip6_flt_tbl_lcl = false; } -void _ipa_cfg_route_v1_1(struct ipa_route *route) +static void _ipa_cfg_route_v1_1(struct ipa_route *route) { u32 reg_val = 0; @@ -725,7 +730,7 @@ void _ipa_cfg_route_v1_1(struct ipa_route *route) ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val); } -void _ipa_cfg_route_v2_0(struct ipa_route *route) +static void _ipa_cfg_route_v2_0(struct ipa_route *route) { u32 reg_val = 0; @@ -815,7 +820,7 @@ int ipa_init_hw(void) ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0); /* enable IPA */ - ipa_write_reg(ipa_ctx->mmio, 
IPA_COMP_CFG_OFST, 1); + ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 0x11); /* Read IPA version and make sure we have access to the registers */ ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST); @@ -1015,7 +1020,7 @@ enum ipa_client_type ipa2_get_client_mapping(int pipe_idx) return ipa_ctx->ep[pipe_idx].client; } -void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset, +static void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN], const uint8_t mac_addr[ETH_ALEN]) { @@ -1698,7 +1703,7 @@ int ipa_generate_hw_rule(enum ipa_ip_type ip, return 0; } -void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb, +static void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb, u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN], const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128) { @@ -2415,7 +2420,7 @@ int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg) return 0; } -const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en) +static const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en) { switch (nat_en) { case (IPA_BYPASS_NAT): @@ -2429,7 +2434,7 @@ const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en) return "undefined"; } -void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl, +static void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) { u32 reg_val = 0; @@ -2443,7 +2448,7 @@ void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl, reg_val); } -void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl, +static void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat) { u32 reg_val = 0; @@ -2676,7 +2681,7 @@ int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl, return 0; } -void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number, +static void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number, const struct ipa_ep_cfg_hdr *ep_hdr) { u32 val = 0; @@ -2706,7 +2711,7 @@ void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number, IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe_number), val); } -void 
_ipa_cfg_ep_hdr_v2_0(u32 pipe_number, +static void _ipa_cfg_ep_hdr_v2_0(u32 pipe_number, const struct ipa_ep_cfg_hdr *ep_hdr) { u32 reg_val = 0; @@ -3030,7 +3035,7 @@ int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity) } EXPORT_SYMBOL(ipa_cfg_eot_coal_cntr_granularity); -const char *ipa_get_mode_type_str(enum ipa_mode_type mode) +static const char * const ipa_get_mode_type_str(enum ipa_mode_type mode) { switch (mode) { case (IPA_BASIC): @@ -3046,7 +3051,7 @@ const char *ipa_get_mode_type_str(enum ipa_mode_type mode) return "undefined"; } -void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number, +static void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number, const struct ipa_ep_cfg_mode *ep_mode) { u32 reg_val = 0; @@ -3063,7 +3068,7 @@ void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number, IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe_number), reg_val); } -void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 dst_pipe_number, +static void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 dst_pipe_number, const struct ipa_ep_cfg_mode *ep_mode) { u32 reg_val = 0; @@ -3142,7 +3147,7 @@ int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) return 0; } -const char *get_aggr_enable_str(enum ipa_aggr_en_type aggr_en) +static const char * const get_aggr_enable_str(enum ipa_aggr_en_type aggr_en) { switch (aggr_en) { case (IPA_BYPASS_AGGR): @@ -3156,7 +3161,7 @@ const char *get_aggr_enable_str(enum ipa_aggr_en_type aggr_en) return "undefined"; } -const char *get_aggr_type_str(enum ipa_aggr_type aggr_type) +static const char * const get_aggr_type_str(enum ipa_aggr_type aggr_type) { switch (aggr_type) { case (IPA_MBIM_16): @@ -3171,11 +3176,13 @@ const char *get_aggr_type_str(enum ipa_aggr_type aggr_type) return "GENERIC"; case (IPA_QCMAP): return "QCMAP"; + case (IPA_COALESCE): + return "COALESCE"; } return "undefined"; } -void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number, +static void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number, const struct 
ipa_ep_cfg_aggr *ep_aggr) { u32 reg_val = 0; @@ -3200,7 +3207,7 @@ void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number, IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe_number), reg_val); } -void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number, +static void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number, const struct ipa_ep_cfg_aggr *ep_aggr) { u32 reg_val = 0; @@ -3272,7 +3279,7 @@ int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr) return 0; } -void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index) +static void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index) { int reg_val = 0; @@ -3285,7 +3292,7 @@ void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index) reg_val); } -void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index) +static void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index) { int reg_val = 0; @@ -3356,7 +3363,7 @@ int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route) return 0; } -void _ipa_cfg_ep_holb_v1_1(u32 pipe_number, +static void _ipa_cfg_ep_holb_v1_1(u32 pipe_number, const struct ipa_ep_cfg_holb *ep_holb) { ipa_write_reg(ipa_ctx->mmio, @@ -3368,7 +3375,7 @@ void _ipa_cfg_ep_holb_v1_1(u32 pipe_number, (u16)ep_holb->tmr_val); } -void _ipa_cfg_ep_holb_v2_0(u32 pipe_number, +static void _ipa_cfg_ep_holb_v2_0(u32 pipe_number, const struct ipa_ep_cfg_holb *ep_holb) { ipa_write_reg(ipa_ctx->mmio, @@ -3380,7 +3387,7 @@ void _ipa_cfg_ep_holb_v2_0(u32 pipe_number, (u16)ep_holb->tmr_val); } -void _ipa_cfg_ep_holb_v2_5(u32 pipe_number, +static void _ipa_cfg_ep_holb_v2_5(u32 pipe_number, const struct ipa_ep_cfg_holb *ep_holb) { ipa_write_reg(ipa_ctx->mmio, @@ -3392,7 +3399,7 @@ void _ipa_cfg_ep_holb_v2_5(u32 pipe_number, ep_holb->tmr_val); } -void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number, +static void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number, const struct ipa_ep_cfg_holb *ep_holb) { ipa_write_reg(ipa_ctx->mmio, @@ -3579,7 +3586,8 @@ static void _ipa_cfg_ep_metadata_v2_0(u32 pipe_number, * * Note: Should not be called from 
atomic context */ -int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md) +static int ipa2_cfg_ep_metadata(u32 clnt_hdl, + const struct ipa_ep_cfg_metadata *ep_md) { if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ipa_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) { @@ -4305,7 +4313,7 @@ static void ipa_init_mem_partition_v2_6L(void) * * @ctrl: data structure which holds the function pointers */ -void ipa_controller_shared_static_bind(struct ipa_controller *ctrl) +static void ipa_controller_shared_static_bind(struct ipa_controller *ctrl) { ctrl->ipa_init_rt4 = _ipa_init_rt4_v2; ctrl->ipa_init_rt6 = _ipa_init_rt6_v2; @@ -4894,7 +4902,7 @@ bool ipa2_get_modem_cfg_emb_pipe_flt(void) * * Return value: enum ipa_transport_type */ -enum ipa_transport_type ipa2_get_transport_type(void) +static enum ipa_transport_type ipa2_get_transport_type(void) { return IPA_TRANSPORT_TYPE_SPS; } @@ -4914,7 +4922,8 @@ EXPORT_SYMBOL(ipa_get_num_pipes); * * Return value: 0 or negative in case of failure */ -int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count) +static int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, + uint32_t agg_count) { int res = -1; @@ -5115,8 +5124,6 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type, ipa2_mhi_reset_channel_internal; api_ctrl->ipa_mhi_start_channel_internal = ipa2_mhi_start_channel_internal; - api_ctrl->ipa_mhi_resume_channels_internal = - ipa2_mhi_resume_channels_internal; api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info = ipa2_uc_mhi_send_dl_ul_sync_info; api_ctrl->ipa_uc_mhi_init = ipa2_uc_mhi_init; diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index eca80f9e2339..418a142a2911 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -71,7 +71,7 @@ static atomic_t is_initialized; static atomic_t is_ssr; static void *subsys_notify_handle; -u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler 
from ipa */ +static u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */ static struct mutex ipa_to_apps_pipe_handle_guard; static struct mutex add_mux_channel_lock; static int wwan_add_ul_flt_rule_to_ipa(void); @@ -430,10 +430,44 @@ int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01 ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl = UL_FILTER_RULE_HANDLE_START + i; rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl; - ipa_qmi_ctx->q6_ul_filter_rule[i].ip = - rule_req->filter_spec_list[i].ip_type; - ipa_qmi_ctx->q6_ul_filter_rule[i].action = - rule_req->filter_spec_list[i].filter_action; + switch (rule_req->filter_spec_list[i].ip_type) { + case QMI_IPA_IP_TYPE_V4_V01: + ipa_qmi_ctx->q6_ul_filter_rule[i].ip = IPA_IP_v4; + break; + + case QMI_IPA_IP_TYPE_V6_V01: + ipa_qmi_ctx->q6_ul_filter_rule[i].ip = IPA_IP_v6; + break; + + case QMI_IPA_IP_TYPE_V4V6_V01: + /* Fall through */ + default: + ipa_qmi_ctx->q6_ul_filter_rule[i].ip = IPA_IP_MAX; + break; + } + + switch (rule_req->filter_spec_list[i].filter_action) { + case QMI_IPA_FILTER_ACTION_SRC_NAT_V01: + ipa_qmi_ctx->q6_ul_filter_rule[i].action = + IPA_PASS_TO_SRC_NAT; + break; + case QMI_IPA_FILTER_ACTION_DST_NAT_V01: + ipa_qmi_ctx->q6_ul_filter_rule[i].action = + IPA_PASS_TO_DST_NAT; + break; + + case QMI_IPA_FILTER_ACTION_ROUTING_V01: + ipa_qmi_ctx->q6_ul_filter_rule[i].action = + IPA_PASS_TO_ROUTING; + break; + + case QMI_IPA_FILTER_ACTION_EXCEPTION_V01: + /* Fall through */ + default: + ipa_qmi_ctx->q6_ul_filter_rule[i].action = + IPA_PASS_TO_EXCEPTION; + break; + } if (rule_req->filter_spec_list[i].is_routing_table_index_valid == true) ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx = @@ -1229,7 +1263,6 @@ static int handle_ingress_format(struct net_device *dev, struct rmnet_ioctl_extended_s *in) { int ret = 0; - struct rmnet_phys_ep_conf_s *ep_cfg; IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n"); if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM) @@ -1250,14 +1283,6 
@@ static int handle_ingress_format(struct net_device *dev, in->u.ingress_format.agg_size; ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit = in->u.ingress_format.agg_count; - - if (ipa_rmnet_res.ipa_napi_enable) { - ipa_to_apps_ep_cfg.recycle_enabled = true; - ep_cfg = (struct rmnet_phys_ep_conf_s *) - rcu_dereference(dev->rx_handler_data); - ep_cfg->recycle = ipa_recycle_wan_skb; - pr_info("Wan Recycle Enabled\n"); - } } } @@ -1395,7 +1420,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EPERM; IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n"); if (copy_from_user(&extend_ioctl_data, - (u8 *)ifr->ifr_ifru.ifru_data, + (const void __user *)ifr->ifr_ifru.ifru_data, sizeof(struct rmnet_ioctl_extended_s))) { IPAWANERR("failed to copy extended ioctl data\n"); rc = -EFAULT; @@ -1409,7 +1434,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) (RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL | RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT | RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT); - if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, &extend_ioctl_data, sizeof(struct rmnet_ioctl_extended_s))) rc = -EFAULT; @@ -1423,7 +1448,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Get MRU */ case RMNET_IOCTL_GET_MRU: extend_ioctl_data.u.data = mru; - if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, &extend_ioctl_data, sizeof(struct rmnet_ioctl_extended_s))) rc = -EFAULT; @@ -1432,7 +1457,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case RMNET_IOCTL_GET_SG_SUPPORT: extend_ioctl_data.u.data = ipa_rmnet_res.ipa_advertise_sg_support; - if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, &extend_ioctl_data, sizeof(struct rmnet_ioctl_extended_s))) rc = -EFAULT; @@ -1441,12 +1466,12 @@ static int 
ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case RMNET_IOCTL_GET_EPID: IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n"); extend_ioctl_data.u.data = epid; - if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, &extend_ioctl_data, sizeof(struct rmnet_ioctl_extended_s))) rc = -EFAULT; if (copy_from_user(&extend_ioctl_data, - (u8 *)ifr->ifr_ifru.ifru_data, + (const void __user *)ifr->ifr_ifru.ifru_data, sizeof(struct rmnet_ioctl_extended_s))) { IPAWANERR("copy extended ioctl data failed\n"); rc = -EFAULT; @@ -1462,12 +1487,12 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD); extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num = ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS); - if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, &extend_ioctl_data, sizeof(struct rmnet_ioctl_extended_s))) rc = -EFAULT; if (copy_from_user(&extend_ioctl_data, - (u8 *)ifr->ifr_ifru.ifru_data, + (const void __user *)ifr->ifr_ifru.ifru_data, sizeof(struct rmnet_ioctl_extended_s))) { IPAWANERR("copy extended ioctl data failed\n"); rc = -EFAULT; @@ -1482,7 +1507,7 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) memcpy(&extend_ioctl_data.u.if_name, ipa_netdevs[0]->name, IFNAMSIZ); extend_ioctl_data.u.if_name[IFNAMSIZ - 1] = '\0'; - if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data, + if (copy_to_user((void __user *)ifr->ifr_ifru.ifru_data, &extend_ioctl_data, sizeof(struct rmnet_ioctl_extended_s))) rc = -EFAULT; @@ -1677,8 +1702,8 @@ static const struct net_device_ops ipa_wwan_ops_ip = { .ndo_tx_timeout = ipa_wwan_tx_timeout, .ndo_do_ioctl = ipa_wwan_ioctl, .ndo_change_mtu = ipa_wwan_change_mtu, - .ndo_set_mac_address = 0, - .ndo_validate_addr = 0, + .ndo_set_mac_address = NULL, + .ndo_validate_addr = NULL, }; /** @@ -1695,7 +1720,7 @@ static void 
ipa_wwan_setup(struct net_device *dev) dev->netdev_ops = &ipa_wwan_ops_ip; ether_setup(dev); /* set this after calling ether_setup */ - dev->header_ops = 0; /* No header */ + dev->header_ops = NULL; /* No header */ dev->type = ARPHRD_RAWIP; dev->hard_header_len = 0; dev->mtu = WWAN_DATA_LEN; @@ -1827,7 +1852,7 @@ static int q6_initialize_rm(void) return result; } -void q6_deinitialize_rm(void) +static void q6_deinitialize_rm(void) { int ret; @@ -2803,7 +2828,7 @@ static int rmnet_ipa_query_tethering_stats_wifi( return rc; } -int rmnet_ipa_query_tethering_stats_modem( +static int rmnet_ipa_query_tethering_stats_modem( struct wan_ioctl_query_tether_stats *data, bool reset ) diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c index d6fbfa86cae3..91064e5ea9c4 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c @@ -51,6 +51,9 @@ static struct cdev wan_ioctl_cdev; static unsigned int process_ioctl = 1; static struct class *class; static dev_t device; +#ifdef CONFIG_COMPAT +long compat_wan_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +#endif static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -76,7 +79,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -86,7 +89,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -101,7 +104,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if 
(copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -111,7 +114,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -126,7 +129,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -135,7 +138,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -149,7 +152,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -159,7 +162,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -173,7 +176,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -187,7 +190,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -EFAULT; break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -201,7 +204,7 @@ static long wan_ioctl(struct file *filp, 
unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -221,7 +224,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -233,7 +236,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -247,7 +250,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -259,7 +262,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } - if (copy_to_user((u8 *)arg, param, pyld_sz)) { + if (copy_to_user((void __user *)arg, param, pyld_sz)) { retval = -EFAULT; break; } @@ -273,7 +276,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = -ENOMEM; break; } - if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + if (copy_from_user(param, (const void __user *)arg, pyld_sz)) { retval = -EFAULT; break; } @@ -334,7 +337,7 @@ static int wan_ioctl_open(struct inode *inode, struct file *filp) return 0; } -const struct file_operations fops = { +static const struct file_operations fops = { .owner = THIS_MODULE, .open = wan_ioctl_open, .read = NULL, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h index 4ae011659c4a..4136cee8c709 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h @@ -179,12 +179,12 @@ static 
inline int ipa_pm_exceptions_stat(char *buf, int size) return -EPERM; } -static inline int ipa_pm_add_dummy_clients(s8 power_plan); +static inline int ipa_pm_add_dummy_clients(s8 power_plan) { return -EPERM; } -static inline int ipa_pm_remove_dummy_clients(void); +static inline int ipa_pm_remove_dummy_clients(void) { return -EPERM; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h index 37406c8ebf30..1292b9c9c6cd 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -484,11 +484,13 @@ static inline int ipa3_qmi_send_mhi_ready_indication( return -EPERM; } +#ifdef CONFIG_RMNET_IPA3 static int ipa3_qmi_send_rsc_pipe_indication( struct ipa_endp_desc_indication_msg_v01 *req) { return -EPERM; } +#endif /* CONFIG_RMNET_IPA3 */ static inline int ipa3_qmi_send_mhi_cleanup_request( struct ipa_mhi_cleanup_req_msg_v01 *req) diff --git a/include/linux/ipa.h b/include/linux/ipa.h index 9ad4cb0550b3..c102d2b23b7f 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -11,6 +11,7 @@ #include #include #include "linux/msm_gsi.h" +#include #define IPA_APPS_MAX_BW_IN_MBPS 700 #define IPA_BW_THRESHOLD_MAX 3 @@ -97,6 +98,8 @@ enum ipa_aggr_mode { enum ipa_dp_evt_type { IPA_RECEIVE, IPA_WRITE_DONE, + IPA_CLIENT_START_POLL, + IPA_CLIENT_COMP_NAPI, }; /** @@ -572,6 +575,60 @@ struct ipa_inform_wlan_bw { typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt, void *data); +/** + * struct ipa_connect_params - low-level client connect input parameters. Either + * client allocates the data and desc FIFO and specifies that in data+desc OR + * specifies sizes and pipe_mem pref and IPA does the allocation. 
+ * + * @ipa_ep_cfg: IPA EP configuration + * @client: type of "client" + * @client_bam_hdl: client SPS handle + * @client_ep_idx: client PER EP index + * @priv: callback cookie + * @notify: callback + * priv - callback cookie evt - type of event data - data relevant + * to event. May not be valid. See event_type enum for valid + * cases. + * @desc_fifo_sz: size of desc FIFO + * @data_fifo_sz: size of data FIFO + * @pipe_mem_preferred: if true, try to alloc the FIFOs in pipe mem, fallback + * to sys mem if pipe mem alloc fails + * @desc: desc FIFO meta-data when client has allocated it + * @data: data FIFO meta-data when client has allocated it + * @skip_ep_cfg: boolean field that determines if EP should be configured + * by IPA driver + * @keep_ipa_awake: when true, IPA will not be clock gated + */ +struct ipa_connect_params { + struct ipa_ep_cfg ipa_ep_cfg; + enum ipa_client_type client; + unsigned long client_bam_hdl; + u32 client_ep_idx; + void *priv; + ipa_notify_cb notify; + u32 desc_fifo_sz; + u32 data_fifo_sz; + bool pipe_mem_preferred; + struct sps_mem_buffer desc; + struct sps_mem_buffer data; + bool skip_ep_cfg; + bool keep_ipa_awake; +}; + +/** + * struct ipa_sps_params - SPS related output parameters resulting from + * low/high level client connect + * @ipa_bam_hdl: IPA SPS handle + * @ipa_ep_idx: IPA PER EP index + * @desc: desc FIFO meta-data + * @data: data FIFO meta-data + */ +struct ipa_sps_params { + unsigned long ipa_bam_hdl; + u32 ipa_ep_idx; + struct sps_mem_buffer desc; + struct sps_mem_buffer data; +}; /** * struct ipa_tx_intf - interface tx properties @@ -637,6 +694,7 @@ struct ipa_sys_connect_params { bool skip_ep_cfg; bool keep_ipa_awake; struct napi_struct *napi_obj; + bool napi_enabled; bool recycle_enabled; }; @@ -848,12 +906,15 @@ struct ipa_rx_page_data { */ enum ipa_irq_type { IPA_BAD_SNOC_ACCESS_IRQ, + IPA_EOT_COAL_IRQ, IPA_UC_IRQ_0, IPA_UC_IRQ_1, IPA_UC_IRQ_2, IPA_UC_IRQ_3, IPA_UC_IN_Q_NOT_EMPTY_IRQ, IPA_UC_RX_CMD_Q_NOT_FULL_IRQ, 
+ IPA_UC_TX_CMD_Q_NOT_FULL_IRQ, + IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ, IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ, IPA_RX_ERR_IRQ, IPA_DEAGGR_ERR_IRQ, @@ -862,6 +923,8 @@ enum ipa_irq_type { IPA_PROC_ERR_IRQ, IPA_TX_SUSPEND_IRQ, IPA_TX_HOLB_DROP_IRQ, + IPA_BAM_IDLE_IRQ, + IPA_GSI_IDLE_IRQ = IPA_BAM_IDLE_IRQ, IPA_BAM_GSI_IDLE_IRQ, IPA_PIPE_YELLOW_MARKER_BELOW_IRQ, IPA_PIPE_RED_MARKER_BELOW_IRQ, @@ -1306,6 +1369,13 @@ struct ipa_smmu_out_params { #if defined CONFIG_IPA || defined CONFIG_IPA3 +/* + * Connect / Disconnect + */ +int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps, + u32 *clnt_hdl); +int ipa_disconnect(u32 clnt_hdl); + /* * Resume / Suspend */ -- GitLab From e166c6a8e11deef368d942c8fb5e027e5ec4d410 Mon Sep 17 00:00:00 2001 From: Ashok Raj Deenadayalan Date: Fri, 28 Aug 2020 22:55:08 +0530 Subject: [PATCH 1264/1304] msm: ipa: Add Kconfig changes of IPA2 driver Add Kconfig changes to support IPAv2 driver for sdm660 target. Change-Id: I093adf36f0e04a090e802c1ec064d9d6e3deac1a Signed-off-by: Praveen Kurapati Signed-off-by: Ashok Raj Deenadayalan Signed-off-by: Swetha Chikkaboraiah --- drivers/platform/msm/Kconfig | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index bb646fb97b72..273ed10ecd3b 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -87,6 +87,28 @@ config IPA3 Kernel and user-space processes can call the IPA driver to configure IPA core. +config IPA + tristate "IPA support" + depends on SPS && NET + help + This driver supports the Internet Packet Accelerator (IPA3) core. + IPA is a programmable protocol processor HW block. + It is designed to support generic HW processing of UL/DL IP packets + for various use cases independent of radio technology. + The driver support client connection and configuration + for the IPA core. 
+ Kernel and user-space processes can call the IPA driver + to configure IPA core. + +config RMNET_IPA + tristate "IPA RMNET WWAN Network Device" + depends on IPA && QCOM_QMI_HELPERS + help + This WWAN Network Driver implements network stack class device. + It supports Embedded data transfer from A7 to Q6. Configures IPA HW + for RmNet Data Driver and also exchange of QMI messages between + A7 and Q6 IPA-driver. + config IPA_DEBUG bool "IPA DEBUG for non-perf build" depends on IPA3 @@ -117,7 +139,7 @@ config RMNET_IPA3 config ECM_IPA tristate "STD ECM LAN Driver support" - depends on IPA3 + depends on IPA || IPA3 help Enables LAN between applications processor and a tethered host using the STD ECM protocol. @@ -126,7 +148,7 @@ config ECM_IPA config RNDIS_IPA tristate "RNDIS_IPA Network Interface Driver support" - depends on IPA3 + depends on IPA || IPA3 help Enables LAN between applications processor and a tethered host using the RNDIS protocol. -- GitLab From b68bb0aafc1ae0ed61c1c45e92122d0649fc7817 Mon Sep 17 00:00:00 2001 From: Ashok Raj Deenadayalan Date: Fri, 28 Aug 2020 22:52:19 +0530 Subject: [PATCH 1265/1304] msm: ipa2: Add change to fix ipa padding Due to incorrect padding, rule attributes are not present at 32bit multiples, which is leading to rule geenration failure. Add changes to the rule padding. 
Change-Id: I6f2c7e6078bdc11e9d02c2d70803e9e839d48fbb Signed-off-by: Praveen Kurapati Signed-off-by: Ashok Raj Deenadayalan Signed-off-by: Swetha Chikkaboraiah --- drivers/platform/msm/ipa/ipa_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 1f67c3953646..580a3ce29767 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -335,7 +335,7 @@ u8 *ipa_pad_to_32(u8 *dest) return dest; } - i = (long)dest & 0x7; + i = (long)dest & 0x3; if (i) for (j = 0; j < (4 - i); j++) -- GitLab From fc218ba267db245c6ab35009ea026c18f4f1c324 Mon Sep 17 00:00:00 2001 From: Ashok Raj Deenadayalan Date: Sun, 30 Aug 2020 16:09:12 +0530 Subject: [PATCH 1266/1304] msm: ipa2: Add changes compatible to kernel-4.19 Previously deprecated smmu calls have been removed. These changes also caused the reorganization of a key data structure and logic. Change-Id: Ic5ac67e64483f8cc905235a3134c342df4eab79d Signed-off-by: Ashok Raj Deenadayalan --- drivers/platform/msm/ipa/ipa_api.h | 3 + drivers/platform/msm/ipa/ipa_v2/ipa.c | 383 ++++++-------- drivers/platform/msm/ipa/ipa_v2/ipa_client.c | 4 +- drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 19 +- .../msm/ipa/ipa_v2/ipa_qmi_service_v01.c | 498 +++++++++--------- drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 12 +- .../platform/msm/ipa/ipa_v3/ipa_qmi_service.h | 2 +- 7 files changed, 428 insertions(+), 493 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h index dfd72504f911..901b4c3f27b1 100644 --- a/drivers/platform/msm/ipa/ipa_api.h +++ b/drivers/platform/msm/ipa/ipa_api.h @@ -350,6 +350,8 @@ struct ipa_api_controller { enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx); + enum ipa_rm_resource_name (*ipa_get_rm_resource_from_ep)(int pipe_idx); + bool (*ipa_get_modem_cfg_emb_pipe_flt)(void); enum ipa_transport_type (*ipa_get_transport_type)(void); @@ -442,6 +444,7 @@ 
struct ipa_api_controller { struct ipa_smmu_out_params *out); int (*ipa_is_vlan_mode)(enum ipa_vlan_ifaces iface, bool *res); + bool (*ipa_pm_is_used)(void); int (*ipa_wigig_internal_init)( struct ipa_wdi_uc_ready_params *inout, ipa_wigig_misc_int_cb int_notify, diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index 7c5779297b47..7e8e083d8715 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -388,44 +388,30 @@ static void ipa2_active_clients_log_destroy(void) IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1; } -enum ipa_smmu_cb_type { - IPA_SMMU_CB_AP, - IPA_SMMU_CB_WLAN, - IPA_SMMU_CB_UC, - IPA_SMMU_CB_MAX - -}; - static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX]; -struct iommu_domain *ipa2_get_smmu_domain(void) +struct iommu_domain *ipa2_get_smmu_domain_by_type(enum ipa_smmu_cb_type cb_type) { - if (smmu_cb[IPA_SMMU_CB_AP].valid) - return smmu_cb[IPA_SMMU_CB_AP].mapping->domain; - - IPAERR("CB not valid\n"); + if (VALID_IPA_SMMU_CB_TYPE(cb_type) && smmu_cb[cb_type].valid) + return smmu_cb[cb_type].iommu_domain; + IPAERR("cb_type(%d) not valid\n", cb_type); return NULL; } -struct iommu_domain *ipa2_get_uc_smmu_domain(void) +struct iommu_domain *ipa2_get_smmu_domain(void) { - if (smmu_cb[IPA_SMMU_CB_UC].valid) - return smmu_cb[IPA_SMMU_CB_UC].mapping->domain; - - IPAERR("CB not valid\n"); + return ipa2_get_smmu_domain_by_type(IPA_SMMU_CB_AP); +} - return NULL; +struct iommu_domain *ipa2_get_uc_smmu_domain(void) +{ + return ipa2_get_smmu_domain_by_type(IPA_SMMU_CB_UC); } struct iommu_domain *ipa2_get_wlan_smmu_domain(void) { - if (smmu_cb[IPA_SMMU_CB_WLAN].valid) - return smmu_cb[IPA_SMMU_CB_WLAN].iommu; - - IPAERR("CB not valid\n"); - - return NULL; + return ipa2_get_smmu_domain_by_type(IPA_SMMU_CB_WLAN); } struct device *ipa2_get_dma_dev(void) @@ -438,9 +424,9 @@ struct device *ipa2_get_dma_dev(void) * * Return value: pointer to smmu context address */ -struct 
ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void) +struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(enum ipa_smmu_cb_type cb_type) { - return &smmu_cb[IPA_SMMU_CB_AP]; + return &smmu_cb[cb_type]; } @@ -1675,6 +1661,7 @@ static int ipa_init_smem_region(int memory_region_size, desc.len = sizeof(*cmd); desc.type = IPA_IMM_CMD_DESC; + rc = ipa_send_cmd(1, &desc); if (rc) { IPAERR("failed to send immediate command (error %d)\n", rc); @@ -1704,13 +1691,14 @@ int ipa_init_q6_smem(void) IPA_ACTIVE_CLIENTS_INC_SIMPLE(); - if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0) + if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0) { rc = ipa_init_smem_region(IPA_MEM_PART(modem_size) - IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE, IPA_MEM_PART(modem_ofst)); - else + } else { rc = ipa_init_smem_region(IPA_MEM_PART(modem_size), IPA_MEM_PART(modem_ofst)); + } if (rc) { IPAERR("failed to initialize Modem RAM memory\n"); @@ -3453,7 +3441,7 @@ void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client) ref_client, ipa_ctx->wakelock_ref_cnt.cnt); ipa_ctx->wakelock_ref_cnt.cnt |= (1 << ref_client); if (ipa_ctx->wakelock_ref_cnt.cnt) - __pm_stay_awake(&ipa_ctx->w_lock); + __pm_stay_awake(ipa_ctx->w_lock); IPADBG_LOW("active wakelock ref cnt = %d client enum %d\n", ipa_ctx->wakelock_ref_cnt.cnt, ref_client); spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags); @@ -3478,7 +3466,7 @@ void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client) IPADBG_LOW("active wakelock ref cnt = %d client enum %d\n", ipa_ctx->wakelock_ref_cnt.cnt, ref_client); if (ipa_ctx->wakelock_ref_cnt.cnt == 0) - __pm_relax(&ipa_ctx->w_lock); + __pm_relax(ipa_ctx->w_lock); spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags); } @@ -4300,10 +4288,15 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, goto fail_nat_dev_add; } + /* Register a wakeup source. 
*/ + ipa_ctx->w_lock = + wakeup_source_register(&ipa_pdev->dev, "IPA_WS"); + if (!ipa_ctx->w_lock) { + IPAERR("IPA wakeup source register failed\n"); + result = -ENOMEM; + goto fail_w_source_register; + } - - /* Create a wakeup source. */ - wakeup_source_init(&ipa_ctx->w_lock, "IPA_WS"); spin_lock_init(&ipa_ctx->wakelock_ref_cnt.spinlock); /* Initialize the SPS PM lock. */ @@ -4389,6 +4382,9 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, fail_create_apps_resource: ipa_rm_exit(); fail_ipa_rm_init: + wakeup_source_unregister(ipa_ctx->w_lock); + ipa_ctx->w_lock = NULL; +fail_w_source_register: fail_nat_dev_add: cdev_del(&ipa_ctx->cdev); fail_cdev_add: @@ -4665,164 +4661,117 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, static int ipa_smmu_wlan_cb_probe(struct device *dev) { - struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx(); - int atomic_ctx = 1; - int fast = 1; - int bypass = 1; - int ret; + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_WLAN); + int fast = 0; + int bypass = 0; + u32 iova_ap_mapping[2]; - IPADBG("sub pdev=%p\n", dev); + IPADBG("WLAN CB PROBE dev=%pK retrieving IOMMU mapping\n", dev); - cb->dev = dev; - cb->iommu = iommu_domain_alloc(&platform_bus_type); - if (!cb->iommu) { - IPAERR("could not alloc iommu domain\n"); - /* assume this failure is because iommu driver is not ready */ - return -EPROBE_DEFER; + cb->iommu_domain = iommu_get_domain_for_dev(dev); + if (IS_ERR_OR_NULL(cb->iommu_domain)) { + IPAERR("could not get iommu domain\n"); + return -EINVAL; } + IPADBG("WLAN CB PROBE mapping retrieved\n"); + + cb->dev = dev; cb->valid = true; - if (smmu_info.s1_bypass) { - if (iommu_domain_set_attr(cb->iommu, - DOMAIN_ATTR_S1_BYPASS, - &bypass)) { - IPAERR("couldn't set bypass\n"); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU S1 BYPASS\n"); - } else { - if (iommu_domain_set_attr(cb->iommu, - DOMAIN_ATTR_ATOMIC, - &atomic_ctx)) { - IPAERR("couldn't set domain as atomic\n"); - cb->valid 
= false; - return -EIO; - } - IPADBG("SMMU atomic set\n"); - if (smmu_info.fast_map) { - if (iommu_domain_set_attr(cb->iommu, - DOMAIN_ATTR_FAST, - &fast)) { - IPAERR("couldn't set fast map\n"); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU fast map set\n"); - } + cb->va_start = cb->va_end = cb->va_size = 0; + if (of_property_read_u32_array( + dev->of_node, "qcom,iommu-dma-addr-pool", + iova_ap_mapping, 2) == 0) { + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; } - ret = iommu_attach_device(cb->iommu, dev); - if (ret) { - IPAERR("could not attach device ret=%d\n", ret); - cb->valid = false; - return ret; - } + IPADBG("WLAN CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n", + dev, cb->va_start, cb->va_size); - if (!smmu_info.s1_bypass) { - IPAERR("map IPA region to WLAN_CB IOMMU\n"); - ret = ipa_iommu_map(cb->iommu, - rounddown(smmu_info.ipa_base, PAGE_SIZE), - rounddown(smmu_info.ipa_base, PAGE_SIZE), - roundup(smmu_info.ipa_size, PAGE_SIZE), - IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); - if (ret) { - IPAERR("map IPA to WLAN_CB IOMMU failed ret=%d\n", - ret); - arm_iommu_detach_device(cb->dev); - cb->valid = false; - return ret; - } - } + /* + * Prior to these calls to iommu_domain_get_attr(), these + * attributes were set in this function relative to dtsi values + * defined for this driver. In other words, if corresponding ipa + * driver owned values were found in the dtsi, they were read and + * set here. + * + * In this new world, the developer will use iommu owned dtsi + * settings to set them there. This new logic below, simply + * checks to see if they've been set in dtsi. If so, the logic + * further below acts accordingly... 
+ */ + iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass); + iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast); + + IPADBG( + "WLAN CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n", + dev, bypass, fast); return 0; } static int ipa_smmu_uc_cb_probe(struct device *dev) { - struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx(); - int atomic_ctx = 1; - int ret; - int fast = 1; - int bypass = 1; + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_UC); + int fast = 0; + int bypass = 0; u32 iova_ap_mapping[2]; IPADBG("UC CB PROBE sub pdev=%p\n", dev); - ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping", - iova_ap_mapping, 2); - if (ret) { - IPAERR("Fail to read UC start/size iova addresses\n"); - return ret; - } - cb->va_start = iova_ap_mapping[0]; - cb->va_size = iova_ap_mapping[1]; - cb->va_end = cb->va_start + cb->va_size; - IPADBG("UC va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size); - if (dma_set_mask(dev, DMA_BIT_MASK(32)) || dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { IPAERR("DMA set mask failed\n"); return -EOPNOTSUPP; } - IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev); + IPADBG("UC CB PROBE dev=%pK retrieving IOMMU mapping\n", dev); - cb->dev = dev; - cb->mapping = arm_iommu_create_mapping(&platform_bus_type, - cb->va_start, cb->va_size); - if (IS_ERR_OR_NULL(cb->mapping)) { - IPADBG("Fail to create mapping\n"); + cb->iommu_domain = iommu_get_domain_for_dev(dev); + if (IS_ERR_OR_NULL(cb->iommu_domain)) { + IPAERR("could not get iommu domain\n"); /* assume this failure is because iommu driver is not ready */ return -EPROBE_DEFER; } - IPADBG("SMMU mapping created\n"); + IPADBG("UC CB PROBE mapping retrieved\n"); + + cb->dev = dev; cb->valid = true; - IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev); - if (smmu_info.s1_bypass) { - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_S1_BYPASS, - &bypass)) { - IPAERR("couldn't set bypass\n"); - 
arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU S1 BYPASS\n"); - } else { - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_ATOMIC, - &atomic_ctx)) { - IPAERR("couldn't set domain as atomic\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU atomic set\n"); - if (smmu_info.fast_map) { - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_FAST, - &fast)) { - IPAERR("couldn't set fast map\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU fast map set\n"); - } - } + cb->va_start = cb->va_end = cb->va_size = 0; - IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev); - ret = arm_iommu_attach_device(cb->dev, cb->mapping); - if (ret) { - IPAERR("could not attach device ret=%d\n", ret); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return ret; + if (of_property_read_u32_array( + dev->of_node, "qcom,iommu-dma-addr-pool", + iova_ap_mapping, 2) == 0) { + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; } - cb->next_addr = cb->va_end; + IPADBG("UC CB PROBE dev=%pK va_start=0x%x va_size=0x%x\n", + dev, cb->va_start, cb->va_size); + + /* + * Prior to these calls to iommu_domain_get_attr(), these + * attributes were set in this function relative to dtsi values + * defined for this driver. In other words, if corresponding ipa + * driver owned values were found in the dtsi, they were read and + * set here. + * + * In this new world, the developer will use iommu owned dtsi + * settings to set them there. This new logic below, simply + * checks to see if they've been set in dtsi. If so, the logic + * further below acts accordingly... 
+ */ + + iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass); + iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast); + + IPADBG("UC CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n", + dev, bypass, fast); + ipa_ctx->uc_pdev = dev; IPADBG("UC CB PROBE pdev=%p attached\n", dev); @@ -4831,25 +4780,13 @@ static int ipa_smmu_uc_cb_probe(struct device *dev) static int ipa_smmu_ap_cb_probe(struct device *dev) { - struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(); + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_AP); int result; - int atomic_ctx = 1; - int fast = 1; - int bypass = 1; + int fast = 0; + int bypass = 0; u32 iova_ap_mapping[2]; - IPADBG("AP CB probe: sub pdev=%p\n", dev); - - result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping", - iova_ap_mapping, 2); - if (result) { - IPAERR("Fail to read AP start/size iova addresses\n"); - return result; - } - cb->va_start = iova_ap_mapping[0]; - cb->va_size = iova_ap_mapping[1]; - cb->va_end = cb->va_start + cb->va_size; - IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size); + IPADBG("AP CB probe: dev=%pK\n", dev); if (dma_set_mask(dev, DMA_BIT_MASK(32)) || dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) { @@ -4857,68 +4794,59 @@ static int ipa_smmu_ap_cb_probe(struct device *dev) return -EOPNOTSUPP; } - cb->dev = dev; - cb->mapping = arm_iommu_create_mapping(&platform_bus_type, - cb->va_start, - cb->va_size); - if (IS_ERR_OR_NULL(cb->mapping)) { - IPADBG("Fail to create mapping\n"); + IPADBG("AP CB PROBE dev=%pK retrieving IOMMU mapping\n", dev); + cb->iommu_domain = iommu_get_domain_for_dev(dev); + if (IS_ERR_OR_NULL(cb->iommu_domain)) { + IPAERR("could not get iommu domain\n"); /* assume this failure is because iommu driver is not ready */ return -EPROBE_DEFER; } - IPADBG("SMMU mapping created\n"); - cb->valid = true; - if (smmu_info.s1_bypass) { - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_S1_BYPASS, - &bypass)) { - 
IPAERR("couldn't set bypass\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU S1 BYPASS\n"); - } else { - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_ATOMIC, - &atomic_ctx)) { - IPAERR("couldn't set domain as atomic\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU atomic set\n"); + IPADBG("AP CB PROBE mapping retrieved\n"); - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_FAST, - &fast)) { - IPAERR("couldn't set fast map\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU fast map set\n"); - } + cb->dev = dev; + cb->valid = true; - result = arm_iommu_attach_device(cb->dev, cb->mapping); - if (result) { - IPAERR("couldn't attach to IOMMU ret=%d\n", result); - cb->valid = false; - return result; + cb->va_start = cb->va_end = cb->va_size = 0; + if (of_property_read_u32_array( + dev->of_node, "qcom,iommu-dma-addr-pool", + iova_ap_mapping, 2) == 0) { + cb->va_start = iova_ap_mapping[0]; + cb->va_size = iova_ap_mapping[1]; + cb->va_end = cb->va_start + cb->va_size; } + + IPADBG("AP va_start=0x%x va_sise=0x%x\n", cb->va_start, cb->va_size); + + /* + * Prior to these calls to iommu_domain_get_attr(), these + * attributes were set in this function relative to dtsi values + * defined for this driver. In other words, if corresponding ipa + * driver owned values were found in the dtsi, they were read and + * set here. + * + * In this new world, the developer will use iommu owned dtsi + * settings to set them there. This new logic below, simply + * checks to see if they've been set in dtsi. If so, the logic + * further below acts accordingly... 
+ */ + iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_S1_BYPASS, &bypass); + iommu_domain_get_attr(cb->iommu_domain, DOMAIN_ATTR_FAST, &fast); + + IPADBG("AP CB PROBE dev=%pK DOMAIN ATTRS bypass=%d fast=%d\n", + dev, bypass, fast); + if (!smmu_info.s1_bypass) { IPAERR("map IPA region to AP_CB IOMMU\n"); - result = ipa_iommu_map(cb->mapping->domain, + result = ipa_iommu_map(cb->iommu_domain, rounddown(smmu_info.ipa_base, PAGE_SIZE), rounddown(smmu_info.ipa_base, PAGE_SIZE), roundup(smmu_info.ipa_size, PAGE_SIZE), IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO); if (result) { IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n", - result); - arm_iommu_release_mapping(cb->mapping); + result); cb->valid = false; return result; } @@ -4933,8 +4861,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev) result = ipa_init(&ipa_res, dev); if (result) { IPAERR("ipa_init failed\n"); - arm_iommu_detach_device(cb->dev); - arm_iommu_release_mapping(cb->mapping); cb->valid = false; return result; } @@ -5010,18 +4936,12 @@ int ipa_plat_drv_probe(struct platform_device *pdev_p, if (of_property_read_bool(pdev_p->dev.of_node, "qcom,smmu-s1-bypass")) smmu_info.s1_bypass = true; - if (of_property_read_bool(pdev_p->dev.of_node, - "qcom,smmu-fast-map")) - smmu_info.fast_map = true; smmu_info.arm_smmu = true; pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n", smmu_info.s1_bypass, smmu_info.fast_map); result = of_platform_populate(pdev_p->dev.of_node, pdrv_match, NULL, &pdev_p->dev); - } else if (of_property_read_bool(pdev_p->dev.of_node, - "qcom,msm-smmu")) { - IPAERR("Legacy IOMMU not supported\n"); - result = -EOPNOTSUPP; + } else { if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) || dma_set_coherent_mask(&pdev_p->dev, @@ -5040,6 +4960,7 @@ int ipa_plat_drv_probe(struct platform_device *pdev_p, return result; } } + IPADBG("IPA PROBE SUCCESSFUL, result %d\n", result); return result; } @@ -5100,7 +5021,7 @@ struct ipa_context *ipa_get_ctx(void) int ipa_iommu_map(struct 
iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { - struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx(); + struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_AP); struct ipa_smmu_cb_ctx *uc_cb = ipa2_get_uc_smmu_ctx(); IPADBG("domain =0x%p iova 0x%lx\n", domain, iova); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c index df54de2184a0..89e58a059086 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c @@ -133,7 +133,7 @@ static int ipa2_smmu_map_peer_bam(unsigned long dev) phys_addr_t base; u32 size; struct iommu_domain *smmu_domain; - struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(); + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_AP); if (!ipa_ctx->smmu_s1_bypass) { if (ipa_ctx->peer_bam_map_cnt == 0) { @@ -529,7 +529,7 @@ static int ipa2_smmu_unmap_peer_bam(unsigned long dev) { size_t len; struct iommu_domain *smmu_domain; - struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(); + struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(IPA_SMMU_CB_AP); if (!ipa_ctx->smmu_s1_bypass) { WARN_ON(dev != ipa_ctx->peer_bam_dev); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index 81fdeb778a8d..514c3b423351 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -229,14 +229,25 @@ struct ipa_client_names { struct ipa_smmu_cb_ctx { bool valid; struct device *dev; - struct dma_iommu_mapping *mapping; - struct iommu_domain *iommu; + struct iommu_domain *iommu_domain; unsigned long next_addr; u32 va_start; u32 va_size; u32 va_end; }; + +enum ipa_smmu_cb_type { + IPA_SMMU_CB_AP, + IPA_SMMU_CB_WLAN, + IPA_SMMU_CB_UC, + IPA_SMMU_CB_MAX + +}; +#define VALID_IPA_SMMU_CB_TYPE(t) \ + ((t) >= IPA_SMMU_CB_AP && (t) < IPA_SMMU_CB_MAX) + + /** * struct ipa_flt_entry - IPA filtering table entry * @link: entry's link in global filtering 
enrties list @@ -1208,7 +1219,7 @@ struct ipa_context { u32 peer_bam_map_cnt; u32 wdi_map_cnt; bool use_dma_zone; - struct wakeup_source w_lock; + struct wakeup_source *w_lock; struct ipa_wakelock_ref_cnt wakelock_ref_cnt; /* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */ @@ -1945,7 +1956,7 @@ int ipa2_uc_mhi_print_stats(char *dbg_buff, int size); int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len); u32 ipa_get_num_pipes(void); u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys); -struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void); +struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(enum ipa_smmu_cb_type cb_type); struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void); struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void); struct iommu_domain *ipa_get_uc_smmu_domain(void); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c index 1c1b309077cb..137458876a9e 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c @@ -15,7 +15,7 @@ static struct qmi_elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, modem_offset_start), @@ -24,14 +24,14 @@ static struct qmi_elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_hdr_tbl_info_type_v01, modem_offset_end), }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -41,7 +41,7 @@ static struct qmi_elem_info ipa_route_tbl_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = 
NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_route_tbl_info_type_v01, route_tbl_start_addr), @@ -50,14 +50,14 @@ static struct qmi_elem_info ipa_route_tbl_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_route_tbl_info_type_v01, num_indices), }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -67,7 +67,7 @@ static struct qmi_elem_info ipa_modem_mem_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_modem_mem_info_type_v01, block_start_addr), @@ -76,14 +76,14 @@ static struct qmi_elem_info ipa_modem_mem_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_modem_mem_info_type_v01, size), }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -93,7 +93,7 @@ static struct qmi_elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_hdr_proc_ctx_tbl_info_type_v01, @@ -103,7 +103,7 @@ static struct qmi_elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_hdr_proc_ctx_tbl_info_type_v01, @@ -111,7 
+111,7 @@ static struct qmi_elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -121,7 +121,7 @@ static struct qmi_elem_info ipa_zip_tbl_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_zip_tbl_info_type_v01, modem_offset_start), @@ -130,14 +130,14 @@ static struct qmi_elem_info ipa_zip_tbl_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_zip_tbl_info_type_v01, modem_offset_end), }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -147,7 +147,7 @@ static struct qmi_elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_range_eq_16_type_v01, @@ -157,7 +157,7 @@ static struct qmi_elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_range_eq_16_type_v01, @@ -167,7 +167,7 @@ static struct qmi_elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_range_eq_16_type_v01, @@ -175,7 +175,7 @@ static struct qmi_elem_info 
ipa_ipfltr_range_eq_16_type_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -185,7 +185,7 @@ static struct qmi_elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_32_type_v01, @@ -195,7 +195,7 @@ static struct qmi_elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_32_type_v01, @@ -205,7 +205,7 @@ static struct qmi_elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_32_type_v01, @@ -213,7 +213,7 @@ static struct qmi_elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -223,7 +223,7 @@ static struct qmi_elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_eq_16_type_v01, @@ -233,14 +233,14 @@ static struct qmi_elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_ipfltr_eq_16_type_v01, value), }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + 
.array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -250,7 +250,7 @@ static struct qmi_elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, offset), @@ -259,14 +259,14 @@ static struct qmi_elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_ipfltr_eq_32_type_v01, value), }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -276,7 +276,7 @@ static struct qmi_elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_128_type_v01, @@ -286,7 +286,7 @@ static struct qmi_elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 16, .elem_size = sizeof(uint8_t), - .is_array = STATIC_ARRAY, + .array_type = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_128_type_v01, @@ -296,7 +296,7 @@ static struct qmi_elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 16, .elem_size = sizeof(uint8_t), - .is_array = STATIC_ARRAY, + .array_type = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_ipfltr_mask_eq_128_type_v01, @@ -304,7 +304,7 @@ static struct qmi_elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -314,7 +314,7 
@@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_rule_type_v01, @@ -324,7 +324,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_rule_type_v01, @@ -334,7 +334,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, tos_eq), @@ -343,7 +343,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, protocol_eq_present), @@ -352,7 +352,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, protocol_eq), @@ -361,7 +361,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, num_ihl_offset_range_16), @@ -371,7 +371,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .elem_len = 
QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01, .elem_size = sizeof( struct ipa_ipfltr_range_eq_16_type_v01), - .is_array = STATIC_ARRAY, + .array_type = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_range_16), @@ -381,7 +381,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, num_offset_meq_32), @@ -390,7 +390,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01, .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), - .is_array = STATIC_ARRAY, + .array_type = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, offset_meq_32), @@ -400,7 +400,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, tc_eq_present), @@ -409,7 +409,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, tc_eq), @@ -418,7 +418,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, flow_eq_present), @@ -427,7 +427,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = 
{ .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, flow_eq), @@ -436,7 +436,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_eq_16_present), @@ -445,7 +445,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_ipfltr_eq_16_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_eq_16), @@ -455,7 +455,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_eq_32_present), @@ -464,7 +464,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_ipfltr_eq_32_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_eq_32), @@ -474,7 +474,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, num_ihl_offset_meq_32), @@ -483,7 +483,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_STRUCT, 
.elem_len = QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01, .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), - .is_array = STATIC_ARRAY, + .array_type = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ihl_offset_meq_32), @@ -493,7 +493,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, num_offset_meq_128), @@ -504,7 +504,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01, .elem_size = sizeof( struct ipa_ipfltr_mask_eq_128_type_v01), - .is_array = STATIC_ARRAY, + .array_type = STATIC_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_rule_type_v01, @@ -515,7 +515,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, metadata_meq32_present), @@ -524,7 +524,7 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_ipfltr_mask_eq_32_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, metadata_meq32), @@ -534,14 +534,14 @@ static struct qmi_elem_info ipa_filter_rule_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_rule_type_v01, ipv4_frag_eq_present), }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, 
.tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -551,7 +551,7 @@ static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, filter_spec_identifier), @@ -560,7 +560,7 @@ static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, ip_type), @@ -569,7 +569,7 @@ static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_filter_rule_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, filter_rule), @@ -579,7 +579,7 @@ static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, filter_action), @@ -588,7 +588,7 @@ static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, is_routing_table_index_valid), @@ -597,7 +597,7 @@ static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, route_table_index), @@ -606,7 
+606,7 @@ static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, is_mux_id_valid), @@ -615,14 +615,14 @@ static struct qmi_elem_info ipa_filter_spec_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_filter_spec_type_v01, mux_id), }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -633,7 +633,7 @@ static struct qmi_elem_info .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_rule_identifier_to_handle_map_v01, @@ -643,7 +643,7 @@ static struct qmi_elem_info .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_rule_identifier_to_handle_map_v01, @@ -651,7 +651,7 @@ static struct qmi_elem_info }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -661,7 +661,7 @@ static struct qmi_elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_handle_to_index_map_v01, @@ -671,7 +671,7 @@ static struct qmi_elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + 
.array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof( struct ipa_filter_handle_to_index_map_v01, @@ -679,7 +679,7 @@ static struct qmi_elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -689,7 +689,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -699,7 +699,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -709,7 +709,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -719,7 +719,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_hdr_tbl_info_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -730,7 +730,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -740,7 +740,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct 
ipa_route_tbl_info_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -751,7 +751,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -761,7 +761,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_route_tbl_info_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -772,7 +772,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -782,7 +782,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -792,7 +792,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -802,7 +802,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -812,7 +812,7 @@ struct qmi_elem_info 
ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -822,7 +822,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_modem_mem_info_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -833,7 +833,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -843,7 +843,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -853,7 +853,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -863,7 +863,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -873,7 +873,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof( 
struct ipa_init_modem_driver_req_msg_v01, @@ -884,7 +884,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .elem_len = 1, .elem_size = sizeof( struct ipa_hdr_proc_ctx_tbl_info_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -895,7 +895,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -905,7 +905,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct ipa_zip_tbl_info_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof( struct ipa_init_modem_driver_req_msg_v01, @@ -914,7 +914,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -924,7 +924,7 @@ struct qmi_elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, @@ -935,7 +935,7 @@ struct qmi_elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, @@ -945,7 +945,7 @@ struct qmi_elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, 
.offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, @@ -955,7 +955,7 @@ struct qmi_elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, @@ -965,7 +965,7 @@ struct qmi_elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_init_modem_driver_resp_msg_v01, @@ -973,7 +973,7 @@ struct qmi_elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -983,7 +983,7 @@ struct qmi_elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_indication_reg_req_msg_v01, @@ -993,7 +993,7 @@ struct qmi_elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_indication_reg_req_msg_v01, @@ -1003,7 +1003,7 @@ struct qmi_elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_indication_reg_req_msg_v01, @@ -1013,7 +1013,7 @@ struct qmi_elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct 
ipa_indication_reg_req_msg_v01, @@ -1021,7 +1021,7 @@ struct qmi_elem_info ipa_indication_reg_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1031,7 +1031,7 @@ struct qmi_elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_indication_reg_resp_msg_v01, @@ -1040,7 +1040,7 @@ struct qmi_elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1050,7 +1050,7 @@ struct qmi_elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof(struct ipa_master_driver_init_complt_ind_msg_v01, @@ -1059,7 +1059,7 @@ struct qmi_elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1069,7 +1069,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1079,7 +1079,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1089,7 +1089,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, 
.elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof(struct ipa_filter_spec_type_v01), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1100,7 +1100,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1110,7 +1110,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1120,7 +1120,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1130,7 +1130,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1140,7 +1140,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1150,7 +1150,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct 
ipa_install_fltr_rule_req_msg_v01, @@ -1160,7 +1160,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1170,7 +1170,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1180,7 +1180,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof(uint32_t), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_install_fltr_rule_req_msg_v01, @@ -1188,7 +1188,7 @@ struct qmi_elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1198,7 +1198,7 @@ struct qmi_elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_install_fltr_rule_resp_msg_v01, @@ -1209,7 +1209,7 @@ struct qmi_elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_resp_msg_v01, @@ -1219,7 +1219,7 @@ struct qmi_elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 
0x10, .offset = offsetof( struct ipa_install_fltr_rule_resp_msg_v01, @@ -1230,7 +1230,7 @@ struct qmi_elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof( struct ipa_filter_rule_identifier_to_handle_map_v01), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_install_fltr_rule_resp_msg_v01, @@ -1240,7 +1240,7 @@ struct qmi_elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1250,7 +1250,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1260,7 +1260,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_2_BYTE, .elem_len = 1, .elem_size = sizeof(uint16_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1270,7 +1270,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x03, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1281,7 +1281,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof( struct ipa_filter_handle_to_index_map_v01), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x03, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1292,7 +1292,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, 
.elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1302,7 +1302,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1312,7 +1312,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1322,7 +1322,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1332,7 +1332,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1342,7 +1342,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1352,7 +1352,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ 
-1362,7 +1362,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1372,7 +1372,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1382,7 +1382,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1392,7 +1392,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1402,7 +1402,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1412,7 +1412,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1422,7 +1422,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - 
.is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1432,7 +1432,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1442,7 +1442,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1452,7 +1452,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof(uint32_t), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1462,7 +1462,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1472,7 +1472,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct ipa_fltr_installed_notif_req_msg_v01, @@ -1482,7 +1482,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01, .elem_size = sizeof(uint32_t), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct 
ipa_fltr_installed_notif_req_msg_v01, @@ -1490,7 +1490,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1500,7 +1500,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_fltr_installed_notif_resp_msg_v01, @@ -1509,7 +1509,7 @@ struct qmi_elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1519,7 +1519,7 @@ struct qmi_elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_enable_force_clear_datapath_req_msg_v01, @@ -1529,7 +1529,7 @@ struct qmi_elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_enable_force_clear_datapath_req_msg_v01, @@ -1539,7 +1539,7 @@ struct qmi_elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_enable_force_clear_datapath_req_msg_v01, @@ -1549,7 +1549,7 @@ struct qmi_elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = 
offsetof( struct ipa_enable_force_clear_datapath_req_msg_v01, @@ -1557,7 +1557,7 @@ struct qmi_elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1567,7 +1567,7 @@ struct qmi_elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_enable_force_clear_datapath_resp_msg_v01, @@ -1576,7 +1576,7 @@ struct qmi_elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1586,7 +1586,7 @@ struct qmi_elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_disable_force_clear_datapath_req_msg_v01, @@ -1594,7 +1594,7 @@ struct qmi_elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1604,7 +1604,7 @@ struct qmi_elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_disable_force_clear_datapath_resp_msg_v01, @@ -1613,7 +1613,7 @@ struct qmi_elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1623,7 +1623,7 @@ struct qmi_elem_info 
ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1633,7 +1633,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1643,7 +1643,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1653,7 +1653,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1663,7 +1663,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1673,7 +1673,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1683,7 +1683,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1693,7 +1693,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, 
.elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1703,7 +1703,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1713,7 +1713,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x14, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1723,7 +1723,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1733,7 +1733,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x15, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1743,7 +1743,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1753,7 +1753,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x16, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1763,7 +1763,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, 
+ .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1773,7 +1773,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x17, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1783,7 +1783,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1793,7 +1793,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1803,7 +1803,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1813,7 +1813,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x19, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1823,7 +1823,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1833,7 +1833,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x1A, .offset = 
offsetof( struct ipa_config_req_msg_v01, @@ -1843,7 +1843,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1853,7 +1853,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x1B, .offset = offsetof( struct ipa_config_req_msg_v01, @@ -1861,7 +1861,7 @@ struct qmi_elem_info ipa_config_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1871,7 +1871,7 @@ struct qmi_elem_info ipa_config_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_config_resp_msg_v01, @@ -1880,7 +1880,7 @@ struct qmi_elem_info ipa_config_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1890,7 +1890,7 @@ struct qmi_elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_get_data_stats_req_msg_v01, @@ -1900,7 +1900,7 @@ struct qmi_elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_data_stats_req_msg_v01, @@ -1910,7 +1910,7 @@ struct qmi_elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_1_BYTE, .elem_len = 
1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_data_stats_req_msg_v01, @@ -1918,7 +1918,7 @@ struct qmi_elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1928,7 +1928,7 @@ static struct qmi_elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_pipe_stats_info_type_v01, pipe_index), @@ -1937,7 +1937,7 @@ static struct qmi_elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_pipe_stats_info_type_v01, num_ipv4_packets), @@ -1946,7 +1946,7 @@ static struct qmi_elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_pipe_stats_info_type_v01, num_ipv4_bytes), @@ -1955,7 +1955,7 @@ static struct qmi_elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_pipe_stats_info_type_v01, num_ipv6_packets), @@ -1964,14 +1964,14 @@ static struct qmi_elem_info ipa_pipe_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_pipe_stats_info_type_v01, 
num_ipv6_bytes), }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1981,7 +1981,7 @@ static struct qmi_elem_info ipa_stats_type_filter_rule_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_stats_type_filter_rule_v01, @@ -1991,7 +1991,7 @@ static struct qmi_elem_info ipa_stats_type_filter_rule_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_stats_type_filter_rule_v01, @@ -1999,7 +1999,7 @@ static struct qmi_elem_info ipa_stats_type_filter_rule_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2009,7 +2009,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2020,7 +2020,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2030,7 +2030,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_SIGNED_4_BYTE_ENUM, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2040,7 +2040,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, 
.elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2050,7 +2050,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2060,7 +2060,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_PIPES_V01, .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x11, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2071,7 +2071,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2081,7 +2081,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2091,7 +2091,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_PIPES_V01, .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x12, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2102,7 +2102,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct 
ipa_get_data_stats_resp_msg_v01, @@ -2112,7 +2112,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2122,7 +2122,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = QMI_IPA_MAX_FILTERS_V01, .elem_size = sizeof(struct ipa_pipe_stats_info_type_v01), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x13, .offset = offsetof( struct ipa_get_data_stats_resp_msg_v01, @@ -2131,7 +2131,7 @@ struct qmi_elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2141,7 +2141,7 @@ static struct qmi_elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, @@ -2151,7 +2151,7 @@ static struct qmi_elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, @@ -2161,7 +2161,7 @@ static struct qmi_elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, @@ -2171,7 +2171,7 @@ static struct qmi_elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = 
sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, @@ -2181,7 +2181,7 @@ static struct qmi_elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_apn_data_stats_info_type_v01, @@ -2189,7 +2189,7 @@ static struct qmi_elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2199,7 +2199,7 @@ struct qmi_elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_req_msg_v01, @@ -2209,7 +2209,7 @@ struct qmi_elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_req_msg_v01, @@ -2219,7 +2219,7 @@ struct qmi_elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = QMI_IPA_MAX_APN_V01, .elem_size = sizeof(uint32_t), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_req_msg_v01, @@ -2227,7 +2227,7 @@ struct qmi_elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2237,7 +2237,7 @@ struct qmi_elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct 
qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_get_apn_data_stats_resp_msg_v01, @@ -2248,7 +2248,7 @@ struct qmi_elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_resp_msg_v01, @@ -2258,7 +2258,7 @@ struct qmi_elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_resp_msg_v01, @@ -2269,7 +2269,7 @@ struct qmi_elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { .elem_len = QMI_IPA_MAX_APN_V01, .elem_size = sizeof(struct ipa_apn_data_stats_info_type_v01), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_get_apn_data_stats_resp_msg_v01, @@ -2278,7 +2278,7 @@ struct qmi_elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2288,7 +2288,7 @@ static struct qmi_elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, .elem_size = sizeof(uint32_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_data_usage_quota_info_type_v01, @@ -2298,7 +2298,7 @@ static struct qmi_elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = { .data_type = QMI_UNSIGNED_8_BYTE, .elem_len = 1, .elem_size = sizeof(uint64_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, .offset = offsetof(struct ipa_data_usage_quota_info_type_v01, @@ -2306,7 +2306,7 @@ static struct qmi_elem_info 
ipa_data_usage_quota_info_type_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2316,7 +2316,7 @@ struct qmi_elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { .data_type = QMI_OPT_FLAG, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_set_data_usage_quota_req_msg_v01, @@ -2326,7 +2326,7 @@ struct qmi_elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { .data_type = QMI_DATA_LEN, .elem_len = 1, .elem_size = sizeof(uint8_t), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_set_data_usage_quota_req_msg_v01, @@ -2337,7 +2337,7 @@ struct qmi_elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { .elem_len = QMI_IPA_MAX_APN_V01, .elem_size = sizeof(struct ipa_data_usage_quota_info_type_v01), - .is_array = VAR_LEN_ARRAY, + .array_type = VAR_LEN_ARRAY, .tlv_type = 0x10, .offset = offsetof( struct ipa_set_data_usage_quota_req_msg_v01, @@ -2346,7 +2346,7 @@ struct qmi_elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2356,7 +2356,7 @@ struct qmi_elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_set_data_usage_quota_resp_msg_v01, @@ -2365,7 +2365,7 @@ struct qmi_elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2376,7 +2376,7 @@ struct qmi_elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = { .elem_len = 1, .elem_size = sizeof(struct 
ipa_data_usage_quota_info_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof( struct ipa_data_usage_quota_reached_ind_msg_v01, @@ -2385,7 +2385,7 @@ struct qmi_elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2394,7 +2394,7 @@ struct qmi_elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[] = { /* ipa_stop_data_usage_quota_req_msg is empty */ { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2404,7 +2404,7 @@ struct qmi_elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = { .data_type = QMI_STRUCT, .elem_len = 1, .elem_size = sizeof(struct qmi_response_type_v01), - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = 0x02, .offset = offsetof( struct ipa_stop_data_usage_quota_resp_msg_v01, @@ -2413,7 +2413,7 @@ struct qmi_elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = { }, { .data_type = QMI_EOTI, - .is_array = NO_ARRAY, + .array_type = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, }, }; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c index 1fe1ed37fc9d..02ba9d5ea0d3 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c @@ -463,7 +463,7 @@ static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len, return -EINVAL; } - ret = ipa_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE), + ret = ipa_iommu_map(cb->iommu_domain, va, rounddown(pa, PAGE_SIZE), true_len, device ? 
(prot | IOMMU_MMIO) : prot); if (ret) { @@ -504,7 +504,7 @@ static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt, phys = page_to_phys(sg_page(sg)); len = PAGE_ALIGN(sg->offset + sg->length); - ret = ipa_iommu_map(cb->mapping->domain, va, phys, len, prot); + ret = ipa_iommu_map(cb->iommu_domain, va, phys, len, prot); if (ret) { IPAERR("iommu map failed for pa=%pa len=%zu\n", &phys, len); @@ -521,7 +521,7 @@ static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt, bad_mapping: for_each_sg(sgt->sgl, sg, count, i) - iommu_unmap(cb->mapping->domain, sg_dma_address(sg), + iommu_unmap(cb->iommu_domain, sg_dma_address(sg), sg_dma_len(sg)); return -EINVAL; } @@ -548,7 +548,7 @@ static void ipa_release_uc_smmu_mappings(enum ipa_client_type client) for (i = start; i <= end; i++) { if (wdi_res[i].valid) { for (j = 0; j < wdi_res[i].nents; j++) { - iommu_unmap(cb->mapping->domain, + iommu_unmap(cb->iommu_domain, wdi_res[i].res[j].iova, wdi_res[i].res[j].size); ipa_ctx->wdi_map_cnt--; @@ -1852,7 +1852,7 @@ int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) for (i = 0; i < num_buffers; i++) { IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, &info[i].pa, info[i].iova, info[i].size); - info[i].result = ipa_iommu_map(cb->iommu, + info[i].result = ipa_iommu_map(cb->iommu_domain, rounddown(info[i].iova, PAGE_SIZE), rounddown(info[i].pa, PAGE_SIZE), roundup(info[i].size + info[i].pa - @@ -1882,7 +1882,7 @@ int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info) for (i = 0; i < num_buffers; i++) { IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i, &info[i].pa, info[i].iova, info[i].size); - info[i].result = iommu_unmap(cb->iommu, + info[i].result = iommu_unmap(cb->iommu_domain, rounddown(info[i].iova, PAGE_SIZE), roundup(info[i].size + info[i].pa - rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE)); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h index 
1292b9c9c6cd..4abef20c9629 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -371,7 +371,7 @@ static inline int ipa3_qmi_add_offload_request_send( } static inline int ipa3_qmi_rmv_offload_request_send( - struct ipa_rmv_offload_connection_req_msg_v01 *req) + struct ipa_remove_offload_connection_req_msg_v01 *req) { return -EPERM; } -- GitLab From fa57079e0276fe1e179716ff1a0abb2e0ed110fd Mon Sep 17 00:00:00 2001 From: Swetha Chikkaboraiah Date: Wed, 9 Sep 2020 18:05:40 +0530 Subject: [PATCH 1267/1304] net: support __alloc_skb to always use GFP_DMA This makes it possible to ensure that any clients which use __alloc_skb always allocate memory from the DMA zone. Change-Id: I3b399b8da113e38a050b0b3e81eeaec549253bb9 Signed-off-by: Liam Mark Signed-off-by: Swetha Chikkaboraiah Signed-off-by: Ashok Raj D --- net/core/skbuff.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0629ca89ab74..95d80d34d827 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -183,6 +183,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, u8 *data; bool pfmemalloc; + if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE)) + gfp_mask |= GFP_DMA; + cache = (flags & SKB_ALLOC_FCLONE) ? skbuff_fclone_cache : skbuff_head_cache; -- GitLab From 602a398f3207f9771ab6e8e8fe38c7a0dad59314 Mon Sep 17 00:00:00 2001 From: Ashok Raj Deenadayalan Date: Fri, 28 Aug 2020 22:50:46 +0530 Subject: [PATCH 1268/1304] msm: ipa3: Add low-level IPA client support Add low-level IPA client connect and disconnect support. 
Change-Id: Icb0aa110e2cbf71d9a5a19b88bb261667599461e Signed-off-by: Praveen Kurapati Signed-off-by: Chetan C R Signed-off-by: Ashok Raj Deenadayalan Signed-off-by: Swetha Chikkaboraiah --- drivers/platform/msm/ipa/ipa_api.c | 49 +++++++++++++++++++ .../platform/msm/ipa/ipa_clients/rndis_ipa.c | 3 +- 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 580a3ce29767..c5e8532ba90e 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -384,6 +384,55 @@ int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr) return 0; } +/** + * ipa_connect() - low-level IPA client connect + * @in: [in] input parameters from client + * @sps: [out] sps output from IPA needed by client for sps_connect + * @clnt_hdl: [out] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to connect to + * IPA in BAM-BAM mode. these peripherals are USB and HSIC. this api + * expects caller to take responsibility to add any needed headers, routing + * and filtering tables and rules as needed. + * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps, + u32 *clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_connect, in, sps, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_connect); + +/** + * ipa_disconnect() - low-level IPA client disconnect + * @clnt_hdl: [in] opaque client handle assigned by IPA to client + * + * Should be called by the driver of the peripheral that wants to disconnect + * from IPA in BAM-BAM mode. this api expects caller to take responsibility to + * free any needed headers, routing and filtering tables and rules as needed. 
+ * + * Returns: 0 on success, negative on failure + * + * Note: Should not be called from atomic context + */ +int ipa_disconnect(u32 clnt_hdl) +{ + int ret; + + IPA_API_DISPATCH_RETURN(ipa_disconnect, clnt_hdl); + + return ret; +} +EXPORT_SYMBOL(ipa_disconnect); + + /** * ipa_clear_endpoint_delay() - Clear ep_delay. * @clnt_hdl: [in] IPA client handle diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c index 47bfe38f8c93..2d54862b430c 100644 --- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c @@ -589,7 +589,8 @@ int rndis_ipa_init(struct ipa_usb_init_params *params) } RNDIS_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr); - if (ipa_is_vlan_mode(IPA_VLAN_IF_RNDIS, + if ((ipa_get_hw_type() >= IPA_HW_v3_0) && + ipa_is_vlan_mode(IPA_VLAN_IF_RNDIS, &rndis_ipa_ctx->is_vlan_mode)) { RNDIS_IPA_ERROR("couldn't acquire vlan mode, is ipa ready?\n"); goto fail_get_vlan_mode; -- GitLab From 795434e6079a1a76c6150cacaa1b65b94604d123 Mon Sep 17 00:00:00 2001 From: Asha Magadi Venkateshamurthy Date: Wed, 14 Oct 2020 22:44:39 +0530 Subject: [PATCH 1269/1304] msm: ipa: Add ipa rm support for ipa_v2 Ipa rm mechanism is added to support ipa_v2 for kernel msm-4.19. Change-Id: Ia537c9ccf26e7e304a31a6d9dd26834c194da9d3 Signed-off-by: Asha Magadi Venkateshamurthy Signed-off-by: Swetha Chikkaboraiah --- drivers/platform/msm/ipa/ipa_api.c | 32 +- .../platform/msm/ipa/ipa_clients/rndis_ipa.c | 299 +++++++++++++++++- drivers/platform/msm/ipa/ipa_common_i.h | 2 + 3 files changed, 310 insertions(+), 23 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index c5e8532ba90e..2dab4cecae31 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -14,7 +14,6 @@ #include #include #include "ipa_api.h" -#include "ipa_v3/ipa_i.h" /* * The following for adding code (ie. 
for EMULATION) not found on x86. @@ -2883,18 +2882,23 @@ enum ipa_client_type ipa_get_client_mapping(int pipe_idx) EXPORT_SYMBOL(ipa_get_client_mapping); /** - * ipa_get_rm_resource_from_ep() - this function is part of the deprecated - * RM mechanism but is still used by some drivers so we kept the definition. + * ipa_get_rm_resource_from_ep() - get the IPA_RM resource which is related to + * the supplied pipe index. + * + * @pipe_idx: + * + * Return value: IPA_RM resource related to the pipe, -1 if a resource was not + * found. */ - enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx) { - IPAERR("IPA RM is not supported idx=%d\n", pipe_idx); - return -EFAULT; -} -EXPORT_SYMBOL(ipa_get_rm_resource_from_ep); + int ret; + IPA_API_DISPATCH_RETURN(ipa_get_rm_resource_from_ep, pipe_idx); + return ret; +} +EXPORT_SYMBOL(ipa_get_rm_resource_from_ep); /** * ipa_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt @@ -3792,6 +3796,18 @@ int ipa_get_prot_id(enum ipa_client_type client) } EXPORT_SYMBOL(ipa_get_prot_id); +/** + * ipa_pm_is_used() - Returns if IPA PM framework is used + */ +bool ipa_pm_is_used(void) +{ + bool ret; + + IPA_API_DISPATCH_RETURN(ipa_pm_is_used); + + return ret; +} + static const struct dev_pm_ops ipa_pm_ops = { .suspend_late = ipa_ap_suspend, .resume_early = ipa_ap_resume, diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c index 2d54862b430c..0faf2b46e7ed 100644 --- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c @@ -31,6 +31,7 @@ #define DEBUGFS_DIR_NAME "rndis_ipa" #define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation" #define NETDEV_NAME "rndis" +#define DRV_RESOURCE_ID IPA_RM_RESOURCE_RNDIS_PROD #define IPV4_HDR_NAME "rndis_eth_ipv4" #define IPV6_HDR_NAME "rndis_eth_ipv6" #define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS @@ -159,6 +160,7 @@ enum rndis_ipa_operation { * @rx_dropped: number of filtered 
out Rx packets * @rx_dump_enable: dump all Rx packets * @icmp_filter: allow all ICMP packet to pass through the filters + * @rm_enable: flag that enable/disable Resource manager request prior to Tx * @deaggregation_enable: enable/disable IPA HW deaggregation logic * @during_xmit_error: flags that indicate that the driver is in a middle * of error handling in Tx path @@ -194,6 +196,7 @@ struct rndis_ipa_dev { u32 rx_dropped; bool rx_dump_enable; bool icmp_filter; + bool rm_enable; bool deaggregation_enable; bool during_xmit_error; struct dentry *directory; @@ -255,10 +258,18 @@ static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx); static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net); static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode); static int rndis_ipa_deregister_properties(char *netdev_name); +static void rndis_ipa_rm_notify + (void *user_data, enum ipa_rm_event event, + unsigned long data); +static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx); +static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx); static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx); static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx); static bool rx_filter(struct sk_buff *skb); static bool tx_filter(struct sk_buff *skb); +static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx); +static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx); +static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx); static netdev_tx_t rndis_ipa_start_xmit (struct sk_buff *skb, struct net_device *net); static int rndis_ipa_debugfs_atomic_open @@ -541,6 +552,7 @@ int rndis_ipa_init(struct ipa_usb_init_params *params) rndis_ipa_ctx->tx_filter = false; rndis_ipa_ctx->rx_filter = false; rndis_ipa_ctx->icmp_filter = true; + rndis_ipa_ctx->rm_enable = true; rndis_ipa_ctx->tx_dropped = 0; rndis_ipa_ctx->rx_dropped = 0; 
rndis_ipa_ctx->tx_dump_enable = false; @@ -750,12 +762,15 @@ int rndis_ipa_pipe_connect_notify( return -EINVAL; } - result = rndis_ipa_register_pm_client(rndis_ipa_ctx); + if (ipa_pm_is_used()) + result = rndis_ipa_register_pm_client(rndis_ipa_ctx); + else + result = rndis_ipa_create_rm_resource(rndis_ipa_ctx); if (result) { - RNDIS_IPA_ERROR("fail on PM register\n"); - goto fail_register_pm; + RNDIS_IPA_ERROR("fail on RM create\n"); + goto fail_create_rm; } - RNDIS_IPA_DEBUG("PM client was registered\n"); + RNDIS_IPA_DEBUG("RM resource was created\n"); rndis_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl; rndis_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl; @@ -833,8 +848,11 @@ int rndis_ipa_pipe_connect_notify( return 0; fail: - rndis_ipa_deregister_pm_client(rndis_ipa_ctx); -fail_register_pm: + if (ipa_pm_is_used()) + rndis_ipa_deregister_pm_client(rndis_ipa_ctx); + else + rndis_ipa_destroy_rm_resource(rndis_ipa_ctx); +fail_create_rm: return result; } EXPORT_SYMBOL(rndis_ipa_pipe_connect_notify); @@ -952,11 +970,11 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb, goto out; } - ret = ipa_pm_activate(rndis_ipa_ctx->pm_hdl); - if (unlikely(ret)) { - RNDIS_IPA_DEBUG("Failed activate PM client\n"); + ret = resource_request(rndis_ipa_ctx); + if (ret) { + RNDIS_IPA_DEBUG("Waiting to resource\n"); netif_stop_queue(net); - goto fail_pm_activate; + goto resource_busy; } if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) >= @@ -985,8 +1003,8 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb, fail_tx_packet: rndis_ipa_xmit_error(skb); out: - ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl); -fail_pm_activate: + resource_release(rndis_ipa_ctx); +resource_busy: RNDIS_IPA_DEBUG ("packet Tx done - %s\n", (status == NETDEV_TX_OK) ? 
"OK" : "FAIL"); @@ -1074,6 +1092,50 @@ static void rndis_ipa_tx_timeout(struct net_device *net) net->stats.tx_errors++; } +/** + * rndis_ipa_rm_notify() - callback supplied to IPA resource manager + * for grant/release events + * user_data: the driver context supplied to IPA resource manager during call + * to ipa_rm_create_resource(). + * event: the event notified to us by IPA resource manager (Release/Grant) + * data: reserved field supplied by IPA resource manager + * + * This callback shall be called based on resource request/release sent + * to the IPA resource manager. + * In case the queue was stopped during EINPROGRESS for Tx path and the + * event received is Grant then the queue shall be restarted. + * In case the event notified is a release notification the netdev discard it. + */ +static void rndis_ipa_rm_notify( + void *user_data, enum ipa_rm_event event, + unsigned long data) +{ + struct rndis_ipa_dev *rndis_ipa_ctx = user_data; + + RNDIS_IPA_LOG_ENTRY(); + + if (event == IPA_RM_RESOURCE_RELEASED) { + RNDIS_IPA_DEBUG("Resource Released\n"); + return; + } + + if (event != IPA_RM_RESOURCE_GRANTED) { + RNDIS_IPA_ERROR + ("Unexceoted event receieved from RM (%d\n)", event); + return; + } + RNDIS_IPA_DEBUG("Resource Granted\n"); + + if (netif_queue_stopped(rndis_ipa_ctx->net)) { + RNDIS_IPA_DEBUG("starting queue\n"); + netif_start_queue(rndis_ipa_ctx->net); + } else { + RNDIS_IPA_DEBUG("queue already awake\n"); + } + + RNDIS_IPA_LOG_EXIT(); +} + /** * rndis_ipa_packet_receive_notify() - Rx notify for packet sent from * tethered PC (USB->IPA). 
@@ -1293,12 +1355,15 @@ int rndis_ipa_pipe_disconnect_notify(void *private) rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts; atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0); - retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx); + if (ipa_pm_is_used()) + retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx); + else + retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx); if (retval) { - RNDIS_IPA_ERROR("Fail to deregister PM\n"); + RNDIS_IPA_ERROR("Fail to clean RM\n"); return retval; } - RNDIS_IPA_DEBUG("PM was successfully deregistered\n"); + RNDIS_IPA_DEBUG("RM was successfully destroyed\n"); spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, @@ -1766,7 +1831,86 @@ static int rndis_ipa_deregister_properties(char *netdev_name) return 0; } +/** + * rndis_ipa_create_rm_resource() -creates the resource representing + * this Netdev and supply notification callback for resource event + * such as Grant/Release + * @rndis_ipa_ctx: this driver context + * + * In order make sure all needed resources are available during packet + * transmit this Netdev shall use Request/Release mechanism of + * the IPA resource manager. + * This mechanism shall iterate over a dependency graph and make sure + * all dependent entities are ready to for packet Tx + * transfer (Apps->IPA->USB). + * In this function the resource representing the Netdev is created + * in addition to the basic dependency between the Netdev and the USB client. + * Hence, USB client, is a dependency for the Netdev and may be notified in + * case of packet transmit from this Netdev to tethered Host. + * As implied from the "may" in the above sentence there is a scenario where + * the USB is not notified. This is done thanks to the IPA resource manager + * inactivity timer. + * The inactivity timer allow the Release requests to be delayed in order + * prevent ping-pong with the USB and other dependencies. 
+ */ +static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + struct ipa_rm_create_params create_params = {0}; + struct ipa_rm_perf_profile profile; + int result; + + RNDIS_IPA_LOG_ENTRY(); + + create_params.name = DRV_RESOURCE_ID; + create_params.reg_params.user_data = rndis_ipa_ctx; + create_params.reg_params.notify_cb = rndis_ipa_rm_notify; + result = ipa_rm_create_resource(&create_params); + if (result) { + RNDIS_IPA_ERROR("Fail on ipa_rm_create_resource\n"); + goto fail_rm_create; + } + RNDIS_IPA_DEBUG("RM client was created\n"); + + profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS; + ipa_rm_set_perf_profile(DRV_RESOURCE_ID, &profile); + result = ipa_rm_inactivity_timer_init + (DRV_RESOURCE_ID, + INACTIVITY_MSEC_DELAY); + if (result) { + RNDIS_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n"); + goto fail_inactivity_timer; + } + + RNDIS_IPA_DEBUG("rm_it client was created\n"); + + result = ipa_rm_add_dependency_sync + (DRV_RESOURCE_ID, + IPA_RM_RESOURCE_USB_CONS); + + if (result && result != -EINPROGRESS) + RNDIS_IPA_ERROR("unable to add RNDIS/USB dependency (%d)\n", + result); + else + RNDIS_IPA_DEBUG("RNDIS/USB dependency was set\n"); + + result = ipa_rm_add_dependency_sync + (IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result && result != -EINPROGRESS) + RNDIS_IPA_ERROR("unable to add USB/APPS dependency (%d)\n", + result); + else + RNDIS_IPA_DEBUG("USB/APPS dependency was set\n"); + + RNDIS_IPA_LOG_EXIT(); + + return 0; + +fail_inactivity_timer: +fail_rm_create: + return result; +} static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event) { @@ -1791,6 +1935,64 @@ static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event) RNDIS_IPA_LOG_EXIT(); } +/** + * rndis_ipa_destroy_rm_resource() - delete the dependency and destroy + * the resource done on rndis_ipa_create_rm_resource() + * @rndis_ipa_ctx: this driver context + * + * This function shall delete the dependency create between + * 
the Netdev to the USB. + * In addition the inactivity time shall be destroy and the resource shall + * be deleted. + */ +static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + int result; + + RNDIS_IPA_LOG_ENTRY(); + + result = ipa_rm_delete_dependency + (DRV_RESOURCE_ID, + IPA_RM_RESOURCE_USB_CONS); + if (result && result != -EINPROGRESS) { + RNDIS_IPA_ERROR("Fail to delete RNDIS/USB dependency\n"); + goto bail; + } + RNDIS_IPA_DEBUG("RNDIS/USB dependency was successfully deleted\n"); + + result = ipa_rm_delete_dependency + (IPA_RM_RESOURCE_USB_PROD, + IPA_RM_RESOURCE_APPS_CONS); + if (result == -EINPROGRESS) { + RNDIS_IPA_DEBUG("RM dependency deletion is in progress"); + } else if (result) { + RNDIS_IPA_ERROR("Fail to delete USB/APPS dependency\n"); + goto bail; + } else { + RNDIS_IPA_DEBUG("USB/APPS dependency was deleted\n"); + } + + result = ipa_rm_inactivity_timer_destroy(DRV_RESOURCE_ID); + if (result) { + RNDIS_IPA_ERROR("Fail to destroy inactivity timern"); + goto bail; + } + RNDIS_IPA_DEBUG("RM inactivity timer was successfully destroy\n"); + + result = ipa_rm_delete_resource(DRV_RESOURCE_ID); + if (result) { + RNDIS_IPA_ERROR("resource deletion failed\n"); + goto bail; + } + RNDIS_IPA_DEBUG + ("Netdev RM resource was deleted (resid:%d)\n", + DRV_RESOURCE_ID); + + RNDIS_IPA_LOG_EXIT(); + +bail: + return result; +} static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx) { @@ -1819,6 +2021,52 @@ static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx) return 0; } +/** + * resource_request() - request for the Netdev resource + * @rndis_ipa_ctx: main driver context + * + * This function shall send the IPA resource manager inactivity time a request + * to Grant the Netdev producer. + * In case the resource is already Granted the function shall return immediately + * and "pet" the inactivity timer. 
+ * In case the resource was not already Granted this function shall + * return EINPROGRESS and the Netdev shall stop the send queue until + * the IPA resource manager notify it that the resource is + * granted (done in a differ context) + */ +static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + int result = 0; + + if (!rm_enabled(rndis_ipa_ctx)) + return result; + + if (ipa_pm_is_used()) + return ipa_pm_activate(rndis_ipa_ctx->pm_hdl); + + return ipa_rm_inactivity_timer_request_resource( + DRV_RESOURCE_ID); + +} + +/** + * resource_release() - release the Netdev resource + * @rndis_ipa_ctx: main driver context + * + * start the inactivity timer count down.by using the IPA resource + * manager inactivity time. + * The actual resource release shall occur only if no request shall be done + * during the INACTIVITY_MSEC_DELAY. + */ +static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + if (!rm_enabled(rndis_ipa_ctx)) + return; + if (ipa_pm_is_used()) + ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl); + else + ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID); +} /** * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with @@ -1906,6 +2154,19 @@ static bool tx_filter(struct sk_buff *skb) return true; } +/** + * rm_enabled() - allow the use of resource manager Request/Release to + * be bypassed + * @rndis_ipa_ctx: main driver context + * + * By disabling the resource manager flag the Request for the Netdev resource + * shall be bypassed and the packet shall be sent. + * accordingly, Release request shall be bypass as well. 
+ */ +static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx) +{ + return rndis_ipa_ctx->rm_enable; +} /** * rndis_ipa_ep_registers_cfg() - configure the USB endpoints @@ -2181,6 +2442,14 @@ static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx) goto fail_file; } + file = debugfs_create_bool + ("rm_enable", flags_read_write, + rndis_ipa_ctx->directory, &rndis_ipa_ctx->rm_enable); + if (!file) { + RNDIS_IPA_ERROR("could not create debugfs rm file\n"); + goto fail_file; + } + file = debugfs_create_u32 ("outstanding_high", flags_read_write, rndis_ipa_ctx->directory, diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h index f765bda3a83e..4a313ad450c4 100644 --- a/drivers/platform/msm/ipa/ipa_common_i.h +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -428,6 +428,8 @@ int ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx); const char *ipa_get_version_string(enum ipa_hw_type ver); int ipa_start_gsi_channel(u32 clnt_hdl); +bool ipa_pm_is_used(void); + int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr, struct sg_table *in_sgt_ptr); int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr); -- GitLab From 62aff8e48c2eea44af2417e49e5c543f643a0cd6 Mon Sep 17 00:00:00 2001 From: Ashok Raj Deenadayalan Date: Thu, 27 Aug 2020 18:59:50 +0530 Subject: [PATCH 1270/1304] defcong: sdm660 : Adding support to IPA driver Adding support to IPA driver for SDM660 target. 
Change-Id: I9f6259970a02c6423d8616036bb1e2cd3018de37 Signed-off-by: Praveen Kurapati Signed-off-by: Ashok Raj Deenadayalan Signed-off-by: Swetha Chikkaboraiah --- arch/arm64/configs/vendor/sdm660-perf_defconfig | 5 ++++- arch/arm64/configs/vendor/sdm660_defconfig | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index d4ce5a0aa9fa..3bc00bd42eb8 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -524,7 +524,10 @@ CONFIG_ION_POOL_AUTO_REFILL=y CONFIG_QPNP_REVID=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_GSI=y +CONFIG_IPA3=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_RNDIS_IPA=y CONFIG_MSM_11AD=m CONFIG_USB_BAM=y CONFIG_MDSS_PLL=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index b4ea97330263..0148b819776d 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -558,7 +558,10 @@ CONFIG_ION_POOL_AUTO_REFILL=y CONFIG_QPNP_REVID=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_GSI=y +CONFIG_IPA3=y +CONFIG_IPA=y +CONFIG_RMNET_IPA=y +CONFIG_RNDIS_IPA=y CONFIG_MSM_11AD=m CONFIG_USB_BAM=y CONFIG_MDSS_PLL=y -- GitLab From 2613c7fe9ce6cab36c11d5a1be4d4845cd1184f5 Mon Sep 17 00:00:00 2001 From: Mukesh Kumar Savaliya Date: Wed, 22 Jul 2020 11:45:50 +0530 Subject: [PATCH 1271/1304] i3c: i3c-master-qcom-geni: Log error if DMA mode fails This change adds IPC error logs if there is any failure while executing DMA mode transfer. Driver gracefully switches to the FIFO mode though, this change can help debug future issue in mode switching. 
Change-Id: Ifdcb0a42b13e4ecd3fd3bc70f46b377adb63236c Signed-off-by: Mukesh Kumar Savaliya --- drivers/i3c/master/i3c-master-qcom-geni.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c index 2ba3c128c6ce..8bc2bb6ec334 100644 --- a/drivers/i3c/master/i3c-master-qcom-geni.c +++ b/drivers/i3c/master/i3c-master-qcom-geni.c @@ -458,11 +458,11 @@ static void qcom_geni_i3c_conf(struct geni_i3c_dev *gi3c, static void geni_i3c_err(struct geni_i3c_dev *gi3c, int err) { if (gi3c->cur_rnw == WRITE_TRANSACTION) - GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "len:%d, write\n", - gi3c->cur_len); + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, + "%s:Error: Write, len:%d\n", __func__, gi3c->cur_len); else - GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "len:%d, read\n", - gi3c->cur_len); + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, + "%s:Error: Read, len:%d\n", __func__, gi3c->cur_len); GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "%s\n", gi3c_log[err].msg); gi3c->err = gi3c_log[err].err; @@ -775,6 +775,8 @@ static int _i3c_geni_execute_command gi3c->se.base, gi3c->cur_buf, len, &rx_dma); if (ret) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "DMA Err:%d, FIFO mode enabled\n", ret); xfer->mode = FIFO_MODE; geni_se_select_mode(gi3c->se.base, xfer->mode); } @@ -793,6 +795,8 @@ static int _i3c_geni_execute_command gi3c->se.base, gi3c->cur_buf, len, &tx_dma); if (ret) { + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "DMA Err:%d, FIFO mode enabled\n", ret); xfer->mode = FIFO_MODE; geni_se_select_mode(gi3c->se.base, xfer->mode); } @@ -1122,6 +1126,10 @@ static int geni_i3c_master_priv_xfers << SLV_ADDR_SHFT); xfer.m_param |= (use_7e) ? 
USE_7E : 0; + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, + "%s: stall:%d,use_7e:%d, nxfers:%d,i:%d,m_param:0x%x,rnw:%d\n", + __func__, stall, use_7e, nxfers, i, xfer.m_param, xfers[i].rnw); + /* Update use_7e status for next loop iteration */ use_7e = !stall; @@ -1211,6 +1219,7 @@ static int geni_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev) if (!data) return -ENOMEM; + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "%s\n", __func__); i2c_dev_set_master_data(dev, data); return 0; @@ -1371,6 +1380,8 @@ static bool geni_i3c_master_supports_ccc_cmd const struct i3c_ccc_cmd *cmd ) { + struct geni_i3c_dev *gi3c = to_geni_i3c_master(m); + switch (cmd->id) { case I3C_CCC_ENEC(true): /* fallthrough */ @@ -1428,6 +1439,8 @@ static bool geni_i3c_master_supports_ccc_cmd break; } + GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, + "%s: Unsupported cmnd\n", __func__); return false; } -- GitLab From 04e2774ee8d8f9b5fad0858f620ffedc38fa6541 Mon Sep 17 00:00:00 2001 From: Mukesh Kumar Savaliya Date: Fri, 2 Oct 2020 02:00:14 +0530 Subject: [PATCH 1272/1304] i3c: i3c-master-qcom-geni: Save master device info to debug list This change stores each probed master device to the global device list. This will help to debug and know the master device information easily. 
Change-Id: I1987aa54f1b24280a3b6a2436bbe1e5deccb8b6e Signed-off-by: Mukesh Kumar Savaliya --- drivers/i3c/master/i3c-master-qcom-geni.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c index 8bc2bb6ec334..800c1ad73a10 100644 --- a/drivers/i3c/master/i3c-master-qcom-geni.c +++ b/drivers/i3c/master/i3c-master-qcom-geni.c @@ -208,6 +208,8 @@ enum geni_i3c_err_code { #define IBI_SW_RESET_MIN_SLEEP 1000 #define IBI_SW_RESET_MAX_SLEEP 2000 +#define MAX_I3C_SE 2 + enum i3c_trans_dir { WRITE_TRANSACTION = 0, READ_TRANSACTION = 1 @@ -328,6 +330,9 @@ struct geni_i3c_clk_fld { static void geni_i3c_enable_ibi_ctrl(struct geni_i3c_dev *gi3c, bool enable); static void geni_i3c_enable_ibi_irq(struct geni_i3c_dev *gi3c, bool enable); +static struct geni_i3c_dev *i3c_geni_dev[MAX_I3C_SE]; +static int i3c_nos; + static struct geni_i3c_dev* to_geni_i3c_master(struct i3c_master_controller *master) { @@ -1996,6 +2001,9 @@ static int geni_i3c_probe(struct platform_device *pdev) if (!gi3c->ipcl) dev_info(&pdev->dev, "Error creating IPC Log\n"); + if (i3c_nos < MAX_I3C_SE) + i3c_geni_dev[i3c_nos++] = gi3c; + ret = i3c_geni_rsrcs_init(gi3c, pdev); if (ret) { GENI_SE_ERR(gi3c->ipcl, false, gi3c->se.dev, @@ -2117,7 +2125,7 @@ static int geni_i3c_probe(struct platform_device *pdev) static int geni_i3c_remove(struct platform_device *pdev) { struct geni_i3c_dev *gi3c = platform_get_drvdata(pdev); - int ret = 0; + int ret = 0, i; //Disable hot-join, until next probe happens geni_i3c_enable_hotjoin_irq(gi3c, false); @@ -2143,6 +2151,11 @@ static int geni_i3c_remove(struct platform_device *pdev) /* TBD : If we need debug for previous session, Don't delete logs */ if (gi3c->ipcl) ipc_log_context_destroy(gi3c->ipcl); + + for (i = 0; i < i3c_nos; i++) + i3c_geni_dev[i] = NULL; + i3c_nos = 0; + return ret; } -- GitLab From d9fc9718f5db6efaeccb29693e169f13796f23f0 Mon Sep 17 00:00:00 
2001 From: Mukesh Kumar Savaliya Date: Fri, 9 Oct 2020 00:12:11 +0530 Subject: [PATCH 1273/1304] i3c: i3c-master-qcom-geni: Handle timeout for DMA FSM reset This change adds timeout handling scenario for the DMA TX/TX reset sequence and adds the proper error logs. If return value from API wait_for_completion_timeout() is not taken cared, may mislead the debug. Also Fix the issue of not initializing the completion variable during command execution time. Change-Id: I01442e611ea531bbd6124c0b5f4b6c8da9c79281 Signed-off-by: Mukesh Kumar Savaliya --- drivers/i3c/master/i3c-master-qcom-geni.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c index 800c1ad73a10..12b864485060 100644 --- a/drivers/i3c/master/i3c-master-qcom-geni.c +++ b/drivers/i3c/master/i3c-master-qcom-geni.c @@ -761,6 +761,7 @@ static int _i3c_geni_execute_command enum i3c_trans_dir rnw = gi3c->cur_rnw; u32 len = gi3c->cur_len; + reinit_completion(&gi3c->done); geni_se_select_mode(gi3c->se.base, xfer->mode); gi3c->err = 0; @@ -810,8 +811,8 @@ static int _i3c_geni_execute_command writel_relaxed(1, gi3c->se.base + SE_GENI_TX_WATERMARK_REG); } - time_remaining = wait_for_completion_timeout(&gi3c->done, - XFER_TIMEOUT); + + time_remaining = wait_for_completion_timeout(&gi3c->done, XFER_TIMEOUT); if (!time_remaining) { unsigned long flags; @@ -854,7 +855,11 @@ static int _i3c_geni_execute_command else writel_relaxed(1, gi3c->se.base + SE_DMA_TX_FSM_RST); + time_remaining = wait_for_completion_timeout(&gi3c->done, XFER_TIMEOUT); + if (!time_remaining) + GENI_SE_ERR(gi3c->ipcl, true, gi3c->se.dev, + "Timeout:FSM Reset, rnw:%d\n", rnw); } geni_se_rx_dma_unprep(gi3c->se.i3c_rsc.wrapper_dev, rx_dma, len); -- GitLab From f8e955b2bc1e50be8e534fabeacbe0c1db76520e Mon Sep 17 00:00:00 2001 From: Ashok Raj Deenadayalan Date: Fri, 28 Aug 2020 22:54:18 +0530 Subject: [PATCH 1274/1304] defconfig: arm64: msm: enable 
CONFIG_FORCE_ALLOC_FROM_DMA_ZONE This option helps ensure that clients who require ZONE_DMA memory are always using ZONE_DMA memory. Change-Id: I87785b9fcf9302f84ce952dc38e2d8e90e7f47b4 Signed-off-by: Praveen Kurapati Signed-off-by: Prakash Gupta Signed-off-by: Ashok Raj Deenadayalan Signed-off-by: Swetha Chikkaboraiah --- arch/arm64/configs/vendor/sdm660-perf_defconfig | 1 + arch/arm64/configs/vendor/sdm660_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 3bc00bd42eb8..8e4972820b46 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -119,6 +119,7 @@ CONFIG_CMA_AREAS=8 CONFIG_ZSMALLOC=y CONFIG_BALANCE_ANON_FILE_RECLAIM=y CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y +CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index 0148b819776d..ea123ba74b5e 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -125,6 +125,7 @@ CONFIG_CMA_AREAS=8 CONFIG_ZSMALLOC=y CONFIG_BALANCE_ANON_FILE_RECLAIM=y CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER=y +CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y -- GitLab From 4938c963305887e7ed96e81293e2fd0b1aca6796 Mon Sep 17 00:00:00 2001 From: Ziqi Chen Date: Tue, 22 Sep 2020 16:52:40 +0800 Subject: [PATCH 1275/1304] scsi: ufs: Fix ufshcd_hold dead loop issue if error recovery is handing Commit 53c12d0ef6fc ("scsi: ufs: fix error recovery after the hibern8 exit failure") would leave hba->clk_gating.active_reqs++ and skip subsequent actions in ufshcd_hold() if error handling is in progress. It may cause next ufschcd_release() queue a new clk gate work into hytimer even though the previous clk gate work has not yet got finish. 
Under this corner case, ufshcd_gate_work() may change uic_link_state to UIC_LINK_HIBERN8_STATE at the heels of setting clk state to CLK_ON by ufshcd_hold() and then run into ufshcd_hold dead loop. To fix this issue, we need to ensure there is no any pending and running clk gate work before changing clk state to CLK_ON in ufshcd_hold(). Change-Id: I25fe35f2cad18f8a77fccf40755d856ee670594d Signed-off-by: Ziqi Chen --- drivers/scsi/ufs/ufshcd.c | 7 ++++++- drivers/scsi/ufs/ufshcd.h | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 1bc4e3fbe743..38464a8ac63e 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2237,7 +2237,9 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) * If the timer was active but the callback was not running * we have nothing to do, just change state and return. */ - if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) { + if ((hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) + && !(work_pending(&hba->clk_gating.gate_work)) + && !hba->clk_gating.gate_wk_in_process) { hba->clk_gating.state = CLKS_ON; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); @@ -2289,7 +2291,9 @@ static void ufshcd_gate_work(struct work_struct *work) clk_gating.gate_work); unsigned long flags; + hba->clk_gating.gate_wk_in_process = true; spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->clk_gating.state == CLKS_OFF) goto rel_lock; /* @@ -2365,6 +2369,7 @@ static void ufshcd_gate_work(struct work_struct *work) rel_lock: spin_unlock_irqrestore(hba->host->host_lock, flags); out: + hba->clk_gating.gate_wk_in_process = false; return; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index a32091884dcc..ccdcfa4ed1ac 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -480,6 +480,7 @@ struct ufs_clk_gating { struct device_attribute delay_perf_attr; struct device_attribute enable_attr; bool 
is_enabled; + bool gate_wk_in_process; int active_reqs; struct workqueue_struct *clk_gating_workq; }; -- GitLab From 101138f967208a051062a0e64a4888f93334b700 Mon Sep 17 00:00:00 2001 From: Tony Truong Date: Fri, 16 Oct 2020 13:49:28 -0700 Subject: [PATCH 1276/1304] msm: pcie: update with link power on check for user PCIe resume Instead of checking PCIe S/W link state, check if the link is on is on or off to determine whether PCIe resume should be allowed or not. PCIe S/W link state is not enough to determine since MSM_PCIE_LINK_DOWN state can be set for link on or off. Change-Id: I23a27007eb90134c90d5a20034248697423ebeeb Signed-off-by: Tony Truong --- drivers/pci/controller/pci-msm.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c index d3bc48683db7..c123c1efc24b 100644 --- a/drivers/pci/controller/pci-msm.c +++ b/drivers/pci/controller/pci-msm.c @@ -7298,12 +7298,10 @@ int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user, break; } - if (msm_pcie_dev[rc_idx].link_status != - MSM_PCIE_LINK_DISABLED) { + if (msm_pcie_dev[rc_idx].power_on) { PCIE_ERR(&msm_pcie_dev[rc_idx], - "PCIe: RC%d: requested to resume when link is not disabled:%d. Number of active EP(s): %d\n", - rc_idx, msm_pcie_dev[rc_idx].link_status, - msm_pcie_dev[rc_idx].num_active_ep); + "PCIe: RC%d: requested to resume when link is already powered on. Number of active EP(s): %d\n", + rc_idx, msm_pcie_dev[rc_idx].num_active_ep); break; } -- GitLab From 15330ab94fd2cb84672cdcd880f1600f2b9cd0b8 Mon Sep 17 00:00:00 2001 From: Veerabhadrarao Badiganti Date: Tue, 27 Aug 2019 18:07:12 +0530 Subject: [PATCH 1277/1304] scsi: ufs: Increase fDeviceInit flag poll time to 5sec Few UFS devices are taking a long time to finish its internal initialization. UFS driver polls fDeviceInit flag which indicates device initialization completion. 
Increase this flag poll time to 5sec to cover support for those UFS devices as well. Change-Id: I99e1ed54d57c9fd75bbe1b22ea30553b1ce0fb83 Signed-off-by: Veerabhadrarao Badiganti --- drivers/scsi/ufs/ufshcd.c | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 1bc4e3fbe743..5f07ab518edc 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5686,9 +5686,10 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); */ static int ufshcd_complete_dev_init(struct ufs_hba *hba) { - int i; + int i = 0; int err; bool flag_res = 1; + ktime_t timeout; err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, QUERY_FLAG_IDN_FDEVICEINIT, NULL); @@ -5699,10 +5700,30 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba) goto out; } - /* poll for max. 1000 iterations for fDeviceInit flag to clear */ - for (i = 0; i < 1000 && !err && flag_res; i++) + /* + * Some vendor devices are taking longer time to complete its internal + * initialization, so set fDeviceInit flag poll time to 5 secs + */ + timeout = ktime_add_ms(ktime_get(), 5000); + + /* poll for max. 5sec for fDeviceInit flag to clear */ + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); + QUERY_FLAG_IDN_FDEVICEINIT, &flag_res); + if (err || !flag_res || timedout) + break; + + /* + * Poll for this flag in a tight loop for first 1000 iterations. + * This is same as old logic which is working for most of the + * devices, so continue using the same. 
+ */ + if (i == 1000) + msleep(20); + else + i++; + } if (err) dev_err(hba->dev, -- GitLab From 912c8df355c6c3f88f30a2d3b098da551e7ca687 Mon Sep 17 00:00:00 2001 From: Asha Magadi Venkateshamurthy Date: Wed, 14 Oct 2020 17:47:11 +0530 Subject: [PATCH 1278/1304] defconfig: Enable CONFIG_LEGACY_ENERGY_MODEL_DT for sdm660 Enable CONFIG_LEGACY_ENERGY_MODEL_DT for sdm660. Change-Id: If40fd441d7b68445facddfd114dc86a6a27a3c0d Signed-off-by: Asha Magadi Venkateshamurthy --- arch/arm64/configs/vendor/sdm660-perf_defconfig | 1 + arch/arm64/configs/vendor/sdm660_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 8e4972820b46..3fce78febaf6 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -637,6 +637,7 @@ CONFIG_NVMEM_SPMI_SDAM=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_SENSORS_SSC=y CONFIG_QCOM_KGSL=y +CONFIG_LEGACY_ENERGY_MODEL_DT=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index ea123ba74b5e..05d1f057e49e 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -676,6 +676,7 @@ CONFIG_NVMEM_SPMI_SDAM=y CONFIG_SLIMBUS_MSM_NGD=y CONFIG_SENSORS_SSC=y CONFIG_QCOM_KGSL=y +CONFIG_LEGACY_ENERGY_MODEL_DT=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -- GitLab From 9f512d047978c62ddef252015f53a71af8d9bf61 Mon Sep 17 00:00:00 2001 From: Naman Padhiar Date: Mon, 19 Oct 2020 20:22:15 +0530 Subject: [PATCH 1279/1304] icnss2: Send power save enter/exit via SMP2P Send power save enter/exit via SMP2P instead of QMI. 
Change-Id: I8aeeab3efd2f7c39e4e7ac134af98c58827ff6d5 Signed-off-by: Naman Padhiar --- drivers/soc/qcom/icnss2/main.c | 82 +++++++++++++++++++++++++++++----- drivers/soc/qcom/icnss2/main.h | 12 ++++- 2 files changed, 81 insertions(+), 13 deletions(-) diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c index a24323780645..54e3b820bc56 100644 --- a/drivers/soc/qcom/icnss2/main.c +++ b/drivers/soc/qcom/icnss2/main.c @@ -41,6 +41,8 @@ #include #include #include +#include +#include #include "main.h" #include "qmi.h" #include "debug.h" @@ -2757,6 +2759,8 @@ EXPORT_SYMBOL(icnss_idle_restart); int icnss_exit_power_save(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); + unsigned int value = 0; + int ret; icnss_pr_dbg("Calling Exit Power Save\n"); @@ -2764,8 +2768,18 @@ int icnss_exit_power_save(struct device *dev) !test_bit(ICNSS_MODE_ON, &priv->state)) return 0; - return wlfw_power_save_send_msg(priv, - (enum wlfw_power_save_mode_v01)ICNSS_POWER_SAVE_EXIT); + value |= priv->smp2p_info.seq++; + value <<= ICNSS_SMEM_SEQ_NO_POS; + value |= ICNSS_POWER_SAVE_EXIT; + ret = qcom_smem_state_update_bits( + priv->smp2p_info.smem_state, + ICNSS_SMEM_VALUE_MASK, + value); + if (ret) + icnss_pr_dbg("Error in SMP2P sent ret: %d\n", ret); + + icnss_pr_dbg("SMP2P sent value: 0x%X\n", value); + return ret; } EXPORT_SYMBOL(icnss_exit_power_save); @@ -3150,6 +3164,20 @@ static void icnss_init_control_params(struct icnss_priv *priv) } } +static inline void icnss_get_smp2p_info(struct icnss_priv *priv) +{ + + priv->smp2p_info.smem_state = + qcom_smem_state_get(&priv->pdev->dev, + "wlan-smp2p-out", + &priv->smp2p_info.smem_bit); + if (IS_ERR(priv->smp2p_info.smem_state)) { + icnss_pr_dbg("Failed to get smem state %d", + PTR_ERR(priv->smp2p_info.smem_state)); + } + +} + static inline void icnss_runtime_pm_init(struct icnss_priv *priv) { pm_runtime_get_sync(&priv->pdev->dev); @@ -3271,6 +3299,7 @@ static int icnss_probe(struct platform_device *pdev) 
icnss_runtime_pm_init(priv); icnss_get_cpr_info(priv); + icnss_get_smp2p_info(priv); set_bit(ICNSS_COLD_BOOT_CAL, &priv->state); } @@ -3340,6 +3369,7 @@ static int icnss_remove(struct platform_device *pdev) static int icnss_pm_suspend(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); + unsigned int value = 0; int ret = 0; if (priv->magic != ICNSS_MAGIC) { @@ -3351,6 +3381,7 @@ static int icnss_pm_suspend(struct device *dev) icnss_pr_vdbg("PM Suspend, state: 0x%lx\n", priv->state); if (!priv->ops || !priv->ops->pm_suspend || + IS_ERR(priv->smp2p_info.smem_state) || !test_bit(ICNSS_DRIVER_PROBED, &priv->state)) return 0; @@ -3358,11 +3389,23 @@ static int icnss_pm_suspend(struct device *dev) if (ret == 0) { if (priv->device_id == WCN6750_DEVICE_ID) { - ret = wlfw_power_save_send_msg(priv, - (enum wlfw_power_save_mode_v01) - ICNSS_POWER_SAVE_ENTER); + if (test_bit(ICNSS_PD_RESTART, &priv->state) || + !test_bit(ICNSS_MODE_ON, &priv->state)) + return 0; + + value |= priv->smp2p_info.seq++; + value <<= ICNSS_SMEM_SEQ_NO_POS; + value |= ICNSS_POWER_SAVE_ENTER; + + ret = qcom_smem_state_update_bits( + priv->smp2p_info.smem_state, + ICNSS_SMEM_VALUE_MASK, + value); if (ret) - return priv->ops->pm_resume(dev); + icnss_pr_dbg("Error in SMP2P sent ret: %d\n", + ret); + + icnss_pr_dbg("SMP2P sent value: 0x%X\n", value); } priv->stats.pm_suspend++; set_bit(ICNSS_PM_SUSPEND, &priv->state); @@ -3386,6 +3429,7 @@ static int icnss_pm_resume(struct device *dev) icnss_pr_vdbg("PM resume, state: 0x%lx\n", priv->state); if (!priv->ops || !priv->ops->pm_resume || + IS_ERR(priv->smp2p_info.smem_state) || !test_bit(ICNSS_DRIVER_PROBED, &priv->state)) goto out; @@ -3462,6 +3506,7 @@ static int icnss_pm_resume_noirq(struct device *dev) static int icnss_pm_runtime_suspend(struct device *dev) { struct icnss_priv *priv = dev_get_drvdata(dev); + unsigned int value = 0; int ret = 0; if (priv->magic != ICNSS_MAGIC) { @@ -3470,17 +3515,29 @@ static int 
icnss_pm_runtime_suspend(struct device *dev) return -EINVAL; } - if (!priv->ops || !priv->ops->runtime_suspend) + if (!priv->ops || !priv->ops->runtime_suspend || + IS_ERR(priv->smp2p_info.smem_state)) goto out; icnss_pr_vdbg("Runtime suspend\n"); ret = priv->ops->runtime_suspend(dev); if (!ret) { - ret = wlfw_power_save_send_msg(priv, - (enum wlfw_power_save_mode_v01) - ICNSS_POWER_SAVE_ENTER); + if (test_bit(ICNSS_PD_RESTART, &priv->state) || + !test_bit(ICNSS_MODE_ON, &priv->state)) + return 0; + + value |= priv->smp2p_info.seq++; + value <<= ICNSS_SMEM_SEQ_NO_POS; + value |= ICNSS_POWER_SAVE_ENTER; + + ret = qcom_smem_state_update_bits( + priv->smp2p_info.smem_state, + ICNSS_SMEM_VALUE_MASK, + value); if (ret) - return priv->ops->runtime_resume(dev); + icnss_pr_dbg("Error in SMP2P sent ret: %d\n", ret); + + icnss_pr_dbg("SMP2P sent value: 0x%X\n", value); } out: return ret; @@ -3497,7 +3554,8 @@ static int icnss_pm_runtime_resume(struct device *dev) return -EINVAL; } - if (!priv->ops || !priv->ops->runtime_resume) + if (!priv->ops || !priv->ops->runtime_resume || + IS_ERR(priv->smp2p_info.smem_state)) goto out; icnss_pr_vdbg("Runtime resume, state: 0x%lx\n", priv->state); diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h index dc740b19ae91..5be127da214d 100644 --- a/drivers/soc/qcom/icnss2/main.h +++ b/drivers/soc/qcom/icnss2/main.h @@ -23,6 +23,9 @@ #define ADRASTEA_DEVICE_ID 0xabcd #define QMI_WLFW_MAX_NUM_MEM_SEG 32 #define THERMAL_NAME_LENGTH 20 +#define ICNSS_SMEM_VALUE_MASK 0xFFFFFFFF +#define ICNSS_SMEM_SEQ_NO_POS 16 + extern uint64_t dynamic_feature_mask; enum icnss_bdf_type { @@ -170,7 +173,7 @@ struct icnss_fw_mem { }; enum icnss_power_save_mode { - ICNSS_POWER_SAVE_ENTER, + ICNSS_POWER_SAVE_ENTER = 1, ICNSS_POWER_SAVE_EXIT, }; struct icnss_stats { @@ -311,6 +314,12 @@ struct icnss_thermal_cdev { struct thermal_cooling_device *tcdev; }; +struct smp2p_out_info { + unsigned short seq; + unsigned int smem_bit; + struct 
qcom_smem_state *smem_state; +}; + struct icnss_priv { uint32_t magic; struct platform_device *pdev; @@ -387,6 +396,7 @@ struct icnss_priv { struct mutex dev_lock; uint32_t fw_error_fatal_irq; uint32_t fw_early_crash_irq; + struct smp2p_out_info smp2p_info; struct completion unblock_shutdown; struct adc_tm_param vph_monitor_params; struct adc_tm_chip *adc_tm_dev; -- GitLab From 10239303838cf651962ad5e6ebb4585ffda2abf5 Mon Sep 17 00:00:00 2001 From: Shadab Naseem Date: Wed, 28 Oct 2020 12:27:15 +0530 Subject: [PATCH 1280/1304] soc: qcom: Correct the module description for llcc-orchid Correct the module description for llcc orchid driver. Change-Id: Iba3ab53913e8f2be6953de2a06fc92e5700d3f62 Signed-off-by: Shadab Naseem --- drivers/soc/qcom/llcc-orchid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/llcc-orchid.c b/drivers/soc/qcom/llcc-orchid.c index 8b800a89674b..c8743d5a7fc7 100644 --- a/drivers/soc/qcom/llcc-orchid.c +++ b/drivers/soc/qcom/llcc-orchid.c @@ -84,5 +84,5 @@ static struct platform_driver orchid_qcom_llcc_driver = { }; module_platform_driver(orchid_qcom_llcc_driver); -MODULE_DESCRIPTION("QCOM orchid LLCC driver"); +MODULE_DESCRIPTION("QTI orchid LLCC driver"); MODULE_LICENSE("GPL v2"); -- GitLab From 0897d6fc4837c1cc60b3e3ec5dfc78857466b59a Mon Sep 17 00:00:00 2001 From: Prudhvi Yarlagadda Date: Tue, 9 Jun 2020 12:26:10 +0530 Subject: [PATCH 1281/1304] spi: spi_qsd: Add Shared EE property check for spi Add Shared EE property check for spi to support dual EE use case. Have the prepare_message/unprepare_message API support for spi for Dual EE use case so that client can have control for spi driver get_sync, put_sync by calling the APIs pm_runtime_get_sync and pm_runtime_put_sync_suspend and use them when switching to other EEs. This will allow client driver to remove the delay/wait time while switching between the secure and non-secure EEs. 
Change-Id: I465bc4b9036b3b9701a876825412b55b4d4134c7 Signed-off-by: Prudhvi Yarlagadda --- drivers/spi/spi_qsd.c | 97 ++++++++++++++++++++++++++++++------ include/linux/spi/qcom-spi.h | 6 ++- 2 files changed, 87 insertions(+), 16 deletions(-) diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c index aa3e69c49668..1991fbfe64d6 100644 --- a/drivers/spi/spi_qsd.c +++ b/drivers/spi/spi_qsd.c @@ -1704,28 +1704,82 @@ static int msm_spi_transfer_one(struct spi_master *master, return status_error; } -static int msm_spi_prepare_transfer_hardware(struct spi_master *master) +static int msm_spi_pm_get_sync(struct device *dev) { - struct msm_spi *dd = spi_master_get_devdata(master); - int resume_state = 0; - - resume_state = pm_runtime_get_sync(dd->dev); - if (resume_state < 0) - goto spi_finalize; + int ret; /* * Counter-part of system-suspend when runtime-pm is not enabled. * This way, resume can be left empty and device will be put in * active mode only if client requests anything on the bus */ - if (!pm_runtime_enabled(dd->dev)) - resume_state = msm_spi_pm_resume_runtime(dd->dev); + if (!pm_runtime_enabled(dev)) { + dev_info(dev, "%s: pm_runtime not enabled\n", __func__); + ret = msm_spi_pm_resume_runtime(dev); + } else { + ret = pm_runtime_get_sync(dev); + } + + return ret; +} + +static int msm_spi_pm_put_sync(struct device *dev) +{ + int ret = 0; + + if (!pm_runtime_enabled(dev)) { + dev_info(dev, "%s: pm_runtime not enabled\n", __func__); + ret = msm_spi_pm_suspend_runtime(dev); + } else { + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + } + + return ret; +} + +static int msm_spi_prepare_message(struct spi_master *master, + struct spi_message *spi_msg) +{ + struct msm_spi *dd = spi_master_get_devdata(master); + int resume_state; + + resume_state = msm_spi_pm_get_sync(dd->dev); if (resume_state < 0) - goto spi_finalize; - if (dd->suspended) { - resume_state = -EBUSY; - goto spi_finalize; + return resume_state; + + return 0; +} + +static int 
msm_spi_unprepare_message(struct spi_master *master, + struct spi_message *spi_msg) +{ + struct msm_spi *dd = spi_master_get_devdata(master); + int ret; + + ret = msm_spi_pm_put_sync(dd->dev); + if (ret < 0) + return ret; + + return 0; +} + +static int msm_spi_prepare_transfer_hardware(struct spi_master *master) +{ + struct msm_spi *dd = spi_master_get_devdata(master); + int resume_state; + + if (!dd->pdata->shared_ee) { + resume_state = msm_spi_pm_get_sync(dd->dev); + if (resume_state < 0) + goto spi_finalize; + + if (dd->suspended) { + resume_state = -EBUSY; + goto spi_finalize; + } } + return 0; spi_finalize: @@ -1736,9 +1790,14 @@ static int msm_spi_prepare_transfer_hardware(struct spi_master *master) static int msm_spi_unprepare_transfer_hardware(struct spi_master *master) { struct msm_spi *dd = spi_master_get_devdata(master); + int ret; + + if (!dd->pdata->shared_ee) { + ret = msm_spi_pm_put_sync(dd->dev); + if (ret < 0) + return ret; + } - pm_runtime_mark_last_busy(dd->dev); - pm_runtime_put_autosuspend(dd->dev); return 0; } @@ -2234,6 +2293,8 @@ static struct msm_spi_platform_data *msm_spi_dt_to_pdata( &pdata->rt_priority, DT_OPT, DT_BOOL, 0}, {"qcom,shared", &pdata->is_shared, DT_OPT, DT_BOOL, 0}, + {"qcom,shared_ee", + &pdata->shared_ee, DT_OPT, DT_BOOL, 0}, {NULL, NULL, 0, 0, 0}, }; @@ -2557,6 +2618,12 @@ static int msm_spi_probe(struct platform_device *pdev) goto err_probe_reqmem; } + /* This property is required for Dual EE use case of spi */ + if (dd->pdata->shared_ee) { + master->prepare_message = msm_spi_prepare_message; + master->unprepare_message = msm_spi_unprepare_message; + } + pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/include/linux/spi/qcom-spi.h b/include/linux/spi/qcom-spi.h index 1888fe509ee3..3d8a8f6896bf 100644 --- a/include/linux/spi/qcom-spi.h +++ b/include/linux/spi/qcom-spi.h @@ -35,7 +35,10 @@ * @bam_producer_pipe_index BAM producer 
pipe * @rt_priority true if RT thread * @use_pinctrl true if pinctrl library is used - * @is_shared true when qup is shared between ee's + * @is_shared true when qup is shared between ee's and client driver is not + in control of spi pm_runtime_get_sync/put_sync. + * @shared_ee true when qup is shared between ee's and client driver is in + control of spi pm_runtime_get_sync/put_sync. */ struct msm_spi_platform_data { u32 max_clock_speed; @@ -54,4 +57,5 @@ struct msm_spi_platform_data { bool rt_priority; bool use_pinctrl; bool is_shared; + bool shared_ee; }; -- GitLab From d8359aa1e18dbfa438d95c42af6d9abec1115d2f Mon Sep 17 00:00:00 2001 From: Jishnu Prakash Date: Tue, 27 Oct 2020 16:05:54 +0530 Subject: [PATCH 1282/1304] power: qpnp-smb5: Update legacy cable detection logic in bootup If the target is rebooted with standard USB Type-C cable connected, it may be detected wrongly as legacy cable. Fix the wrong detection by resetting TypeC mode. In addition, correct logic to avoid early return when setting TypeC mode to NONE during probe, due to enum value for NONE matching with uninitialized variable value of 0. Change-Id: I84de500a33eb9fa82617422b03d7f277253846e6 Signed-off-by: Jishnu Prakash --- drivers/power/supply/qcom/qpnp-smb5.c | 15 ++++++--------- drivers/power/supply/qcom/smb5-lib.c | 7 ++++++- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c index d76d6c98569c..b566a62b4cb7 100644 --- a/drivers/power/supply/qcom/qpnp-smb5.c +++ b/drivers/power/supply/qcom/qpnp-smb5.c @@ -2149,16 +2149,13 @@ static int smb5_configure_typec(struct smb_charger *chg) } /* - * Across reboot, standard typeC cables get detected as legacy cables - * due to VBUS attachment prior to CC attach/dettach. To handle this, - * "early_usb_attach" flag is used, which assumes that across reboot, - * the cable connected can be standard typeC. 
However, its jurisdiction - * is limited to PD capable designs only. Hence, for non-PD type designs - * reset legacy cable detection by disabling/enabling typeC mode. + * Across reboot, standard typeC cables get detected as legacy + * cables due to VBUS attachment prior to CC attach/detach. Reset + * the legacy detection logic by enabling/disabling the typeC mode. */ - if (chg->pd_not_supported && (val & TYPEC_LEGACY_CABLE_STATUS_BIT)) { + if (val & TYPEC_LEGACY_CABLE_STATUS_BIT) { pval.intval = POWER_SUPPLY_TYPEC_PR_NONE; - smblib_set_prop_typec_power_role(chg, &pval); + rc = smblib_set_prop_typec_power_role(chg, &pval); if (rc < 0) { dev_err(chg->dev, "Couldn't disable TYPEC rc=%d\n", rc); return rc; @@ -2168,7 +2165,7 @@ static int smb5_configure_typec(struct smb_charger *chg) msleep(50); pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL; - smblib_set_prop_typec_power_role(chg, &pval); + rc = smblib_set_prop_typec_power_role(chg, &pval); if (rc < 0) { dev_err(chg->dev, "Couldn't enable TYPEC rc=%d\n", rc); return rc; diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c index 4fb61100240d..03c78b2d9aad 100644 --- a/drivers/power/supply/qcom/smb5-lib.c +++ b/drivers/power/supply/qcom/smb5-lib.c @@ -4447,7 +4447,12 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg, smblib_dbg(chg, PR_MISC, "power role change: %d --> %d!", chg->power_role, val->intval); - if (chg->power_role == val->intval) { + /* + * Force the power-role if the initial value is NONE, for the + * legacy cable detection WA. 
+ */ + if (chg->power_role == val->intval && + chg->power_role != POWER_SUPPLY_TYPEC_PR_NONE) { smblib_dbg(chg, PR_MISC, "power role already in %d, ignore!", chg->power_role); return 0; -- GitLab From 7e0a3f13c5b03c916678c8f9bb006ca0df5a29e3 Mon Sep 17 00:00:00 2001 From: Archana Sriram Date: Sun, 18 Oct 2020 23:34:04 +0530 Subject: [PATCH 1283/1304] msm: kgsl: Compare pid pointer instead of TGID for a new process There is a possibility of sharing process_private between two unrelated processes due to PID wrapping. In kgsl_process_private_new(), instead of checking numeric TGID, compare the unique pid pointer of the current process with that of the existing processes in kgsl process list to allow sharing of process_private data judiciously. Also, in all required functions get TGID/PID of a process from its struct pid. Change-Id: I0e3d5d79275cdb3f3c304fb36322ad56b0d0b227 Signed-off-by: Archana Sriram --- drivers/gpu/msm/adreno_debugfs.c | 2 +- drivers/gpu/msm/adreno_dispatch.c | 2 +- drivers/gpu/msm/adreno_profile.c | 4 ++-- drivers/gpu/msm/kgsl.c | 31 ++++++++++++++++++------------- drivers/gpu/msm/kgsl_debugfs.c | 9 +++++---- drivers/gpu/msm/kgsl_device.h | 8 ++++---- drivers/gpu/msm/kgsl_iommu.c | 6 +++--- drivers/gpu/msm/kgsl_sharedmem.c | 6 +++--- drivers/gpu/msm/kgsl_trace.h | 10 +++++----- 9 files changed, 42 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index eb729b87f936..d67945724643 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -291,7 +291,7 @@ static int ctx_print(struct seq_file *s, void *unused) ctx_type_str(drawctxt->type), drawctxt->base.priority, drawctxt->base.proc_priv->comm, - drawctxt->base.proc_priv->pid, + pid_nr(drawctxt->base.proc_priv->pid), drawctxt->base.tid); seq_puts(s, "flags: "); diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 45ccff1a94f1..055afd6d6816 100644 --- 
a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -1671,7 +1671,7 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context) #define pr_fault(_d, _c, fmt, args...) \ dev_err((_d)->dev, "%s[%d]: " fmt, \ _kgsl_context_comm((_c)->context), \ - (_c)->context->proc_priv->pid, ##args) + pid_nr((_c)->context->proc_priv->pid), ##args) static void adreno_fault_header(struct kgsl_device *device, diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c index 9e3740d6a9a9..b1b26fee8745 100644 --- a/drivers/gpu/msm/adreno_profile.c +++ b/drivers/gpu/msm/adreno_profile.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. */ #include @@ -131,7 +131,7 @@ static int _build_pre_ib_cmds(struct adreno_device *adreno_dev, ibcmds += _ib_cmd_mem_write(adreno_dev, ibcmds, gpuaddr + data_offset, drawctxt->base.id, &data_offset); ibcmds += _ib_cmd_mem_write(adreno_dev, ibcmds, gpuaddr + data_offset, - drawctxt->base.proc_priv->pid, &data_offset); + pid_nr(drawctxt->base.proc_priv->pid), &data_offset); ibcmds += _ib_cmd_mem_write(adreno_dev, ibcmds, gpuaddr + data_offset, drawctxt->base.tid, &data_offset); ibcmds += _ib_cmd_mem_write(adreno_dev, ibcmds, gpuaddr + data_offset, diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 9f1eaa28f3c0..47ce35d960e3 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -589,7 +589,7 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv, if (atomic_read(&proc_priv->ctxt_count) > KGSL_MAX_CONTEXTS_PER_PROC) { dev_err(device->dev, "Per process context limit reached for pid %u\n", - dev_priv->process_priv->pid); + pid_nr(dev_priv->process_priv->pid)); spin_unlock(&proc_priv->ctxt_count_lock); return -ENOSPC; } @@ -906,6 +906,7 @@ static void kgsl_destroy_process_private(struct kref *kref) struct 
kgsl_process_private *private = container_of(kref, struct kgsl_process_private, refcount); + put_pid(private->pid); idr_destroy(&private->mem_idr); idr_destroy(&private->syncsource_idr); @@ -935,7 +936,7 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid) spin_lock(&kgsl_driver.proclist_lock); list_for_each_entry(p, &kgsl_driver.process_list, list) { - if (p->pid == pid) { + if (pid_nr(p->pid) == pid) { if (kgsl_process_private_get(p)) private = p; break; @@ -950,7 +951,7 @@ static struct kgsl_process_private *kgsl_process_private_new( struct kgsl_device *device) { struct kgsl_process_private *private; - pid_t tgid = task_tgid_nr(current); + struct pid *cur_pid = get_task_pid(current->group_leader, PIDTYPE_PID); /* * Flush mem_workqueue to make sure that any lingering @@ -961,9 +962,11 @@ static struct kgsl_process_private *kgsl_process_private_new( /* Search in the process list */ list_for_each_entry(private, &kgsl_driver.process_list, list) { - if (private->pid == tgid) { - if (!kgsl_process_private_get(private)) + if (private->pid == cur_pid) { + if (!kgsl_process_private_get(private)) { + put_pid(cur_pid); private = ERR_PTR(-EINVAL); + } return private; } } @@ -975,7 +978,7 @@ static struct kgsl_process_private *kgsl_process_private_new( kref_init(&private->refcount); - private->pid = tgid; + private->pid = cur_pid; get_task_comm(private->comm, current->group_leader); spin_lock_init(&private->mem_lock); @@ -988,12 +991,14 @@ static struct kgsl_process_private *kgsl_process_private_new( kgsl_reclaim_proc_private_init(private); /* Allocate a pagetable for the new process object */ - private->pagetable = kgsl_mmu_getpagetable(&device->mmu, tgid); + private->pagetable = kgsl_mmu_getpagetable(&device->mmu, + pid_nr(cur_pid)); if (IS_ERR(private->pagetable)) { int err = PTR_ERR(private->pagetable); idr_destroy(&private->mem_idr); idr_destroy(&private->syncsource_idr); + put_pid(private->pid); kfree(private); private = ERR_PTR(err); @@ -2172,7 +2177,7 
@@ long gpumem_free_entry(struct kgsl_mem_entry *entry) return -EBUSY; trace_kgsl_mem_free(entry); - kgsl_memfree_add(entry->priv->pid, + kgsl_memfree_add(pid_nr(entry->priv->pid), entry->memdesc.pagetable ? entry->memdesc.pagetable->name : 0, entry->memdesc.gpuaddr, entry->memdesc.size, @@ -2195,7 +2200,7 @@ static void gpumem_free_func(struct kgsl_device *device, /* Free the memory for all event types */ trace_kgsl_mem_timestamp_free(device, entry, KGSL_CONTEXT_ID(context), timestamp, 0); - kgsl_memfree_add(entry->priv->pid, + kgsl_memfree_add(pid_nr(entry->priv->pid), entry->memdesc.pagetable ? entry->memdesc.pagetable->name : 0, entry->memdesc.gpuaddr, entry->memdesc.size, @@ -2295,7 +2300,7 @@ static bool gpuobj_free_fence_func(void *priv) struct kgsl_mem_entry *entry = priv; trace_kgsl_mem_free(entry); - kgsl_memfree_add(entry->priv->pid, + kgsl_memfree_add(pid_nr(entry->priv->pid), entry->memdesc.pagetable ? entry->memdesc.pagetable->name : 0, entry->memdesc.gpuaddr, entry->memdesc.size, @@ -4828,14 +4833,14 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr, if (IS_ERR_VALUE(val)) dev_err_ratelimited(device->dev, "get_unmapped_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n", - private->pid, addr, pgoff, len, - (int) val); + pid_nr(private->pid), addr, + pgoff, len, (int) val); } else { val = _get_svm_area(private, entry, addr, len, flags); if (IS_ERR_VALUE(val)) dev_err_ratelimited(device->dev, "_get_svm_area: pid %d mmap_base %lx addr %lx pgoff %lx len %ld failed error %d\n", - private->pid, + pid_nr(private->pid), current->mm->mmap_base, addr, pgoff, len, (int) val); } diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c index 6943c09adfc0..044b116c988f 100644 --- a/drivers/gpu/msm/kgsl_debugfs.c +++ b/drivers/gpu/msm/kgsl_debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2002,2008-2019, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2002,2008-2020, The Linux Foundation. All rights reserved. */ #include @@ -393,7 +393,7 @@ void kgsl_process_init_debugfs(struct kgsl_process_private *private) unsigned char name[16]; struct dentry *dentry; - snprintf(name, sizeof(name), "%d", private->pid); + snprintf(name, sizeof(name), "%d", pid_nr(private->pid)); private->debug_root = debugfs_create_dir(name, proc_d_debugfs); @@ -413,14 +413,15 @@ void kgsl_process_init_debugfs(struct kgsl_process_private *private) } dentry = debugfs_create_file("mem", 0444, private->debug_root, - (void *) ((unsigned long) private->pid), &process_mem_fops); + (void *) ((unsigned long) pid_nr(private->pid)), + &process_mem_fops); if (IS_ERR_OR_NULL(dentry)) WARN((dentry == NULL), "Unable to create 'mem' file for %s\n", name); dentry = debugfs_create_file("sparse_mem", 0444, private->debug_root, - (void *) ((unsigned long) private->pid), + (void *) ((unsigned long) pid_nr(private->pid)), &process_sparse_mem_fops); if (IS_ERR_OR_NULL(dentry)) diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index cc0476bb242e..9cb255046d34 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -419,13 +419,13 @@ struct kgsl_context { #define pr_context(_d, _c, fmt, args...) 
\ dev_err((_d)->dev, "%s[%d]: " fmt, \ _context_comm((_c)), \ - (_c)->proc_priv->pid, ##args) + pid_nr((_c)->proc_priv->pid), ##args) /** * struct kgsl_process_private - Private structure for a KGSL process (across * all devices) * @priv: Internal flags, use KGSL_PROCESS_* values - * @pid: ID for the task owner of the process + * @pid: Identification structure for the task owner of the process * @comm: task name of the process * @mem_lock: Spinlock to protect the process memory lists * @refcount: kref object for reference counting the process @@ -443,7 +443,7 @@ struct kgsl_context { */ struct kgsl_process_private { unsigned long priv; - pid_t pid; + struct pid *pid; char comm[TASK_COMM_LEN]; spinlock_t mem_lock; struct kref refcount; @@ -586,7 +586,7 @@ static inline void kgsl_process_sub_stats(struct kgsl_process_private *priv, struct mm_struct *mm; atomic_long_sub(size, &priv->stats[type].cur); - pid_struct = find_get_pid(priv->pid); + pid_struct = find_get_pid(pid_nr(priv->pid)); if (pid_struct) { task = get_pid_task(pid_struct, PIDTYPE_PID); if (task) { diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 6115e73ab950..f65ebfcc6afa 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -648,7 +648,7 @@ static void _get_entries(struct kgsl_process_private *private, prev->flags = p->memdesc.flags; prev->priv = p->memdesc.priv; prev->pending_free = p->pending_free; - prev->pid = private->pid; + prev->pid = pid_nr(private->pid); __kgsl_get_memory_usage(prev); } @@ -658,7 +658,7 @@ static void _get_entries(struct kgsl_process_private *private, next->flags = n->memdesc.flags; next->priv = n->memdesc.priv; next->pending_free = n->pending_free; - next->pid = private->pid; + next->pid = pid_nr(private->pid); __kgsl_get_memory_usage(next); } } @@ -787,7 +787,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain, private = kgsl_iommu_get_process(ptbase); if (private) { - pid = private->pid; + pid = 
pid_nr(private->pid); comm = private->comm; } diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c index 9dd252e62676..8a4b0ea2d806 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.c +++ b/drivers/gpu/msm/kgsl_sharedmem.c @@ -305,9 +305,9 @@ void kgsl_process_init_sysfs(struct kgsl_device *device, kgsl_process_private_get(private); if (kobject_init_and_add(&private->kobj, &process_ktype, - kgsl_driver.prockobj, "%d", private->pid)) { + kgsl_driver.prockobj, "%d", pid_nr(private->pid))) { dev_err(device->dev, "Unable to add sysfs for process %d\n", - private->pid); + pid_nr(private->pid)); return; } @@ -322,7 +322,7 @@ void kgsl_process_init_sysfs(struct kgsl_device *device, if (ret) dev_err(device->dev, "Unable to create sysfs files for process %d\n", - private->pid); + pid_nr(private->pid)); } for (i = 0; i < ARRAY_SIZE(debug_memstats); i++) { diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h index 3106e88579bf..e47b686411b1 100644 --- a/drivers/gpu/msm/kgsl_trace.h +++ b/drivers/gpu/msm/kgsl_trace.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. 
*/ #if !defined(_KGSL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) @@ -409,7 +409,7 @@ TRACE_EVENT(kgsl_mem_alloc, TP_fast_assign( __entry->gpuaddr = mem_entry->memdesc.gpuaddr; __entry->size = mem_entry->memdesc.size; - __entry->tgid = mem_entry->priv->pid; + __entry->tgid = pid_nr(mem_entry->priv->pid); kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage), mem_entry->memdesc.flags); __entry->id = mem_entry->id; @@ -502,7 +502,7 @@ TRACE_EVENT(kgsl_mem_map, __entry->size = mem_entry->memdesc.size; __entry->fd = fd; __entry->type = kgsl_memdesc_usermem_type(&mem_entry->memdesc); - __entry->tgid = mem_entry->priv->pid; + __entry->tgid = pid_nr(mem_entry->priv->pid); kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage), mem_entry->memdesc.flags); __entry->id = mem_entry->id; @@ -537,7 +537,7 @@ TRACE_EVENT(kgsl_mem_free, __entry->gpuaddr = mem_entry->memdesc.gpuaddr; __entry->size = mem_entry->memdesc.size; __entry->type = kgsl_memdesc_usermem_type(&mem_entry->memdesc); - __entry->tgid = mem_entry->priv->pid; + __entry->tgid = pid_nr(mem_entry->priv->pid); kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage), mem_entry->memdesc.flags); __entry->id = mem_entry->id; @@ -572,7 +572,7 @@ TRACE_EVENT(kgsl_mem_sync_cache, __entry->gpuaddr = mem_entry->memdesc.gpuaddr; kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage), mem_entry->memdesc.flags); - __entry->tgid = mem_entry->priv->pid; + __entry->tgid = pid_nr(mem_entry->priv->pid); __entry->id = mem_entry->id; __entry->op = op; __entry->offset = offset; -- GitLab From e79ade598974684f98be5c0a252f4a2d6a0293eb Mon Sep 17 00:00:00 2001 From: Abir Ghosh Date: Wed, 28 Oct 2020 12:03:09 +0530 Subject: [PATCH 1284/1304] qbt_handler: Memset userspace struct to zero Explicitly memset struct to zero before sending to userspace. This is to prevent padded bytes from possibly leaking kernel memory contents to userspace. 
Change-Id: Id3e873164430c34195344884b2899e61aa060dfc Signed-off-by: Abir Ghosh --- drivers/soc/qcom/qbt_handler.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/qbt_handler.c b/drivers/soc/qcom/qbt_handler.c index 9550acbe9f18..7088d6b1fc45 100644 --- a/drivers/soc/qcom/qbt_handler.c +++ b/drivers/soc/qcom/qbt_handler.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. */ #define DEBUG @@ -371,6 +371,8 @@ static long qbt_ioctl( { struct qbt_wuhb_connected_status wuhb_connected_status; + memset(&wuhb_connected_status, 0, + sizeof(wuhb_connected_status)); wuhb_connected_status.is_wuhb_connected = drvdata->is_wuhb_connected; rc = copy_to_user((void __user *)priv_arg, -- GitLab From 91a68a888447a8e889b9a1e464fc7293a2a13b17 Mon Sep 17 00:00:00 2001 From: Phanindra Babu Pabba Date: Fri, 23 Oct 2020 14:04:47 +0530 Subject: [PATCH 1285/1304] fscrypt: Handle support for v1 encryption policy FS_IOC_ADD_ENCRYPTION_KEY ioctl need to support both v1 policy with 'key_descriptor' as well along with v2 encryption policy. 
Change-Id: I7638191bc8926ba306b11c3e8a5dafdb3d396764 Signed-off-by: Phanindra Babu Pabba --- fs/crypto/keyring.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index ae081f06e149..d90d5d6c4c92 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -670,7 +670,8 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (arg.__flags) { if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED) return -EINVAL; - if (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) + if ((arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) && + (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR)) return -EINVAL; secret.is_hw_wrapped = true; } -- GitLab From c7ac25676080c821863423a44d2dd14e7408394a Mon Sep 17 00:00:00 2001 From: Praveen Kurapati Date: Wed, 21 Oct 2020 00:51:19 +0530 Subject: [PATCH 1286/1304] msm: ipa: fix the use-after-free on qmi framework in ssr scenario IPA drvier free the qmi server hdl without notify the qmi framework which is causing the use-after-free on QMI framework. The fix is to notify qmi framework before freeing the qmi handle. 
Change-Id: I1ec9d3efd29283fddd958561a538b2995222a53c Signed-off-by: Praveen Kurapati Signed-off-by: Bojun Pan --- drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index c1e3735827fc..8ee40de2c36a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -1580,6 +1580,7 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work) IPAWANERR( "ipa3_qmi_init_modem_send_sync_msg failed due to SSR!\n"); /* Cleanup when ipa3_wwan_remove is called */ + qmi_handle_release(ipa_q6_clnt); vfree(ipa_q6_clnt); ipa_q6_clnt = NULL; return; -- GitLab From 1881e0a3fedfb3733bff6571c2ad17b0138cf8fe Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 22 Aug 2019 13:00:15 +0200 Subject: [PATCH 1287/1304] timekeeping/vsyscall: Prevent math overflow in BOOTTIME update The VDSO update for CLOCK_BOOTTIME has a overflow issue as it shifts the nanoseconds based boot time offset left by the clocksource shift. That overflows once the boot time offset becomes large enough. As a consequence CLOCK_BOOTTIME in the VDSO becomes a random number causing applications to misbehave. Fix it by storing a timespec64 representation of the offset when boot time is adjusted and add that to the MONOTONIC base time value in the vdso data page. Using the timespec64 representation avoids a 64bit division in the update code. 
Change-Id: I4bee5af4e14ed1bf6c9623f5cb70f1542d7cc700 Fixes: 44f57d788e7d ("timekeeping: Provide a generic update_vsyscall() implementation") Reported-by: Chris Clayton Signed-off-by: Thomas Gleixner Tested-by: Chris Clayton Tested-by: Vincenzo Frascino Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1908221257580.1983@nanos.tec.linutronix.de Git-commit: b99328a60a482108f5195b4d611f90992ca016ba Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Signed-off-by: Prateek Sood --- include/linux/timekeeper_internal.h | 5 +++++ kernel/time/timekeeping.c | 5 +++++ kernel/time/vsyscall.c | 22 +++++++++++++--------- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 7acb953298a7..84ff2844df2a 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -57,6 +57,7 @@ struct tk_read_base { * @cs_was_changed_seq: The sequence number of clocksource change events * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds + * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset * @cycle_interval: Number of clock cycles in one NTP interval * @xtime_interval: Number of clock shifted nano seconds in one NTP * interval. @@ -84,6 +85,9 @@ struct tk_read_base { * * wall_to_monotonic is no longer the boot time, getboottime must be * used instead. + * + * @monotonic_to_boottime is a timespec64 representation of @offs_boot to + * accelerate the VDSO update for CLOCK_BOOTTIME. 
*/ struct timekeeper { struct tk_read_base tkr_mono; @@ -99,6 +103,7 @@ struct timekeeper { u8 cs_was_changed_seq; ktime_t next_leap_ktime; u64 raw_sec; + struct timespec64 monotonic_to_boot; /* The following members are for timekeeping internal use */ u64 cycle_interval; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 81ee5b83c920..9832c7ae915f 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -150,6 +150,11 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm) static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) { tk->offs_boot = ktime_add(tk->offs_boot, delta); + /* + * Timespec representation for VDSO update to avoid 64bit division + * on every update. + */ + tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot); } /* diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c index 565ab06917a7..15882e4b5c83 100644 --- a/kernel/time/vsyscall.c +++ b/kernel/time/vsyscall.c @@ -17,7 +17,7 @@ static inline void update_vdso_data(struct vdso_data *vdata, struct timekeeper *tk) { struct vdso_timestamp *vdso_ts; - u64 nsec; + u64 nsec, sec; vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last; vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; @@ -45,23 +45,27 @@ static inline void update_vdso_data(struct vdso_data *vdata, } vdso_ts->nsec = nsec; - /* CLOCK_MONOTONIC_RAW */ - vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; - vdso_ts->sec = tk->raw_sec; - vdso_ts->nsec = tk->tkr_raw.xtime_nsec; + /* Copy MONOTONIC time for BOOTTIME */ + sec = vdso_ts->sec; + /* Add the boot offset */ + sec += tk->monotonic_to_boot.tv_sec; + nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift; /* CLOCK_BOOTTIME */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME]; - vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; - nsec = tk->tkr_mono.xtime_nsec; - nsec += ((u64)(tk->wall_to_monotonic.tv_nsec + - ktime_to_ns(tk->offs_boot)) << 
tk->tkr_mono.shift); + vdso_ts->sec = sec; + while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); vdso_ts->sec++; } vdso_ts->nsec = nsec; + /* CLOCK_MONOTONIC_RAW */ + vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; + vdso_ts->sec = tk->raw_sec; + vdso_ts->nsec = tk->tkr_raw.xtime_nsec; + /* CLOCK_TAI */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; -- GitLab From 685acd34aacca80e7a636cc8fb25e4b06106ce93 Mon Sep 17 00:00:00 2001 From: Srinivasarao P Date: Tue, 3 Nov 2020 11:30:34 +0530 Subject: [PATCH 1288/1304] Revert "clk: Evict unregistered clks from parent caches" This reverts commit 903c6bd937ca84ee9e60eca6beb5c2593d0bfd4e. User build failing due to this upstream patch so reverting it. Change-Id: Icb1e697d71ee18512b8ec1e5e2f8f4d7a9b7404e Signed-off-by: Srinivasarao P --- drivers/clk/clk.c | 53 ++++++++++------------------------------------- 1 file changed, 11 insertions(+), 42 deletions(-) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 2f7d6e40d6c9..45dc77103fa9 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -52,18 +52,6 @@ struct clk_handoff_vdd { static LIST_HEAD(clk_handoff_vdd_list); static bool vdd_class_handoff_completed; static DEFINE_MUTEX(vdd_class_list_lock); - -static struct hlist_head *all_lists[] = { - &clk_root_list, - &clk_orphan_list, - NULL, -}; - -static struct hlist_head *orphan_list[] = { - &clk_orphan_list, - NULL, -}; - /* * clk_rate_change_list is used during clk_core_set_rate_nolock() calls to * handle vdd_class vote tracking. 
core->rate_change_node is added to @@ -3238,6 +3226,17 @@ static u32 debug_suspend; static DEFINE_MUTEX(clk_debug_lock); static HLIST_HEAD(clk_debug_list); +static struct hlist_head *all_lists[] = { + &clk_root_list, + &clk_orphan_list, + NULL, +}; + +static struct hlist_head *orphan_list[] = { + &clk_orphan_list, + NULL, +}; + static void clk_state_subtree(struct clk_core *c) { int vdd_level = 0; @@ -4513,34 +4512,6 @@ static const struct clk_ops clk_nodrv_ops = { .set_parent = clk_nodrv_set_parent, }; -static void clk_core_evict_parent_cache_subtree(struct clk_core *root, - struct clk_core *target) -{ - int i; - struct clk_core *child; - - for (i = 0; i < root->num_parents; i++) - if (root->parents[i] == target) - root->parents[i] = NULL; - - hlist_for_each_entry(child, &root->children, child_node) - clk_core_evict_parent_cache_subtree(child, target); -} - -/* Remove this clk from all parent caches */ -static void clk_core_evict_parent_cache(struct clk_core *core) -{ - struct hlist_head **lists; - struct clk_core *root; - - lockdep_assert_held(&prepare_lock); - - for (lists = all_lists; *lists; lists++) - hlist_for_each_entry(root, *lists, child_node) - clk_core_evict_parent_cache_subtree(root, core); - -} - /** * clk_unregister - unregister a currently registered clock * @clk: clock to unregister @@ -4579,8 +4550,6 @@ void clk_unregister(struct clk *clk) clk_core_set_parent_nolock(child, NULL); } - clk_core_evict_parent_cache(clk->core); - hlist_del_init(&clk->core->child_node); if (clk->core->prepare_count) -- GitLab From eab88e07e52e55c1e59508eec2cb95405cb2d69a Mon Sep 17 00:00:00 2001 From: Piyush Dhyani Date: Tue, 3 Nov 2020 19:46:21 +0530 Subject: [PATCH 1289/1304] msm: ipa3: Add debug logs to check unregister netdev completion time Add debug logs to check how long unregister netdev completion taking time. 
Change-Id: I0cd7ec2655d32cfdf9b758f6147628c7fa33f568 Signed-off-by: Piyush Dhyani --- drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c | 2 ++ drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c | 3 ++- drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 3 ++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c index d183dfa08736..b91d00599cb2 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c @@ -831,7 +831,9 @@ void ecm_ipa_cleanup(void *priv) ecm_ipa_rules_destroy(ecm_ipa_ctx); ecm_ipa_debugfs_destroy(ecm_ipa_ctx); + ECM_IPA_DEBUG("ECM_IPA unregister_netdev started\n"); unregister_netdev(ecm_ipa_ctx->net); + ECM_IPA_DEBUG("ECM_IPA unregister_netdev completed\n"); free_netdev(ecm_ipa_ctx->net); ECM_IPA_INFO("ECM_IPA was destroyed successfully\n"); diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c index 47bfe38f8c93..02b610899301 100644 --- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c +++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c @@ -1389,8 +1389,9 @@ void rndis_ipa_cleanup(void *private) rndis_ipa_debugfs_destroy(rndis_ipa_ctx); RNDIS_IPA_DEBUG("debugfs remove was done\n"); + RNDIS_IPA_DEBUG("RNDIS_IPA netdev unregistered started\n"); unregister_netdev(rndis_ipa_ctx->net); - RNDIS_IPA_DEBUG("netdev unregistered\n"); + RNDIS_IPA_DEBUG("RNDIS_IPA netdev unregistered completed\n"); spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags); next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index b1fc78e17e8c..42e9714157a2 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -2569,8 +2569,9 @@ static int ipa3_wwan_remove(struct platform_device *pdev) if 
(ipa3_rmnet_res.ipa_napi_enable) netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi)); mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard); - IPAWANINFO("rmnet_ipa unregister_netdev\n"); + IPAWANDBG("rmnet_ipa unregister_netdev started\n"); unregister_netdev(IPA_NETDEV()); + IPAWANDBG("rmnet_ipa unregister_netdev completed\n"); ipa3_wwan_deregister_netdev_pm_client(); cancel_work_sync(&ipa3_tx_wakequeue_work); cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work); -- GitLab From a29838ef294af3dca823cce2717364b998e8f7a5 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Fri, 2 Aug 2019 13:30:58 +0200 Subject: [PATCH 1290/1304] nl80211: add 6GHz band definition to enum nl80211_band In the 802.11ax specification a new band is introduced, which is also proposed by FCC for unlicensed use. This band is referred to as 6GHz spanning frequency range from 5925 to 7125 MHz. Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Leon Zegers Signed-off-by: Arend van Spriel Link: https://lore.kernel.org/r/1564745465-21234-2-git-send-email-arend.vanspriel@broadcom.com Signed-off-by: Johannes Berg Git-commit: c5b9a7f826735228a38fab4a7b2707f032468c88 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git Change-Id: I3f6581e8b4b1bdd2fe76390d93304c1f115d8105 CRs-Fixed: 2536728 Signed-off-by: Liangwei Dong --- include/uapi/linux/nl80211.h | 2 ++ net/mac80211/tx.c | 1 + 2 files changed, 3 insertions(+) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index e98d90b5e6a4..e8bca08b2697 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4499,6 +4499,7 @@ enum nl80211_txrate_gi { * @NL80211_BAND_2GHZ: 2.4 GHz ISM band * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz) + * @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz) * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ 
-4506,6 +4507,7 @@ enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ, NL80211_BAND_60GHZ, + NL80211_BAND_6GHZ, NUM_NL80211_BANDS, }; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3160ffd93a15..7d7eb75ce901 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -166,6 +166,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, break; } case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: if (r->flags & IEEE80211_RATE_MANDATORY_A) mrate = r->bitrate; break; -- GitLab From 49dc746ccf5cff5d145b6ec58504b29f277abe31 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Fri, 2 Aug 2019 13:30:59 +0200 Subject: [PATCH 1291/1304] cfg80211: add 6GHz UNII band definitions For the new 6GHz there are new UNII band definitions as listed in the FCC notice [1]. [1] https://docs.fcc.gov/public/attachments/FCC-18-147A1_Rcd.pdf. Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Leon Zegers Signed-off-by: Arend van Spriel Link: https://lore.kernel.org/r/1564745465-21234-3-git-send-email-arend.vanspriel@broadcom.com Signed-off-by: Johannes Berg Git-commit: f89769cfdd5a469c9d5791a06a670d424e847477 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git Change-Id: I15fcecab9ac149195907f460c28bff7a9020a4d5 CRs-Fixed: 2536735 Signed-off-by: Liangwei Dong --- net/wireless/reg.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 64959ab85561..66d2a69bceb6 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -3829,8 +3829,9 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy) } /* - * See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii, for - * UNII band definitions + * See FCC notices for UNII band definitions + * 5GHz: https://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii + * 6GHz: https://www.fcc.gov/document/fcc-proposes-more-spectrum-unlicensed-use-0 */ int cfg80211_get_unii(int freq) { @@ -3854,6 +3855,22 @@ int 
cfg80211_get_unii(int freq) if (freq > 5725 && freq <= 5825) return 4; + /* UNII-5 */ + if (freq > 5925 && freq <= 6425) + return 5; + + /* UNII-6 */ + if (freq > 6425 && freq <= 6525) + return 6; + + /* UNII-7 */ + if (freq > 6525 && freq <= 6875) + return 7; + + /* UNII-8 */ + if (freq > 6875 && freq <= 7125) + return 8; + return -EINVAL; } -- GitLab From 9dc05b294003a02138556c64e761587df4471eaa Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Fri, 2 Aug 2019 13:31:00 +0200 Subject: [PATCH 1292/1304] cfg80211: util: add 6GHz channel to freq conversion and vice versa Extend the functions ieee80211_channel_to_frequency() and ieee80211_frequency_to_channel() to support 6GHz band according specification in 802.11ax D4.1 27.3.22.2. Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Leon Zegers Signed-off-by: Arend van Spriel Link: https://lore.kernel.org/r/1564745465-21234-4-git-send-email-arend.vanspriel@broadcom.com Signed-off-by: Johannes Berg Git-commit: fa1f1085bc063da5a44f779c9b655b7026c52d68 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git Change-Id: I74d18fd05c7180ee8411334096392c5470c0d8ae CRs-Fixed: 2536741 Signed-off-by: Liangwei Dong --- net/wireless/util.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/net/wireless/util.c b/net/wireless/util.c index ddc6d95e65c3..89fb0dc9695d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -87,6 +87,11 @@ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band) else return 5000 + chan * 5; break; + case NL80211_BAND_6GHZ: + /* see 802.11ax D4.1 27.3.22.2 */ + if (chan <= 253) + return 5940 + chan * 5; + break; case NL80211_BAND_60GHZ: if (chan < 5) return 56160 + chan * 2160; @@ -107,8 +112,11 @@ int ieee80211_frequency_to_channel(int freq) return (freq - 2407) / 5; else if (freq >= 4910 && freq <= 4980) return (freq - 4000) / 5; - else if (freq <= 45000) /* DMG band lower limit */ + else if (freq < 5940) return (freq - 5000) / 5; + 
else if (freq <= 45000) /* DMG band lower limit */ + /* see 802.11ax D4.1 27.3.22.2 */ + return (freq - 5940) / 5; else if (freq >= 58320 && freq <= 64800) return (freq - 56160) / 2160; else -- GitLab From 49f4ef3dc7a187f2f8792e217d335c6b4a4f19e7 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Fri, 2 Aug 2019 13:31:01 +0200 Subject: [PATCH 1293/1304] cfg80211: extend ieee80211_operating_class_to_band() for 6GHz Add 6GHz operating class range as defined in 802.11ax D4.1 Annex E. Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Leon Zegers Signed-off-by: Arend van Spriel Link: https://lore.kernel.org/r/1564745465-21234-5-git-send-email-arend.vanspriel@broadcom.com Signed-off-by: Johannes Berg Git-commit: 852f04620e5b7c27eadbf81d086d04f61431c9dc Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git Change-Id: Ie2a375417c88eaad69909e6ab9875a6f69747293 CRs-Fixed: 2536745 Signed-off-by: Liangwei Dong --- net/wireless/util.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/wireless/util.c b/net/wireless/util.c index 89fb0dc9695d..52c2ac4d79e3 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1489,6 +1489,9 @@ bool ieee80211_operating_class_to_band(u8 operating_class, case 128 ... 130: *band = NL80211_BAND_5GHZ; return true; + case 131 ... 135: + *band = NL80211_BAND_6GHZ; + return true; case 81: case 82: case 83: -- GitLab From 58c511c8ae6a87749d833e6b1a270d64db8dc988 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Fri, 2 Aug 2019 13:31:02 +0200 Subject: [PATCH 1294/1304] cfg80211: add 6GHz in code handling array with NUM_NL80211_BANDS entries In nl80211.c there is a policy for all bands in NUM_NL80211_BANDS and in trace.h there is a callback trace for multicast rates which is per band in NUM_NL80211_BANDS. Both need to be extended for the new NL80211_BAND_6GHZ. 
Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Leon Zegers Signed-off-by: Arend van Spriel Link: https://lore.kernel.org/r/1564745465-21234-6-git-send-email-arend.vanspriel@broadcom.com Signed-off-by: Johannes Berg Git-commit: e548a1c36b11ccf56627e5a2581409e2f27a6ac4 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git Change-Id: Ie470a251eb9dbf86890ac5f2bc88b238e680817f CRs-Fixed: 2536753 Signed-off-by: Liangwei Dong --- net/wireless/trace.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 713c15d6c5e6..d039ec0f98c9 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -3222,10 +3222,11 @@ TRACE_EVENT(rdev_set_mcast_rate, sizeof(int) * NUM_NL80211_BANDS); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " - "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 60GHz=0x%x]", + "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 6GHz=0x%x, 60GHz=0x%x]", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mcast_rate[NL80211_BAND_2GHZ], __entry->mcast_rate[NL80211_BAND_5GHZ], + __entry->mcast_rate[NL80211_BAND_6GHZ], __entry->mcast_rate[NL80211_BAND_60GHZ]) ); -- GitLab From b185f070a5017079c5405780aff6e5d58c98abe8 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Fri, 2 Aug 2019 13:31:03 +0200 Subject: [PATCH 1295/1304] cfg80211: use same IR permissive rules for 6GHz band The function cfg80211_ir_permissive_chan() is applicable for 6GHz band as well so make sure it is handled. 
Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Leon Zegers Signed-off-by: Arend van Spriel Link: https://lore.kernel.org/r/1564745465-21234-7-git-send-email-arend.vanspriel@broadcom.com Signed-off-by: Johannes Berg Git-commit: 0816e6b1177adb4f120767434c67441c30de10d2 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git Change-Id: I33357ab63abca47e918dc4711122142b9755567a CRs-Fixed: 2536758 Signed-off-by: Liangwei Dong --- net/wireless/chan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 5d5333a56f4f..46f65757ccc5 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -1050,7 +1050,8 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy, if (chan == other_chan) return true; - if (chan->band != NL80211_BAND_5GHZ) + if (chan->band != NL80211_BAND_5GHZ && + chan->band != NL80211_BAND_6GHZ) continue; r1 = cfg80211_get_unii(chan->center_freq); -- GitLab From 6f4c4e16d754db53a01649fa00e7a2befb729732 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20=C5=BBenczykowski?= Date: Wed, 23 Sep 2020 13:18:15 -0700 Subject: [PATCH 1296/1304] net/ipv4: always honour route mtu during forwarding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Documentation/networking/ip-sysctl.txt:46 says: ip_forward_use_pmtu - BOOLEAN By default we don't trust protocol path MTUs while forwarding because they could be easily forged and can lead to unwanted fragmentation by the router. You only need to enable this if you have user-space software which tries to discover path mtus by itself and depends on the kernel honoring this information. This is normally not the case. 
Default: 0 (disabled) Possible values: 0 - disabled 1 - enabled Which makes it pretty clear that setting it to 1 is a potential security/safety/DoS issue, and yet it is entirely reasonable to want forwarded traffic to honour explicitly administrator configured route mtus (instead of defaulting to device mtu). Indeed, I can't think of a single reason why you wouldn't want to. Since you configured a route mtu you probably know better... It is pretty common to have a higher device mtu to allow receiving large (jumbo) frames, while having some routes via that interface (potentially including the default route to the internet) specify a lower mtu. Note that ipv6 forwarding uses device mtu unless the route is locked (in which case it will use the route mtu). This approach is not usable for IPv4 where an 'mtu lock' on a route also has the side effect of disabling TCP path mtu discovery via disabling the IPv4 DF (don't frag) bit on all outgoing frames. I'm not aware of a way to lock a route from an IPv6 RA, so that also potentially seems wrong. Signed-off-by: Maciej Żenczykowski Cc: Eric Dumazet Cc: Willem de Bruijn Cc: Lorenzo Colitti Cc: Sunmeet Gill (Sunny) Cc: Vinay Paradkar Cc: Tyler Wear Cc: David Ahern Reviewed-by: Eric Dumazet (Backported from commit 02a1b175b0e92d9e0fa5df3957ade8d733ceb6a0). 
Change-Id: I26f336f891711f0149b2835d2c4a78fc8407b5ba Git-Commit: 02a1b175b0e92d9e0fa5df3957ade8d733ceb6a0 Git-repo: https://kernel.googlesource.com/pub/scm/linux/kernel/git/stable/linux Signed-off-by: Sauvik Saha --- include/net/ip.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/net/ip.h b/include/net/ip.h index d584d025f229..00f3e910e8f8 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -402,12 +402,18 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, bool forwarding) { struct net *net = dev_net(dst->dev); + unsigned int mtu; if (net->ipv4.sysctl_ip_fwd_use_pmtu || ip_mtu_locked(dst) || !forwarding) return dst_mtu(dst); + /* 'forwarding = true' case should always honour route mtu */ + mtu = dst_metric_raw(dst, RTAX_MTU); + if (mtu) + return mtu; + return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); } -- GitLab From b2934c576ed08d780539a170c915d90b2b2e061b Mon Sep 17 00:00:00 2001 From: Satish Kodishala Date: Tue, 3 Nov 2020 21:40:06 +0530 Subject: [PATCH 1297/1304] Correct the FM port numbers for Chk 3.x Correct the FM port numbers for Chk 3.x. They are currently reversed. 
CRs-Fixed: 2801744 Change-Id: Id713e373b5db015830f9dc597525e1cfd5519888 Signed-off-by: Satish Kodishala --- drivers/bluetooth/btfm_slim_slave.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/btfm_slim_slave.h b/drivers/bluetooth/btfm_slim_slave.h index 67e08a6ce667..48b9e84c9f53 100644 --- a/drivers/bluetooth/btfm_slim_slave.h +++ b/drivers/bluetooth/btfm_slim_slave.h @@ -71,8 +71,8 @@ #define SLAVE_SB_PGD_PORT_TX_SCO 0 #define SLAVE_SB_PGD_PORT_TX1_FM 1 #define SLAVE_SB_PGD_PORT_TX2_FM 2 -#define CHRKVER3_SB_PGD_PORT_TX1_FM 4 -#define CHRKVER3_SB_PGD_PORT_TX2_FM 5 +#define CHRKVER3_SB_PGD_PORT_TX1_FM 5 +#define CHRKVER3_SB_PGD_PORT_TX2_FM 4 #define SLAVE_SB_PGD_PORT_RX_SCO 16 #define SLAVE_SB_PGD_PORT_RX_A2P 17 -- GitLab From 54584f4f5b4084ee6703556b14d9448cfd3449f8 Mon Sep 17 00:00:00 2001 From: Anirudh Raghavendra Date: Wed, 29 Jul 2020 16:01:39 -0700 Subject: [PATCH 1298/1304] msm:adsprpc: Prevent use after free in fastrpc_set_process_info Serialize kzalloc in fastrpc_set_process_info and prevent use after free. 
Change-Id: I02d62182a234ef40ce33165247fc578f6727d27a Signed-off-by: Anirudh Raghavendra --- drivers/char/adsprpc.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 5a0b4c8712ee..7218b5b8890d 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -476,6 +476,8 @@ struct fastrpc_file { /* Flag to enable PM wake/relax voting for every remote invoke */ int wake_enable; uint32_t ws_timeout; + /* To indicate attempt has been made to allocate memory for debug_buf */ + int debug_buf_alloced_attempted; }; static struct fastrpc_apps gfa; @@ -4071,6 +4073,14 @@ static int fastrpc_set_process_info(struct fastrpc_file *fl) if (debugfs_root) { buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1; + + spin_lock(&fl->hlock); + if (fl->debug_buf_alloced_attempted) { + spin_unlock(&fl->hlock); + return err; + } + fl->debug_buf_alloced_attempted = 1; + spin_unlock(&fl->hlock); fl->debug_buf = kzalloc(buf_size, GFP_KERNEL); if (!fl->debug_buf) { err = -ENOMEM; -- GitLab From ad2719777ab23e0dc5ee01998d4d645a31d49696 Mon Sep 17 00:00:00 2001 From: Swetha Chikkaboraiah Date: Tue, 13 Oct 2020 12:41:05 +0530 Subject: [PATCH 1299/1304] defconfig: Enable VETH config Enabling VETH config for supporting SDM660. 
Change-Id: Iaead9a865bdd9eeaac21b6cc8ab17c6e420d178d Signed-off-by: Swetha Chikkaboraiah --- arch/arm64/configs/vendor/sdm660-perf_defconfig | 1 + arch/arm64/configs/vendor/sdm660_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 3fce78febaf6..4c2ba1189778 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -318,6 +318,7 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y CONFIG_SKY2=y CONFIG_RMNET=y CONFIG_SMSC911X=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index 05d1f057e49e..e282ba72ac74 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -328,6 +328,7 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y CONFIG_RMNET=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y -- GitLab From f77cf4a5dcf552b953401334484e4e2d30a103ac Mon Sep 17 00:00:00 2001 From: Swetha Chikkaboraiah Date: Tue, 13 Oct 2020 14:11:46 +0530 Subject: [PATCH 1300/1304] defconfig: Disable CRYPTO_MD4 config MD4 is a weak cryptography so disabling it for SDM660. 
Change-Id: Ic7fc98874284684027a576d4552a53f6488e7e57 Signed-off-by: Swetha Chikkaboraiah --- arch/arm64/configs/vendor/sdm660-perf_defconfig | 1 - arch/arm64/configs/vendor/sdm660_defconfig | 1 - 2 files changed, 2 deletions(-) diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 4c2ba1189778..50c711e6d740 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -675,7 +675,6 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCE=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index e282ba72ac74..d6aea82da4b8 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -716,7 +716,6 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SMACK=y CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_TWOFISH=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_CRYPTO_DEV_QCE=y -- GitLab From fa54b671acfef3de0cc8848ac301978752da8bd5 Mon Sep 17 00:00:00 2001 From: Swetha Chikkaboraiah Date: Tue, 13 Oct 2020 14:43:16 +0530 Subject: [PATCH 1301/1304] defconfig: Sync with Android-4.19 configs SDM660 configuration to be in sync with Android-4.19 configs. 
Change-Id: I151cf447eefe4bd54a835ebbd3cec2bfecb9dd86 Signed-off-by: Swetha Chikkaboraiah --- arch/arm64/configs/vendor/sdm660-perf_defconfig | 1 + arch/arm64/configs/vendor/sdm660_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 50c711e6d740..5f4b860122f3 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -633,6 +633,7 @@ CONFIG_QCOM_L2_COUNTERS=y CONFIG_RAS=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDERFS=y CONFIG_QCOM_QFPROM=y CONFIG_NVMEM_SPMI_SDAM=y CONFIG_SLIMBUS_MSM_NGD=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index d6aea82da4b8..18d316c82aaa 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -672,6 +672,7 @@ CONFIG_QCOM_L2_COUNTERS=y CONFIG_RAS=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ANDROID_BINDERFS=y CONFIG_QCOM_QFPROM=y CONFIG_NVMEM_SPMI_SDAM=y CONFIG_SLIMBUS_MSM_NGD=y -- GitLab From 7a3263d2afa9d20de837d0092211f56aa8228ac0 Mon Sep 17 00:00:00 2001 From: Swetha Chikkaboraiah Date: Tue, 13 Oct 2020 15:55:07 +0530 Subject: [PATCH 1302/1304] defconfig: For support api_30 kernel changes Enabling Mandatory defconfigs For supporting sdm660. 
Change-Id: I557424eb779eb29104cff9241ad439c8a56fc835 Signed-off-by: Swetha Chikkaboraiah --- arch/arm64/configs/vendor/sdm660-perf_defconfig | 9 +++++---- arch/arm64/configs/vendor/sdm660_defconfig | 9 +++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 5f4b860122f3..5134806ad520 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -17,11 +17,11 @@ CONFIG_RCU_FAST_NO_HZ=y CONFIG_RCU_NOCB_CPU=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y @@ -41,6 +41,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_FHANDLE is not set CONFIG_KALLSYMS_ALL=y CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set @@ -67,7 +68,6 @@ CONFIG_ARM64_SW_TTBR0_PAN=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y CONFIG_COMPAT=y -CONFIG_PM_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 # CONFIG_PM_WAKELOCKS_GC is not set @@ -95,7 +95,6 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_PANIC_ON_REFCOUNT_ERROR=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y @@ -265,6 +264,7 @@ CONFIG_NET_ACT_SKBEDIT=y CONFIG_DNS_RESOLVER=y CONFIG_QRTR=y CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y CONFIG_SOCKEV_NLMCAST=y CONFIG_BT=y CONFIG_MSM_BT_POWER=y @@ -277,6 +277,7 @@ CONFIG_RFKILL=y CONFIG_NFC_NQ=y CONFIG_FW_LOADER_USER_HELPER=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +# CONFIG_FW_CACHE is not set CONFIG_REGMAP_WCD_IRQ=y CONFIG_DMA_CMA=y CONFIG_MTD=m @@ -344,6 +345,7 @@ CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set 
CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_SYNAPTICS_DSX is not set # CONFIG_TOUCHSCREEN_SYNAPTICS_TCM is not set @@ -460,7 +462,6 @@ CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y -CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index 18d316c82aaa..9a055034e77f 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -16,12 +16,12 @@ CONFIG_RCU_FAST_NO_HZ=y CONFIG_RCU_NOCB_CPU=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_BLK_CGROUP=y CONFIG_DEBUG_BLK_CGROUP=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y @@ -42,6 +42,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y # CONFIG_FHANDLE is not set CONFIG_KALLSYMS_ALL=y CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_SLAB_FREELIST_RANDOM=y @@ -67,7 +68,6 @@ CONFIG_ARM64_SW_TTBR0_PAN=y # CONFIG_ARM64_VHE is not set CONFIG_RANDOMIZE_BASE=y CONFIG_COMPAT=y -CONFIG_PM_AUTOSLEEP=y CONFIG_PM_WAKELOCKS=y CONFIG_PM_WAKELOCKS_LIMIT=0 # CONFIG_PM_WAKELOCKS_GC is not set @@ -97,7 +97,6 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_PANIC_ON_REFCOUNT_ERROR=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SIG=y CONFIG_MODULE_SIG_FORCE=y @@ -272,6 +271,7 @@ CONFIG_NET_ACT_SKBEDIT=y CONFIG_DNS_RESOLVER=y CONFIG_QRTR=y CONFIG_QRTR_SMD=y +CONFIG_BPF_JIT=y CONFIG_SOCKEV_NLMCAST=y CONFIG_BT=y CONFIG_MSM_BT_POWER=y @@ -285,6 +285,7 @@ CONFIG_RFKILL=y CONFIG_NFC_NQ=y CONFIG_FW_LOADER_USER_HELPER=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +# CONFIG_FW_CACHE is not set CONFIG_REGMAP_WCD_IRQ=y 
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y CONFIG_DMA_CMA=y @@ -352,6 +353,7 @@ CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y CONFIG_INPUT_TOUCHSCREEN=y # CONFIG_TOUCHSCREEN_SYNAPTICS_DSX is not set # CONFIG_TOUCHSCREEN_SYNAPTICS_TCM is not set @@ -489,7 +491,6 @@ CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y -CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y -- GitLab From de64739e792b9ff4f30a2956a92b4620354ba421 Mon Sep 17 00:00:00 2001 From: Swetha Chikkaboraiah Date: Wed, 30 Sep 2020 14:48:00 +0530 Subject: [PATCH 1303/1304] defconfig: sdm660: Enable CONFIG_HID_NINTENDO for sdm660 Enable CONFIG_HID_NINTENDO for sdm660 Change-Id: I4f321c6c5e11564641e4f26e82ceb76b86df8498 Signed-off-by: Swetha Chikkaboraiah --- arch/arm64/configs/vendor/sdm660-perf_defconfig | 1 + arch/arm64/configs/vendor/sdm660_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/vendor/sdm660-perf_defconfig b/arch/arm64/configs/vendor/sdm660-perf_defconfig index 5134806ad520..8cb6bfe1ef50 100644 --- a/arch/arm64/configs/vendor/sdm660-perf_defconfig +++ b/arch/arm64/configs/vendor/sdm660-perf_defconfig @@ -460,6 +460,7 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y diff --git a/arch/arm64/configs/vendor/sdm660_defconfig b/arch/arm64/configs/vendor/sdm660_defconfig index 9a055034e77f..52a871b84b8e 100644 --- a/arch/arm64/configs/vendor/sdm660_defconfig +++ b/arch/arm64/configs/vendor/sdm660_defconfig @@ -489,6 +489,7 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NINTENDO=y CONFIG_HID_PLANTRONICS=y CONFIG_HID_SONY=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -- GitLab From efab7ef9d1bd5d1367670c35872169ee51240ef7 Mon 
Sep 17 00:00:00 2001 From: priyankar Date: Mon, 12 Oct 2020 16:54:17 +0530 Subject: [PATCH 1304/1304] Bluetooth: Implement a minimum off-time for AON discharge issue Sometimes it is seen that AON LDO output takes minimum off-time (worst case 100ms) to fully discharge. If BT is turned on in this timeframe when AON is not fully discharged yet ,BT would fail to turn ON. This fix ensures to: Toggle BT_EN high when WL_EN is already high. Handle corner case if WL_EN goes low in step 1. Add 100ms to toggle BT_EN high when WL_EN is low. Change-Id: I7b035d3c91e4b73564a866f40ebdff25f1a315b4 Signed-off-by: priyankar --- drivers/bluetooth/bluetooth-power.c | 100 ++++++++++++++++++++++++---- include/linux/bluetooth-power.h | 2 + 2 files changed, 89 insertions(+), 13 deletions(-) diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c index 4568608ed6f5..cee4fe2b2bb5 100644 --- a/drivers/bluetooth/bluetooth-power.c +++ b/drivers/bluetooth/bluetooth-power.c @@ -288,6 +288,67 @@ static int bt_clk_disable(struct bt_power_clk_data *clk) return rc; } +static int bt_enable_bt_reset_gpios_safely(void) +{ + int rc = 0; + int bt_reset_gpio = bt_power_pdata->bt_gpio_sys_rst; + int wl_reset_gpio = bt_power_pdata->wl_gpio_sys_rst; + + if (wl_reset_gpio >= 0) { + BT_PWR_INFO("%s: BTON:Turn Bt On", __func__); + BT_PWR_INFO("%s: wl-reset-gpio(%d) value(%d)", + __func__, wl_reset_gpio, + gpio_get_value(wl_reset_gpio)); + } + + if ((wl_reset_gpio < 0) || + ((wl_reset_gpio >= 0) && + gpio_get_value(wl_reset_gpio))) { + BT_PWR_INFO("%s: BTON: Asserting BT_EN", + __func__); + rc = gpio_direction_output(bt_reset_gpio, 1); + if (rc) { + BT_PWR_ERR("%s: Unable to set direction", + __func__); + return rc; + } + bt_power_src_status[BT_RESET_GPIO] = + gpio_get_value(bt_reset_gpio); + } + + if ((wl_reset_gpio >= 0) && + (gpio_get_value(wl_reset_gpio) == 0)) { + if (gpio_get_value(bt_reset_gpio)) { + BT_PWR_INFO("%s: Wlan Off and BT On too close", + __func__); + 
BT_PWR_INFO("%s: Reset BT_EN", __func__); + BT_PWR_INFO("%s: Enable it after delay", + __func__); + rc = gpio_direction_output(bt_reset_gpio, 0); + if (rc) { + BT_PWR_ERR("%s:Unable to set direction", + __func__); + return rc; + } + bt_power_src_status[BT_RESET_GPIO] = + gpio_get_value(bt_reset_gpio); + } + BT_PWR_INFO("%s: 100ms delay added", __func__); + BT_PWR_INFO("%s: for AON output to fully discharge", + __func__); + msleep(100); + rc = gpio_direction_output(bt_reset_gpio, 1); + if (rc) { + BT_PWR_ERR("%s: Unable to set direction", + __func__); + return rc; + } + bt_power_src_status[BT_RESET_GPIO] = + gpio_get_value(bt_reset_gpio); + } + return rc; +} + static int bt_configure_gpios(int on) { int rc = 0; @@ -312,7 +373,7 @@ static int bt_configure_gpios(int on) bt_power_src_status[BT_RESET_GPIO] = gpio_get_value(bt_reset_gpio); msleep(50); - BT_PWR_INFO("BTON:Turn Bt Off bt-reset-gpio(%d) value(%d)\n", + BT_PWR_INFO("BTON:Turn Bt Off bt-reset-gpio(%d) value(%d)", bt_reset_gpio, gpio_get_value(bt_reset_gpio)); if (bt_sw_ctrl_gpio >= 0) { BT_PWR_INFO("BTON:Turn Bt Off"); @@ -323,14 +384,12 @@ static int bt_configure_gpios(int on) bt_power_src_status[BT_SW_CTRL_GPIO]); } - rc = gpio_direction_output(bt_reset_gpio, 1); - + rc = bt_enable_bt_reset_gpios_safely(); if (rc) { - BT_PWR_ERR("Unable to set direction\n"); - return rc; + BT_PWR_ERR("%s:bt_enable_bt_reset_gpios_safely failed", + __func__); } - bt_power_src_status[BT_RESET_GPIO] = - gpio_get_value(bt_reset_gpio); + msleep(50); /* Check if SW_CTRL is asserted */ if (bt_sw_ctrl_gpio >= 0) { @@ -384,6 +443,18 @@ static int bt_configure_gpios(int on) return rc; } +static void bt_free_gpios(void) +{ + if (bt_power_pdata->bt_gpio_sys_rst > 0) + gpio_free(bt_power_pdata->bt_gpio_sys_rst); + if (bt_power_pdata->wl_gpio_sys_rst > 0) + gpio_free(bt_power_pdata->wl_gpio_sys_rst); + if (bt_power_pdata->bt_gpio_sw_ctrl > 0) + gpio_free(bt_power_pdata->bt_gpio_sw_ctrl); + if (bt_power_pdata->bt_gpio_debug > 0) + 
gpio_free(bt_power_pdata->bt_gpio_debug); +} + static int bluetooth_power(int on) { int rc = 0; @@ -547,12 +618,9 @@ static int bluetooth_power(int on) if (bt_power_pdata->bt_gpio_sys_rst > 0) bt_configure_gpios(on); gpio_fail: - if (bt_power_pdata->bt_gpio_sys_rst > 0) - gpio_free(bt_power_pdata->bt_gpio_sys_rst); - if (bt_power_pdata->bt_gpio_sw_ctrl > 0) - gpio_free(bt_power_pdata->bt_gpio_sw_ctrl); - if (bt_power_pdata->bt_gpio_debug > 0) - gpio_free(bt_power_pdata->bt_gpio_debug); + //Free Gpios + bt_free_gpios(); + if (bt_power_pdata->bt_chip_clk) bt_clk_disable(bt_power_pdata->bt_chip_clk); clk_fail: @@ -821,6 +889,12 @@ static int bt_power_populate_dt_pinfo(struct platform_device *pdev) if (bt_power_pdata->bt_gpio_sys_rst < 0) BT_PWR_INFO("bt-reset-gpio not provided in devicetree"); + bt_power_pdata->wl_gpio_sys_rst = + of_get_named_gpio(pdev->dev.of_node, + "qca,wl-reset-gpio", 0); + if (bt_power_pdata->wl_gpio_sys_rst < 0) + BT_PWR_INFO("wl-reset-gpio not provided in devicetree"); + bt_power_pdata->bt_gpio_sw_ctrl = of_get_named_gpio(pdev->dev.of_node, "qca,bt-sw-ctrl-gpio", 0); diff --git a/include/linux/bluetooth-power.h b/include/linux/bluetooth-power.h index 04614d491318..4e41cfd2bdee 100644 --- a/include/linux/bluetooth-power.h +++ b/include/linux/bluetooth-power.h @@ -52,6 +52,8 @@ struct bluetooth_power_platform_data { int bt_gpio_sys_rst; /* Bluetooth sw_ctrl gpio */ int bt_gpio_sw_ctrl; + /* Wlan reset gpio */ + int wl_gpio_sys_rst; /* Bluetooth debug gpio */ int bt_gpio_debug; struct device *slim_dev; -- GitLab