[curro:for-edward 17/17] drivers/gpu/drm/i915/gt/intel_lrc.c:2680:25: error: ISO C90 forbids mixed declarations and code
by kernel test robot
tree: https://github.com/curro/linux for-edward
head: 9ba143107276ffb71f116efd181424c070805213
commit: 9ba143107276ffb71f116efd181424c070805213 [17/17] DEBUG
config: x86_64-randconfig-s021-20200930 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce:
# apt-get install sparse
# sparse version: v0.6.2-201-g24bdaac6-dirty
# https://github.com/curro/linux/commit/9ba143107276ffb71f116efd181424c0708...
git remote add curro https://github.com/curro/linux
git fetch --no-tags curro for-edward
git checkout 9ba143107276ffb71f116efd181424c070805213
# save the attached .config to linux build tree
make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
drivers/gpu/drm/i915/gt/intel_lrc.c: In function 'process_csb':
>> drivers/gpu/drm/i915/gt/intel_lrc.c:2680:25: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
2680 | bool trace = false;
| ^~~~
drivers/gpu/drm/i915/gt/intel_lrc.c:2712:25: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
2712 | bool trace = false;
| ^~~~
cc1: all warnings being treated as errors
vim +2680 drivers/gpu/drm/i915/gt/intel_lrc.c
2559
2560 static void process_csb(struct intel_engine_cs *engine)
2561 {
2562 struct intel_engine_execlists * const execlists = &engine->execlists;
2563 const u32 * const buf = execlists->csb_status;
2564 const u8 num_entries = execlists->csb_size;
2565 u8 head, tail;
2566
2567 /*
2568 * As we modify our execlists state tracking we require exclusive
2569 * access. Either we are inside the tasklet, or the tasklet is disabled
2570 * and we assume that is only inside the reset paths and so serialised.
2571 */
2572 GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
2573 !reset_in_progress(execlists));
2574 GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
2575
2576 /*
2577 * Note that csb_write, csb_status may be either in HWSP or mmio.
2578 * When reading from the csb_write mmio register, we have to be
2579 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
2580 * the low 4bits. As it happens we know the next 4bits are always
2581 * zero and so we can simply mask off the low u8 of the register
2582 * and treat it identically to reading from the HWSP (without having
2583 * to use explicit shifting and masking, and probably bifurcating
2584 * the code to handle the legacy mmio read).
2585 */
2586 head = execlists->csb_head;
2587 tail = READ_ONCE(*execlists->csb_write);
2588 if (unlikely(head == tail))
2589 return;
2590
2591 /*
2592 * We will consume all events from HW, or at least pretend to.
2593 *
2594 * The sequence of events from the HW is deterministic, and derived
2595 * from our writes to the ELSP, with a smidgen of variability for
2596 * the arrival of the asynchronous requests wrt to the inflight
2597 * execution. If the HW sends an event that does not correspond with
2598 * the one we are expecting, we have to abandon all hope as we lose
2599 * all tracking of what the engine is actually executing. We will
2600 * only detect we are out of sequence with the HW when we get an
2601 * 'impossible' event because we have already drained our own
2602 * preemption/promotion queue. If this occurs, we know that we likely
2603 * lost track of execution earlier and must unwind and restart, the
2604 * simplest way is to stop processing the event queue and force the
2605 * engine to reset.
2606 */
2607 execlists->csb_head = tail;
2608 ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
2609
2610 /*
2611 * Hopefully paired with a wmb() in HW!
2612 *
2613 * We must complete the read of the write pointer before any reads
2614 * from the CSB, so that we do not see stale values. Without an rmb
2615 * (lfence) the HW may speculatively perform the CSB[] reads *before*
2616 * we perform the READ_ONCE(*csb_write).
2617 */
2618 rmb();
2619 do {
2620 bool promote;
2621
2622 if (++head == num_entries)
2623 head = 0;
2624
2625 /*
2626 * We are flying near dragons again.
2627 *
2628 * We hold a reference to the request in execlist_port[]
2629 * but no more than that. We are operating in softirq
2630 * context and so cannot hold any mutex or sleep. That
2631 * prevents us stopping the requests we are processing
2632 * in port[] from being retired simultaneously (the
2633 * breadcrumb will be complete before we see the
2634 * context-switch). As we only hold the reference to the
2635 * request, any pointer chasing underneath the request
2636 * is subject to a potential use-after-free. Thus we
2637 * store all of the bookkeeping within port[] as
2638 * required, and avoid using unguarded pointers beneath
2639 * request itself. The same applies to the atomic
2640 * status notifier.
2641 */
2642
2643 ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
2644 head, buf[2 * head + 0], buf[2 * head + 1]);
2645
2646 if (INTEL_GEN(engine->i915) >= 12)
2647 promote = gen12_csb_parse(execlists, buf + 2 * head);
2648 else
2649 promote = gen8_csb_parse(execlists, buf + 2 * head);
2650 if (promote) {
2651 struct i915_request * const *old = execlists->active;
2652
2653 if (GEM_WARN_ON(!*execlists->pending)) {
2654 execlists->error_interrupt |= ERROR_CSB;
2655 break;
2656 }
2657
2658 ring_set_paused(engine, 0);
2659
2660 /* Point active to the new ELSP; prevent overwriting */
2661 WRITE_ONCE(execlists->active, execlists->pending);
2662 smp_wmb(); /* notify execlists_active() */
2663
2664 /* cancel old inflight, prepare for switch */
2665 trace_ports(execlists, "preempted", old);
2666 while (*old)
2667 execlists_schedule_out(*old++);
2668
2669 /* switch pending to inflight */
2670 GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
2671 memcpy(execlists->inflight,
2672 execlists->pending,
2673 execlists_num_ports(execlists) *
2674 sizeof(*execlists->pending));
2675 smp_wmb(); /* complete the seqlock */
2676 WRITE_ONCE(execlists->active, execlists->inflight);
2677
2678 WRITE_ONCE(execlists->pending[0], NULL);
2679
> 2680 bool trace = false;
2681 if (!atomic_xchg(&execlists->busy, 1)) {
2682 if ((engine->gt->qos.debug & 1))
2683 intel_qos_overload_begin(&engine->gt->qos);
2684 trace = true;
2685 }
2686
2687 if (execlists->inflight[1]) {
2688 if (!atomic_xchg(&execlists->overload, 1)) {
2689 if (!(engine->gt->qos.debug & 1))
2690 intel_qos_overload_begin(&engine->gt->qos);
2691 trace = true;
2692 }
2693 } else {
2694 if (atomic_xchg(&execlists->overload, 0)) {
2695 if (!(engine->gt->qos.debug & 1))
2696 intel_qos_overload_end(&engine->gt->qos);
2697 trace = true;
2698 }
2699 }
2700
2701 if (trace)
2702 trace_status(engine);
2703 } else {
2704 if (GEM_WARN_ON(!*execlists->active)) {
2705 execlists->error_interrupt |= ERROR_CSB;
2706 break;
2707 }
2708
2709 /* port0 completed, advanced to port1 */
2710 trace_ports(execlists, "completed", execlists->active);
2711
2712 bool trace = false;
2713 if (atomic_xchg(&execlists->overload, 0)) {
2714 if (!(engine->gt->qos.debug & 1))
2715 intel_qos_overload_end(&engine->gt->qos);
2716 trace = true;
2717 }
2718
2719 /*
2720 * We rely on the hardware being strongly
2721 * ordered, that the breadcrumb write is
2722 * coherent (visible from the CPU) before the
2723 * user interrupt is processed. One might assume
2724 * that the breadcrumb write being before the
2725 * user interrupt and the CS event for the context
2726 * switch would therefore be before the CS event
2727 * itself...
2728 */
2729 if (GEM_SHOW_DEBUG() &&
2730 !i915_request_completed(*execlists->active)) {
2731 struct i915_request *rq = *execlists->active;
2732 const u32 *regs __maybe_unused =
2733 rq->context->lrc_reg_state;
2734
2735 ENGINE_TRACE(engine,
2736 "context completed before request!\n");
2737 ENGINE_TRACE(engine,
2738 "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
2739 ENGINE_READ(engine, RING_START),
2740 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
2741 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
2742 ENGINE_READ(engine, RING_CTL),
2743 ENGINE_READ(engine, RING_MI_MODE));
2744 ENGINE_TRACE(engine,
2745 "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
2746 i915_ggtt_offset(rq->ring->vma),
2747 rq->head, rq->tail,
2748 rq->fence.context,
2749 lower_32_bits(rq->fence.seqno),
2750 hwsp_seqno(rq));
2751 ENGINE_TRACE(engine,
2752 "ctx:{start:%08x, head:%04x, tail:%04x}, ",
2753 regs[CTX_RING_START],
2754 regs[CTX_RING_HEAD],
2755 regs[CTX_RING_TAIL]);
2756 }
2757
2758 execlists_schedule_out(*execlists->active++);
2759
2760 if (!*execlists->active && atomic_xchg(&execlists->busy, 0)) {
2761 if ((engine->gt->qos.debug & 1))
2762 intel_qos_overload_end(&engine->gt->qos);
2763 trace = true;
2764 }
2765
2766 if (trace)
2767 trace_status(engine);
2768
2769 GEM_BUG_ON(execlists->active - execlists->inflight >
2770 execlists_num_ports(execlists));
2771 }
2772 } while (head != tail);
2773
2774 set_timeslice(engine);
2775
2776 /*
2777 * Gen11 has proven to fail wrt global observation point between
2778 * entry and tail update, failing on the ordering and thus
2779 * we see an old entry in the context status buffer.
2780 *
2781 * Forcibly evict out entries for the next gpu csb update,
2782 * to increase the odds that we get fresh entries with non
2783 * working hardware. The cost for doing so comes out mostly with
2784 * the wash as hardware, working or not, will need to do the
2785 * invalidation before.
2786 */
2787 invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
2788 }
2789
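For reference, the kernel is built as C90 with -Werror=declaration-after-statement, so the two "bool trace = false;" declarations at 2680 and 2712, which follow executable statements inside their blocks, are hard errors rather than style nits. A minimal sketch of one way to keep the DEBUG patch building (illustrative only, not a tested fix) is to declare trace once at the top of the do/while body, next to promote, where its initializer re-runs on every pass:

	do {
		bool promote;
		bool trace = false;	/* C90: declarations before statements */

		if (++head == num_entries)
			head = 0;

		/* ... body as above, with the two mid-block
		 * "bool trace = false;" lines at 2680 and 2712 dropped ... */
	} while (head != tail);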
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[linux-next:master 10904/11956] drivers/firmware/tegra/bpmp.c:861:51: error: 'tegra186_soc' undeclared here (not in a function); did you mean 'tegra210_soc'?
by kernel test robot
tree: https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head: 49e7e3e905e437a02782019570f70997e2da9101
commit: cecd2eaea2e0ff9095f18e84415469f5aacf8144 [10904/11956] Merge branch 'arm/drivers' into for-next
config: arm64-randconfig-r011-20200930 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commi...
git remote add linux-next https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
git fetch --no-tags linux-next master
git checkout cecd2eaea2e0ff9095f18e84415469f5aacf8144
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arm64
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
Note: the linux-next/master HEAD 49e7e3e905e437a02782019570f70997e2da9101 builds fine.
It may have been fixed somewhere.
All errors (new ones prefixed by >>):
>> drivers/firmware/tegra/bpmp.c:861:51: error: 'tegra186_soc' undeclared here (not in a function); did you mean 'tegra210_soc'?
861 | { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
| ^~~~~~~~~~~~
| tegra210_soc
vim +861 drivers/firmware/tegra/bpmp.c
139251fc220830c Timo Alho 2019-01-24 856
983de5f97169ab5 Thierry Reding 2016-08-19 857 static const struct of_device_id tegra_bpmp_match[] = {
fe45ab552955ee9 Thierry Reding 2019-02-07 858 #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
0ebdf11699d0491 Thierry Reding 2020-09-17 859 IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
0ebdf11699d0491 Thierry Reding 2020-09-17 860 IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
983de5f97169ab5 Thierry Reding 2016-08-19 @861 { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
79d031fcad56e27 Thierry Reding 2019-02-07 862 #endif
79d031fcad56e27 Thierry Reding 2019-02-07 863 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
139251fc220830c Timo Alho 2019-01-24 864 { .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
79d031fcad56e27 Thierry Reding 2019-02-07 865 #endif
983de5f97169ab5 Thierry Reding 2016-08-19 866 { }
983de5f97169ab5 Thierry Reding 2016-08-19 867 };
983de5f97169ab5 Thierry Reding 2016-08-19 868
:::::: The code at line 861 was first introduced by commit
:::::: 983de5f97169ab59d4cb0f60d9d9157778ce4a5e firmware: tegra: Add BPMP support
:::::: TO: Thierry Reding <treding@nvidia.com>
:::::: CC: Thierry Reding <treding@nvidia.com>
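The table entry for tegra186 is compiled whenever any of the 186/194/234 Kconfig symbols is enabled, but this randconfig evidently enables only the latter one(s), and the tegra186_soc data earlier in the file is presumably still guarded by CONFIG_ARCH_TEGRA_186_SOC alone. One fix sketch (assuming, as in mainline, that the data is a struct tegra_bpmp_soc) is to give the definition the same compound guard as its table entry:

#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
	/* ... existing channel layout and ops, unchanged ... */
};
#endif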
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Re: [PATCH v3 2/7] drivers: mfd: Add a driver for iEi WT61P803 PUZZLE MCU
by kernel test robot
Hi Luka,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on hwmon/hwmon-next]
[also build test WARNING on lee-mfd/for-mfd-next pavel-linux-leds/for-next linus/master v5.9-rc7 next-20200929]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Luka-Kovacic/Add-support-for-the...
base: https://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git hwmon-next
config: openrisc-randconfig-r033-20200930 (attached as .config)
compiler: or1k-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/0day-ci/linux/commit/3f186576324e22bba3e3893d6f3cf3ce9...
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Luka-Kovacic/Add-support-for-the-iEi-Puzzle-M801-board/20200930-094341
git checkout 3f186576324e22bba3e3893d6f3cf3ce91d70da7
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=openrisc
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
>> drivers/mfd/iei-wt61p803-puzzle.c:309:5: warning: no previous prototype for 'iei_wt61p803_puzzle_buzzer' [-Wmissing-prototypes]
309 | int iei_wt61p803_puzzle_buzzer(struct iei_wt61p803_puzzle *mcu, bool long_beep)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
vim +/iei_wt61p803_puzzle_buzzer +309 drivers/mfd/iei-wt61p803-puzzle.c
308
> 309 int iei_wt61p803_puzzle_buzzer(struct iei_wt61p803_puzzle *mcu, bool long_beep)
310 {
311 unsigned char buzzer_short_cmd[4] = {
312 IEI_WT61P803_PUZZLE_CMD_HEADER_START,
313 IEI_WT61P803_PUZZLE_CMD_FUNCTION_SINGLE,
314 '2'
315 }; /* Buzzer 0.5 sec */
316 unsigned char buzzer_long_cmd[4] = {
317 IEI_WT61P803_PUZZLE_CMD_HEADER_START,
318 IEI_WT61P803_PUZZLE_CMD_FUNCTION_SINGLE,
319 '3'
320 }; /* Buzzer 1.5 sec */
321 unsigned char *resp_buf = mcu->response_buffer;
322 size_t reply_size = 0;
323 int ret;
324
325 mutex_lock(&mcu->lock);
326 ret = iei_wt61p803_puzzle_write_command(mcu,
327 long_beep ? buzzer_long_cmd : buzzer_short_cmd, 4,
328 resp_buf, &reply_size);
329 if (ret)
330 goto exit;
331
332 if (reply_size != 3) {
333 ret = -EIO;
334 goto exit;
335 }
336
337 if (!(resp_buf[0] == IEI_WT61P803_PUZZLE_CMD_HEADER_START &&
338 resp_buf[1] == IEI_WT61P803_PUZZLE_CMD_RESPONSE_OK &&
339 resp_buf[2] == IEI_WT61P803_PUZZLE_CHECKSUM_RESPONSE_OK)) {
340 ret = -EPROTO;
341 goto exit;
342 }
343 exit:
344 mutex_unlock(&mcu->lock);
345 return ret;
346 }
347
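W=1 enables -Wmissing-prototypes, which fires for any non-static function defined without a declaration in scope. Since iei_wt61p803_puzzle_buzzer() looks like an API intended for the MCU's sub-drivers, the usual fix is to declare it in the shared MFD header and include that header here; a sketch follows, with the header path being an assumption based on the driver name:

/* include/linux/mfd/iei-wt61p803-puzzle.h (assumed location) */
int iei_wt61p803_puzzle_buzzer(struct iei_wt61p803_puzzle *mcu, bool long_beep);

/* drivers/mfd/iei-wt61p803-puzzle.c */
#include <linux/mfd/iei-wt61p803-puzzle.h>	/* brings the prototype into scope */

If nothing outside this file ends up calling the function, marking it static would silence the warning just as well.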
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Re: [Intel-gfx] [PATCH v2 04/11] drm/i915: Shove the PHY test into the hotplug work
by kernel test robot
Hi Ville,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on drm-intel/for-linux-next]
[also build test WARNING on drm-tip/drm-tip v5.9-rc7 next-20200929]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Ville-Syrjala/drm-i915-Plumb-crt...
base: git://anongit.freedesktop.org/drm-intel for-linux-next
config: x86_64-randconfig-s021-20200930 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce:
# apt-get install sparse
# sparse version: v0.6.2-201-g24bdaac6-dirty
# https://github.com/0day-ci/linux/commit/a504af17c918ba91652d9c0c5ed45d9e0...
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Ville-Syrjala/drm-i915-Plumb-crtc-state-to-link-training-code/20200930-073629
git checkout a504af17c918ba91652d9c0c5ed45d9e0ca98dfd
# save the attached .config to linux build tree
make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
>> drivers/gpu/drm/i915/display/intel_dp.c:5977:39: sparse: sparse: mixing different enum types:
>> drivers/gpu/drm/i915/display/intel_dp.c:5977:39: sparse: unsigned int enum drm_connector_status
>> drivers/gpu/drm/i915/display/intel_dp.c:5977:39: sparse: unsigned int enum intel_hotplug_state
vim +5977 drivers/gpu/drm/i915/display/intel_dp.c
5951
5952 /*
5953 * If display is now connected check links status,
5954 * there has been known issues of link loss triggering
5955 * long pulse.
5956 *
5957 * Some sinks (eg. ASUS PB287Q) seem to perform some
5958 * weird HPD ping pong during modesets. So we can apparently
5959 * end up with HPD going low during a modeset, and then
5960 * going back up soon after. And once that happens we must
5961 * retrain the link to get a picture. That's in case no
5962 * userspace component reacted to intermittent HPD dip.
5963 */
5964 static enum intel_hotplug_state
5965 intel_dp_hotplug(struct intel_encoder *encoder,
5966 struct intel_connector *connector)
5967 {
5968 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5969 struct drm_modeset_acquire_ctx ctx;
5970 enum intel_hotplug_state state;
5971 int ret;
5972
5973 if (intel_dp->compliance.test_active &&
5974 intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
5975 intel_dp_phy_test(encoder);
5976 /* just do the PHY test and nothing else */
> 5977 return connector->base.status;
5978 }
5979
5980 state = intel_encoder_hotplug(encoder, connector);
5981
5982 drm_modeset_acquire_init(&ctx, 0);
5983
5984 for (;;) {
5985 ret = intel_dp_retrain_link(encoder, &ctx);
5986
5987 if (ret == -EDEADLK) {
5988 drm_modeset_backoff(&ctx);
5989 continue;
5990 }
5991
5992 break;
5993 }
5994
5995 drm_modeset_drop_locks(&ctx);
5996 drm_modeset_acquire_fini(&ctx);
5997 drm_WARN(encoder->base.dev, ret,
5998 "Acquiring modeset locks failed with %i\n", ret);
5999
6000 /*
6001 * Keeping it consistent with intel_ddi_hotplug() and
6002 * intel_hdmi_hotplug().
6003 */
6004 if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
6005 state = INTEL_HOTPLUG_RETRY;
6006
6007 return state;
6008 }
6009
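sparse is pointing at the early return at 5977: it hands back connector->base.status, an enum drm_connector_status, from a function declared to return enum intel_hotplug_state. A sketch of one way to keep the types honest is to return an explicit hotplug state instead; whether UNCHANGED is the right semantic for the PHY-test-only path is a question for the author:

	if (intel_dp->compliance.test_active &&
	    intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
		intel_dp_phy_test(encoder);
		/* just do the PHY test and nothing else */
		return INTEL_HOTPLUG_UNCHANGED;	/* a hotplug state, not a connector status */
	}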
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[peterz-queue:sched/wip2 9/12] kernel/sched/core.c:4689:3: error: implicit declaration of function 'migrate_disable_switch'; did you mean 'migrate_disable'?
by kernel test robot
tree: https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git sched/wip2
head: a01eeac805648bb3f2f50ed46e738f61738a23c2
commit: ffb992f060f9d5563cba657acaa2435f9d825552 [9/12] sched: Add migrate_disable()
config: i386-randconfig-m021-20200929 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
# https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git/commit/?...
git remote add peterz-queue https://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git
git fetch --no-tags peterz-queue sched/wip2
git checkout ffb992f060f9d5563cba657acaa2435f9d825552
# save the attached .config to linux build tree
make W=1 ARCH=i386
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
kernel/sched/core.c: In function 'ttwu_stat':
kernel/sched/core.c:2528:13: warning: variable 'rq' set but not used [-Wunused-but-set-variable]
2528 | struct rq *rq;
| ^~
kernel/sched/core.c: In function 'schedule_tail':
kernel/sched/core.c:3870:13: warning: variable 'rq' set but not used [-Wunused-but-set-variable]
3870 | struct rq *rq;
| ^~
kernel/sched/core.c: In function '__schedule':
>> kernel/sched/core.c:4689:3: error: implicit declaration of function 'migrate_disable_switch'; did you mean 'migrate_disable'? [-Werror=implicit-function-declaration]
4689 | migrate_disable_switch(rq, prev);
| ^~~~~~~~~~~~~~~~~~~~~~
| migrate_disable
cc1: some warnings being treated as errors
vim +4689 kernel/sched/core.c
4536
4537 /*
4538 * __schedule() is the main scheduler function.
4539 *
4540 * The main means of driving the scheduler and thus entering this function are:
4541 *
4542 * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
4543 *
4544 * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
4545 * paths. For example, see arch/x86/entry_64.S.
4546 *
4547 * To drive preemption between tasks, the scheduler sets the flag in timer
4548 * interrupt handler scheduler_tick().
4549 *
4550 * 3. Wakeups don't really cause entry into schedule(). They add a
4551 * task to the run-queue and that's it.
4552 *
4553 * Now, if the new task added to the run-queue preempts the current
4554 * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
4555 * called on the nearest possible occasion:
4556 *
4557 * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
4558 *
4559 * - in syscall or exception context, at the next outmost
4560 * preempt_enable(). (this might be as soon as the wake_up()'s
4561 * spin_unlock()!)
4562 *
4563 * - in IRQ context, return from interrupt-handler to
4564 * preemptible context
4565 *
4566 * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
4567 * then at the next:
4568 *
4569 * - cond_resched() call
4570 * - explicit schedule() call
4571 * - return from syscall or exception to user-space
4572 * - return from interrupt-handler to user-space
4573 *
4574 * WARNING: must be called with preemption disabled!
4575 */
4576 static void __sched notrace __schedule(bool preempt)
4577 {
4578 struct task_struct *prev, *next;
4579 unsigned long *switch_count;
4580 unsigned long prev_state;
4581 struct rq_flags rf;
4582 struct rq *rq;
4583 int cpu;
4584
4585 cpu = smp_processor_id();
4586 rq = cpu_rq(cpu);
4587 prev = rq->curr;
4588
4589 schedule_debug(prev, preempt);
4590
4591 if (sched_feat(HRTICK))
4592 hrtick_clear(rq);
4593
4594 local_irq_disable();
4595 rcu_note_context_switch(preempt);
4596
4597 /*
4598 * Make sure that signal_pending_state()->signal_pending() below
4599 * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
4600 * done by the caller to avoid the race with signal_wake_up():
4601 *
4602 * __set_current_state(@state) signal_wake_up()
4603 * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
4604 * wake_up_state(p, state)
4605 * LOCK rq->lock LOCK p->pi_state
4606 * smp_mb__after_spinlock() smp_mb__after_spinlock()
4607 * if (signal_pending_state()) if (p->state & @state)
4608 *
4609 * Also, the membarrier system call requires a full memory barrier
4610 * after coming from user-space, before storing to rq->curr.
4611 */
4612 rq_lock(rq, &rf);
4613 smp_mb__after_spinlock();
4614
4615 /* Promote REQ to ACT */
4616 rq->clock_update_flags <<= 1;
4617 update_rq_clock(rq);
4618
4619 switch_count = &prev->nivcsw;
4620
4621 /*
4622 * We must load prev->state once (task_struct::state is volatile), such
4623 * that:
4624 *
4625 * - we form a control dependency vs deactivate_task() below.
4626 * - ptrace_{,un}freeze_traced() can change ->state underneath us.
4627 */
4628 prev_state = prev->state;
4629 if (!preempt && prev_state) {
4630 if (signal_pending_state(prev_state, prev)) {
4631 prev->state = TASK_RUNNING;
4632 } else {
4633 prev->sched_contributes_to_load =
4634 (prev_state & TASK_UNINTERRUPTIBLE) &&
4635 !(prev_state & TASK_NOLOAD) &&
4636 !(prev->flags & PF_FROZEN);
4637
4638 if (prev->sched_contributes_to_load)
4639 rq->nr_uninterruptible++;
4640
4641 /*
4642 * __schedule() ttwu()
4643 * prev_state = prev->state; if (p->on_rq && ...)
4644 * if (prev_state) goto out;
4645 * p->on_rq = 0; smp_acquire__after_ctrl_dep();
4646 * p->state = TASK_WAKING
4647 *
4648 * Where __schedule() and ttwu() have matching control dependencies.
4649 *
4650 * After this, schedule() must not care about p->state any more.
4651 */
4652 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
4653
4654 if (prev->in_iowait) {
4655 atomic_inc(&rq->nr_iowait);
4656 delayacct_blkio_start();
4657 }
4658 }
4659 switch_count = &prev->nvcsw;
4660 }
4661
4662 next = pick_next_task(rq, prev, &rf);
4663 clear_tsk_need_resched(prev);
4664 clear_preempt_need_resched();
4665
4666 if (likely(prev != next)) {
4667 rq->nr_switches++;
4668 /*
4669 * RCU users of rcu_dereference(rq->curr) may not see
4670 * changes to task_struct made by pick_next_task().
4671 */
4672 RCU_INIT_POINTER(rq->curr, next);
4673 /*
4674 * The membarrier system call requires each architecture
4675 * to have a full memory barrier after updating
4676 * rq->curr, before returning to user-space.
4677 *
4678 * Here are the schemes providing that barrier on the
4679 * various architectures:
4680 * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
4681 * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
4682 * - finish_lock_switch() for weakly-ordered
4683 * architectures where spin_unlock is a full barrier,
4684 * - switch_to() for arm64 (weakly-ordered, spin_unlock
4685 * is a RELEASE barrier),
4686 */
4687 ++*switch_count;
4688
> 4689 migrate_disable_switch(rq, prev);
4690 psi_sched_switch(prev, next, !task_on_rq_queued(prev));
4691
4692 trace_sched_switch(preempt, prev, next);
4693
4694 /* Also unlocks the rq: */
4695 rq = context_switch(rq, prev, next, &rf);
4696 } else {
4697 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
4698
4699 rq_unpin_lock(rq, &rf);
4700 __balance_callbacks(rq);
4701 raw_spin_unlock_irq(&rq->lock);
4702 }
4703 }
4704
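migrate_disable() itself is visible in this i386 config (gcc even suggests it), so patch [9/12] presumably introduces migrate_disable_switch() only on the SMP side of an #ifdef while __schedule() calls it unconditionally. The conventional fix is to pair the real function with an empty inline stub; a sketch, with the placement and exact config condition being assumptions:

/* kernel/sched/core.c (sketch) */
#ifdef CONFIG_SMP
static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
	/* real implementation, as added by the patch */
}
#else
static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
#endif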
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Re: [PATCH net-next 13/15] sctp: support for sending packet over udp4 sock
by kernel test robot
Hi Xin,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on net-next/master]
url: https://github.com/0day-ci/linux/commits/Xin-Long/sctp-Implement-RFC6951-...
base: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 280095713ce244e8dbdfb059cdca695baa72230a
config: ia64-randconfig-r014-20200929 (attached as .config)
compiler: ia64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/0day-ci/linux/commit/a1016fd4a55f176fcc2eae05052a61ad7...
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Xin-Long/sctp-Implement-RFC6951-UDP-Encapsulation-of-SCTP/20200929-215159
git checkout a1016fd4a55f176fcc2eae05052a61ad7d5a142b
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=ia64
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
net/sctp/protocol.c: In function 'sctp_udp_sock_start':
>> net/sctp/protocol.c:894:11: error: 'struct udp_port_cfg' has no member named 'local_ip6'; did you mean 'local_ip'?
894 | udp_conf.local_ip6 = in6addr_any;
| ^~~~~~~~~
| local_ip
vim +894 net/sctp/protocol.c
a330bee1c278f8 Xin Long 2020-09-29 870
140bb5309cf409 Xin Long 2020-09-29 871 int sctp_udp_sock_start(struct net *net)
140bb5309cf409 Xin Long 2020-09-29 872 {
140bb5309cf409 Xin Long 2020-09-29 873 struct udp_tunnel_sock_cfg tuncfg = {NULL};
140bb5309cf409 Xin Long 2020-09-29 874 struct udp_port_cfg udp_conf = {0};
140bb5309cf409 Xin Long 2020-09-29 875 struct socket *sock;
140bb5309cf409 Xin Long 2020-09-29 876 int err;
140bb5309cf409 Xin Long 2020-09-29 877
140bb5309cf409 Xin Long 2020-09-29 878 udp_conf.family = AF_INET;
140bb5309cf409 Xin Long 2020-09-29 879 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
140bb5309cf409 Xin Long 2020-09-29 880 udp_conf.local_udp_port = htons(net->sctp.udp_port);
140bb5309cf409 Xin Long 2020-09-29 881 err = udp_sock_create(net, &udp_conf, &sock);
140bb5309cf409 Xin Long 2020-09-29 882 if (err)
140bb5309cf409 Xin Long 2020-09-29 883 return err;
140bb5309cf409 Xin Long 2020-09-29 884
140bb5309cf409 Xin Long 2020-09-29 885 tuncfg.encap_type = 1;
140bb5309cf409 Xin Long 2020-09-29 886 tuncfg.encap_rcv = sctp_udp_rcv;
a330bee1c278f8 Xin Long 2020-09-29 887 tuncfg.encap_err_lookup = sctp_udp_err_lookup;
140bb5309cf409 Xin Long 2020-09-29 888 setup_udp_tunnel_sock(net, sock, &tuncfg);
140bb5309cf409 Xin Long 2020-09-29 889 net->sctp.udp4_sock = sock->sk;
140bb5309cf409 Xin Long 2020-09-29 890
cff8956126170d Xin Long 2020-09-29 891 memset(&udp_conf, 0, sizeof(udp_conf));
cff8956126170d Xin Long 2020-09-29 892
cff8956126170d Xin Long 2020-09-29 893 udp_conf.family = AF_INET6;
cff8956126170d Xin Long 2020-09-29 @894 udp_conf.local_ip6 = in6addr_any;
cff8956126170d Xin Long 2020-09-29 895 udp_conf.local_udp_port = htons(net->sctp.udp_port);
cff8956126170d Xin Long 2020-09-29 896 udp_conf.use_udp6_rx_checksums = true;
cff8956126170d Xin Long 2020-09-29 897 udp_conf.ipv6_v6only = true;
cff8956126170d Xin Long 2020-09-29 898 err = udp_sock_create(net, &udp_conf, &sock);
cff8956126170d Xin Long 2020-09-29 899 if (err) {
cff8956126170d Xin Long 2020-09-29 900 udp_tunnel_sock_release(net->sctp.udp4_sock->sk_socket);
cff8956126170d Xin Long 2020-09-29 901 net->sctp.udp4_sock = NULL;
cff8956126170d Xin Long 2020-09-29 902 return err;
cff8956126170d Xin Long 2020-09-29 903 }
cff8956126170d Xin Long 2020-09-29 904
cff8956126170d Xin Long 2020-09-29 905 tuncfg.encap_type = 1;
cff8956126170d Xin Long 2020-09-29 906 tuncfg.encap_rcv = sctp_udp_rcv;
a330bee1c278f8 Xin Long 2020-09-29 907 tuncfg.encap_err_lookup = sctp_udp_err_lookup;
cff8956126170d Xin Long 2020-09-29 908 setup_udp_tunnel_sock(net, sock, &tuncfg);
cff8956126170d Xin Long 2020-09-29 909 net->sctp.udp6_sock = sock->sk;
cff8956126170d Xin Long 2020-09-29 910
140bb5309cf409 Xin Long 2020-09-29 911 return 0;
140bb5309cf409 Xin Long 2020-09-29 912 }
140bb5309cf409 Xin Long 2020-09-29 913
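struct udp_port_cfg only has the local_ip6 member when CONFIG_IPV6 is enabled, and this ia64 randconfig evidently builds without it. A sketch of the usual fix is to compile the whole IPv6 half of the function conditionally, mirroring the guard udp_tunnel.h uses for the member itself:

#if IS_ENABLED(CONFIG_IPV6)
	memset(&udp_conf, 0, sizeof(udp_conf));

	udp_conf.family = AF_INET6;
	udp_conf.local_ip6 = in6addr_any;
	udp_conf.local_udp_port = htons(net->sctp.udp_port);
	udp_conf.use_udp6_rx_checksums = true;
	udp_conf.ipv6_v6only = true;
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err) {
		udp_tunnel_sock_release(net->sctp.udp4_sock->sk_socket);
		net->sctp.udp4_sock = NULL;
		return err;
	}

	tuncfg.encap_type = 1;
	tuncfg.encap_rcv = sctp_udp_rcv;
	tuncfg.encap_err_lookup = sctp_udp_err_lookup;
	setup_udp_tunnel_sock(net, sock, &tuncfg);
	net->sctp.udp6_sock = sock->sk;
#endif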
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[linux-next:master 6316/11956] arch/arm64/kvm/mmu.c:1642 user_mem_abort() error: uninitialized symbol 'ret'.
by Dan Carpenter
tree: https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head: 49e7e3e905e437a02782019570f70997e2da9101
commit: 6f745f1bb5bf2914be2344b0dd7781d0528b3e42 [6316/11956] KVM: arm64: Convert user_mem_abort() to generic page-table API
config: arm64-randconfig-m031-20200929 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
smatch warnings:
arch/arm64/kvm/mmu.c:1642 user_mem_abort() error: uninitialized symbol 'ret'.
vim +/ret +1642 arch/arm64/kvm/mmu.c
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1496 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
98047888bb9fd5 arch/arm/kvm/mmu.c Christoffer Dall 2014-08-19 1497 struct kvm_memory_slot *memslot, unsigned long hva,
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1498 unsigned long fault_status)
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1499 {
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1500 int ret;
6396b852e46e56 virt/kvm/arm/mmu.c Punit Agrawal 2018-12-11 1501 bool write_fault, writable, force_pte = false;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1502 bool exec_fault;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1503 bool device = false;
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1504 unsigned long mmu_seq;
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1505 struct kvm *kvm = vcpu->kvm;
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1506 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1507 struct vm_area_struct *vma;
1559b7583ff6ed virt/kvm/arm/mmu.c James Morse 2019-12-17 1508 short vma_shift;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1509 gfn_t gfn;
ba049e93aef7e8 arch/arm/kvm/mmu.c Dan Williams 2016-01-15 1510 kvm_pfn_t pfn;
15a49a44fc3620 arch/arm/kvm/mmu.c Mario Smarduch 2015-01-15 1511 bool logging_active = memslot_is_logging(memslot);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1512 unsigned long vma_pagesize;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1513 enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1514 struct kvm_pgtable *pgt;
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1515
a7d079cea2dffb arch/arm/kvm/mmu.c Ard Biesheuvel 2014-09-09 1516 write_fault = kvm_is_write_fault(vcpu);
d0e22b4ac3ba23 virt/kvm/arm/mmu.c Marc Zyngier 2017-10-23 1517 exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
d0e22b4ac3ba23 virt/kvm/arm/mmu.c Marc Zyngier 2017-10-23 1518 VM_BUG_ON(write_fault && exec_fault);
d0e22b4ac3ba23 virt/kvm/arm/mmu.c Marc Zyngier 2017-10-23 1519
d0e22b4ac3ba23 virt/kvm/arm/mmu.c Marc Zyngier 2017-10-23 1520 if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1521 kvm_err("Unexpected L2 read permission error\n");
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1522 return -EFAULT;
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1523 }
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1524
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1525 /* Let's check if we will get back a huge page backed by hugetlbfs */
89154dd5313f77 arch/arm64/kvm/mmu.c Michel Lespinasse 2020-06-08 1526 mmap_read_lock(current->mm);
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1527 vma = find_vma_intersection(current->mm, hva, hva + 1);
37b544087ef3f6 arch/arm/kvm/mmu.c Ard Biesheuvel 2014-09-17 1528 if (unlikely(!vma)) {
37b544087ef3f6 arch/arm/kvm/mmu.c Ard Biesheuvel 2014-09-17 1529 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
89154dd5313f77 arch/arm64/kvm/mmu.c Michel Lespinasse 2020-06-08 1530 mmap_read_unlock(current->mm);
37b544087ef3f6 arch/arm/kvm/mmu.c Ard Biesheuvel 2014-09-17 1531 return -EFAULT;
37b544087ef3f6 arch/arm/kvm/mmu.c Ard Biesheuvel 2014-09-17 1532 }
37b544087ef3f6 arch/arm/kvm/mmu.c Ard Biesheuvel 2014-09-17 1533
1559b7583ff6ed virt/kvm/arm/mmu.c James Morse 2019-12-17 1534 if (is_vm_hugetlb_page(vma))
1559b7583ff6ed virt/kvm/arm/mmu.c James Morse 2019-12-17 1535 vma_shift = huge_page_shift(hstate_vma(vma));
1559b7583ff6ed virt/kvm/arm/mmu.c James Morse 2019-12-17 1536 else
1559b7583ff6ed virt/kvm/arm/mmu.c James Morse 2019-12-17 1537 vma_shift = PAGE_SHIFT;
1559b7583ff6ed virt/kvm/arm/mmu.c James Morse 2019-12-17 1538
1559b7583ff6ed virt/kvm/arm/mmu.c James Morse 2019-12-17 1539 vma_pagesize = 1ULL << vma_shift;
a80868f3985548 virt/kvm/arm/mmu.c Suzuki K Poulose 2019-03-12 1540 if (logging_active ||
6d674e28f642e3 virt/kvm/arm/mmu.c Marc Zyngier 2019-12-11 1541 (vma->vm_flags & VM_PFNMAP) ||
a80868f3985548 virt/kvm/arm/mmu.c Suzuki K Poulose 2019-03-12 1542 !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
a80868f3985548 virt/kvm/arm/mmu.c Suzuki K Poulose 2019-03-12 1543 force_pte = true;
a80868f3985548 virt/kvm/arm/mmu.c Suzuki K Poulose 2019-03-12 1544 vma_pagesize = PAGE_SIZE;
a80868f3985548 virt/kvm/arm/mmu.c Suzuki K Poulose 2019-03-12 1545 }
a80868f3985548 virt/kvm/arm/mmu.c Suzuki K Poulose 2019-03-12 1546
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1547 if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1548 fault_ipa &= huge_page_mask(hstate_vma(vma));
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1549
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1550 gfn = fault_ipa >> PAGE_SHIFT;
89154dd5313f77 arch/arm64/kvm/mmu.c Michel Lespinasse 2020-06-08 1551 mmap_read_unlock(current->mm);
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1552
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1553 /*
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1554 * Permission faults just need to update the existing leaf entry,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1555 * and so normally don't require allocations from the memcache. The
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1556 * only exception to this is when dirty logging is enabled at runtime
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1557 * and a write fault needs to collapse a block entry into a table.
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1558 */
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1559 if (fault_status != FSC_PERM || (logging_active && write_fault)) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1560 ret = kvm_mmu_topup_memory_cache(memcache,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1561 kvm_mmu_cache_min_pages(kvm));
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1562 if (ret)
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1563 return ret;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1564 }
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1565
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1566 mmu_seq = vcpu->kvm->mmu_notifier_seq;
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1567 /*
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1568 * Ensure the read of mmu_notifier_seq happens before we call
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1569 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1570 * the page we just got a reference to gets unmapped before we have a
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1571 * chance to grab the mmu_lock, which ensure that if the page gets
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1572 * unmapped afterwards, the call to kvm_unmap_hva will take it away
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1573 * from us again properly. This smp_rmb() interacts with the smp_wmb()
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1574 * in kvm_mmu_notifier_invalidate_<page|range_end>.
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1575 */
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1576 smp_rmb();
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1577
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1578 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
196f878a7ac2e7 virt/kvm/arm/mmu.c James Morse 2017-06-20 1579 if (pfn == KVM_PFN_ERR_HWPOISON) {
1559b7583ff6ed virt/kvm/arm/mmu.c James Morse 2019-12-17 1580 kvm_send_hwpoison_signal(hva, vma_shift);
196f878a7ac2e7 virt/kvm/arm/mmu.c James Morse 2017-06-20 1581 return 0;
196f878a7ac2e7 virt/kvm/arm/mmu.c James Morse 2017-06-20 1582 }
9ac715954682b2 arch/arm/kvm/mmu.c Christoffer Dall 2016-08-17 1583 if (is_error_noslot_pfn(pfn))
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1584 return -EFAULT;
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1585
15a49a44fc3620 arch/arm/kvm/mmu.c Mario Smarduch 2015-01-15 1586 if (kvm_is_device_pfn(pfn)) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1587 device = true;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1588 } else if (logging_active && !write_fault) {
15a49a44fc3620 arch/arm/kvm/mmu.c Mario Smarduch 2015-01-15 1589 /*
15a49a44fc3620 arch/arm/kvm/mmu.c Mario Smarduch 2015-01-15 1590 * Only actually map the page as writable if this was a write
15a49a44fc3620 arch/arm/kvm/mmu.c Mario Smarduch 2015-01-15 1591 * fault.
15a49a44fc3620 arch/arm/kvm/mmu.c Mario Smarduch 2015-01-15 1592 */
15a49a44fc3620 arch/arm/kvm/mmu.c Mario Smarduch 2015-01-15 1593 writable = false;
15a49a44fc3620 arch/arm/kvm/mmu.c Mario Smarduch 2015-01-15 1594 }
b88657674d39fc arch/arm/kvm/mmu.c Kim Phillips 2014-06-26 1595
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1596 if (exec_fault && device)
6d674e28f642e3 virt/kvm/arm/mmu.c Marc Zyngier 2019-12-11 1597 return -ENOEXEC;
6d674e28f642e3 virt/kvm/arm/mmu.c Marc Zyngier 2019-12-11 1598
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1599 spin_lock(&kvm->mmu_lock);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1600 pgt = vcpu->arch.hw_mmu->pgt;
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1601 if (mmu_notifier_retry(kvm, mmu_seq))
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1602 goto out_unlock;
Set the error code?
15a49a44fc3620 arch/arm/kvm/mmu.c Mario Smarduch 2015-01-15 1603
3f58bf63455588 virt/kvm/arm/mmu.c Punit Agrawal 2018-12-11 1604 /*
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose 2020-05-07 1605 * If we are not forced to use page mapping, check if we are
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose 2020-05-07 1606 * backed by a THP and thus use block mapping if possible.
3f58bf63455588 virt/kvm/arm/mmu.c Punit Agrawal 2018-12-11 1607 */
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose 2020-05-07 1608 if (vma_pagesize == PAGE_SIZE && !force_pte)
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose 2020-05-07 1609 vma_pagesize = transparent_hugepage_adjust(memslot, hva,
0529c9021252a5 arch/arm64/kvm/mmu.c Suzuki K Poulose 2020-05-07 1610 &pfn, &fault_ipa);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1611 if (writable) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1612 prot |= KVM_PGTABLE_PROT_W;
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1613 kvm_set_pfn_dirty(pfn);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1614 mark_page_dirty(kvm, gfn);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1615 }
a9c0e12ebee56e virt/kvm/arm/mmu.c Marc Zyngier 2017-10-23 1616
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1617 if (fault_status != FSC_PERM && !device)
3f58bf63455588 virt/kvm/arm/mmu.c Punit Agrawal 2018-12-11 1618 clean_dcache_guest_page(pfn, vma_pagesize);
3f58bf63455588 virt/kvm/arm/mmu.c Punit Agrawal 2018-12-11 1619
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1620 if (exec_fault) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1621 prot |= KVM_PGTABLE_PROT_X;
3f58bf63455588 virt/kvm/arm/mmu.c Punit Agrawal 2018-12-11 1622 invalidate_icache_guest_page(pfn, vma_pagesize);
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1623 }
a9c0e12ebee56e virt/kvm/arm/mmu.c Marc Zyngier 2017-10-23 1624
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1625 if (device)
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1626 prot |= KVM_PGTABLE_PROT_DEVICE;
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1627 else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1628 prot |= KVM_PGTABLE_PROT_X;
a15f693935a9f1 virt/kvm/arm/mmu.c Marc Zyngier 2017-10-23 1629
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1630 if (fault_status == FSC_PERM && !(logging_active && writable)) {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1631 ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1632 } else {
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1633 ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1634 __pfn_to_phys(pfn), prot,
6f745f1bb5bf29 arch/arm64/kvm/mmu.c Will Deacon 2020-09-11 1635 memcache);
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1636 }
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1637
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1638 out_unlock:
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 1639 spin_unlock(&kvm->mmu_lock);
35307b9a5f7ebc arch/arm/kvm/mmu.c Marc Zyngier 2015-03-12 1640 kvm_set_pfn_accessed(pfn);
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1641 kvm_release_pfn_clean(pfn);
ad361f093c1e31 arch/arm/kvm/mmu.c Christoffer Dall 2012-11-01 @1642 return ret;
94f8e6418d3915 arch/arm/kvm/mmu.c Christoffer Dall 2013-01-20 1643 }
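The inline "Set the error code?" above marks the gap: when fault_status == FSC_PERM and dirty logging is off, the memcache branch that first assigns ret is skipped, so the mmu_notifier_retry() bail-out reaches the final "return ret" with ret never written. A minimal sketch gives that path an explicit value; which value is right (0 to silently replay the fault, or an error the caller translates) is a policy call for the maintainers, and -EAGAIN below is only an assumption:

	spin_lock(&kvm->mmu_lock);
	pgt = vcpu->arch.hw_mmu->pgt;
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;	/* assumed choice: have the caller retry */
		goto out_unlock;
	}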
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[leon-rdma:rdma-next 67/71] drivers/infiniband/hw/mlx5/qp.c:861:42: sparse: sparse: subtraction of functions? Share your drugs
by kernel test robot
tree: https://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma.git rdma-next
head: 41142fada89c77281000029c8f4ce28ea8ed95ac
commit: b287f59569b5ddead80547afb3fbe54ba19d71a2 [67/71] RDMA/mlx5: Use mlx5_umem_find_best_quantized_pgoff() for WQ
:::::: branch date: 5 hours ago
:::::: commit date: 6 hours ago
config: i386-randconfig-s002-20200929 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce:
# apt-get install sparse
# sparse version: v0.6.2-201-g24bdaac6-dirty
# https://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma.git/commi...
git remote add leon-rdma https://git.kernel.org/pub/scm/linux/kernel/git/leon/linux-rdma.git
git fetch --no-tags leon-rdma rdma-next
git checkout b287f59569b5ddead80547afb3fbe54ba19d71a2
# save the attached .config to linux build tree
make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=i386
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
sparse warnings: (new ones prefixed by >>)
>> drivers/infiniband/hw/mlx5/qp.c:861:42: sparse: sparse: subtraction of functions? Share your drugs
vim +861 drivers/infiniband/hw/mlx5/qp.c
79b20a6c3014c7 Yishai Hadas 2016-05-23 829
79b20a6c3014c7 Yishai Hadas 2016-05-23 830 static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
b0ea0fa5435f9d Jason Gunthorpe 2019-01-09 831 struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
79b20a6c3014c7 Yishai Hadas 2016-05-23 832 struct mlx5_ib_create_wq *ucmd)
79b20a6c3014c7 Yishai Hadas 2016-05-23 833 {
89944450547334 Shamir Rabinovitch 2019-02-07 834 struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
89944450547334 Shamir Rabinovitch 2019-02-07 835 udata, struct mlx5_ib_ucontext, ibucontext);
b287f59569b5dd Jason Gunthorpe 2020-08-21 836 unsigned long page_size = 0;
79b20a6c3014c7 Yishai Hadas 2016-05-23 837 u32 offset = 0;
79b20a6c3014c7 Yishai Hadas 2016-05-23 838 int err;
79b20a6c3014c7 Yishai Hadas 2016-05-23 839
79b20a6c3014c7 Yishai Hadas 2016-05-23 840 if (!ucmd->buf_addr)
79b20a6c3014c7 Yishai Hadas 2016-05-23 841 return -EINVAL;
79b20a6c3014c7 Yishai Hadas 2016-05-23 842
c320e527e15483 Moni Shoua 2020-01-15 843 rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
79b20a6c3014c7 Yishai Hadas 2016-05-23 844 if (IS_ERR(rwq->umem)) {
79b20a6c3014c7 Yishai Hadas 2016-05-23 845 mlx5_ib_dbg(dev, "umem_get failed\n");
79b20a6c3014c7 Yishai Hadas 2016-05-23 846 err = PTR_ERR(rwq->umem);
79b20a6c3014c7 Yishai Hadas 2016-05-23 847 return err;
79b20a6c3014c7 Yishai Hadas 2016-05-23 848 }
79b20a6c3014c7 Yishai Hadas 2016-05-23 849
b287f59569b5dd Jason Gunthorpe 2020-08-21 850 page_size = mlx5_umem_find_best_quantized_pgoff(
b287f59569b5dd Jason Gunthorpe 2020-08-21 851 rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
b287f59569b5dd Jason Gunthorpe 2020-08-21 852 page_offset, 64, &rwq->rq_page_offset);
b287f59569b5dd Jason Gunthorpe 2020-08-21 853 if (!page_size) {
79b20a6c3014c7 Yishai Hadas 2016-05-23 854 mlx5_ib_warn(dev, "bad offset\n");
b287f59569b5dd Jason Gunthorpe 2020-08-21 855 err = -EINVAL;
79b20a6c3014c7 Yishai Hadas 2016-05-23 856 goto err_umem;
79b20a6c3014c7 Yishai Hadas 2016-05-23 857 }
79b20a6c3014c7 Yishai Hadas 2016-05-23 858
b287f59569b5dd Jason Gunthorpe 2020-08-21 859 rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size);
b287f59569b5dd Jason Gunthorpe 2020-08-21 860 rwq->page_shift = order_base_2(page_size);
b287f59569b5dd Jason Gunthorpe 2020-08-21 @861 rwq->log_page_size = page_shift - page_shift;
79b20a6c3014c7 Yishai Hadas 2016-05-23 862 rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
79b20a6c3014c7 Yishai Hadas 2016-05-23 863
97cb748d1478c1 Jason Gunthorpe 2020-08-19 864 mlx5_ib_dbg(
97cb748d1478c1 Jason Gunthorpe 2020-08-19 865 dev,
b287f59569b5dd Jason Gunthorpe 2020-08-21 866 "addr 0x%llx, size %zd, npages %zu, page_size %ld, ncont %d, offset %d\n",
79b20a6c3014c7 Yishai Hadas 2016-05-23 867 (unsigned long long)ucmd->buf_addr, rwq->buf_size,
b287f59569b5dd Jason Gunthorpe 2020-08-21 868 ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas,
97cb748d1478c1 Jason Gunthorpe 2020-08-19 869 offset);
79b20a6c3014c7 Yishai Hadas 2016-05-23 870
89944450547334 Shamir Rabinovitch 2019-02-07 871 err = mlx5_ib_db_map_user(ucontext, udata, ucmd->db_addr, &rwq->db);
79b20a6c3014c7 Yishai Hadas 2016-05-23 872 if (err) {
79b20a6c3014c7 Yishai Hadas 2016-05-23 873 mlx5_ib_dbg(dev, "map failed\n");
79b20a6c3014c7 Yishai Hadas 2016-05-23 874 goto err_umem;
79b20a6c3014c7 Yishai Hadas 2016-05-23 875 }
79b20a6c3014c7 Yishai Hadas 2016-05-23 876
79b20a6c3014c7 Yishai Hadas 2016-05-23 877 return 0;
79b20a6c3014c7 Yishai Hadas 2016-05-23 878
79b20a6c3014c7 Yishai Hadas 2016-05-23 879 err_umem:
79b20a6c3014c7 Yishai Hadas 2016-05-23 880 ib_umem_release(rwq->umem);
79b20a6c3014c7 Yishai Hadas 2016-05-23 881 return err;
79b20a6c3014c7 Yishai Hadas 2016-05-23 882 }
79b20a6c3014c7 Yishai Hadas 2016-05-23 883
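The wording is flippant but the diagnosis is real: create_user_rq() has no local named page_shift, so the bare identifier at 861 resolves to the kernel's page_shift() helper from include/linux/mm.h, and "page_shift - page_shift" subtracts two functions, which is what sparse is mocking. Given that rwq->page_shift is assigned on the previous line and mlx5 expresses log_page_size relative to MLX5_ADAPTER_PAGE_SHIFT (see the quantized-pgoff call above), the intended line was presumably:

	rwq->page_shift = order_base_2(page_size);
	rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT;	/* presumed intent */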
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org