@ linux-patched/include/xenomai/linux/stdarg.h:1 @
--- linux/include/xenomai/version.h	1970-01-01 01:00:00.000000000 +0100
+#include <stdarg.h>
+++ linux-patched/include/xenomai/version.h	2022-03-21 12:58:32.309860487 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_VERSION_H
+#define _XENOMAI_VERSION_H
+
+#ifndef __KERNEL__
+#include <xeno_config.h>
+#include <boilerplate/compiler.h>
+#endif
+
+#define XENO_VERSION(maj, min, rev)  (((maj)<<16)|((min)<<8)|(rev))
+
+#define XENO_VERSION_CODE	XENO_VERSION(CONFIG_XENO_VERSION_MAJOR,	\
+					     CONFIG_XENO_VERSION_MINOR,	\
+					     CONFIG_XENO_REVISION_LEVEL)
+
+#define XENO_VERSION_STRING	CONFIG_XENO_VERSION_STRING
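+
+/*
+ * Comparison sketch (the version bound shown is arbitrary):
+ *
+ *   #if XENO_VERSION_CODE >= XENO_VERSION(3, 2, 0)
+ *   (build code relying on a 3.2+ feature)
+ *   #endif
+ */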
+
+#endif /* _XENOMAI_VERSION_H */
+++ linux-patched/include/xenomai/pipeline/sched.h	2022-03-21 12:58:32.033863179 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/sirq.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_SCHED_H
+#define _COBALT_KERNEL_DOVETAIL_SCHED_H
+
+#include <cobalt/kernel/lock.h>
+
+struct xnthread;
+struct xnsched;
+struct task_struct;
+
+void pipeline_init_shadow_tcb(struct xnthread *thread);
+
+void pipeline_init_root_tcb(struct xnthread *thread);
+
+int ___xnsched_run(struct xnsched *sched);
+
+static inline int pipeline_schedule(struct xnsched *sched)
+{
+	return run_oob_call((int (*)(void *))___xnsched_run, sched);
+}
+
+static inline void pipeline_prep_switch_oob(struct xnthread *root)
+{
+	/* N/A */
+}
+
+bool pipeline_switch_to(struct xnthread *prev,
+			struct xnthread *next,
+			bool leaving_inband);
+
+int pipeline_leave_inband(void);
+
+int pipeline_leave_oob_prepare(void);
+
+static inline void pipeline_leave_oob_unlock(void)
+{
+	/*
+	 * We may not re-enable hard irqs due to the specifics of
+	 * stage escalation via run_oob_call(), to prevent breaking
+	 * the (virtual) interrupt state.
+	 */
+	xnlock_put(&nklock);
+}
+
+void pipeline_leave_oob_finish(void);
+
+static inline
+void pipeline_finalize_thread(struct xnthread *thread)
+{
+	/* N/A */
+}
+
+void pipeline_raise_mayday(struct task_struct *tsk);
+
+void pipeline_clear_mayday(void);
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_SCHED_H */
+++ linux-patched/include/xenomai/pipeline/sirq.h	2022-03-21 12:58:32.026863247 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_SIRQ_H
+#define _COBALT_KERNEL_DOVETAIL_SIRQ_H
+
+#include <linux/irq_pipeline.h>
+#include <cobalt/kernel/assert.h>
+
+/*
+ * Wrappers to create "synthetic IRQs" the Dovetail way. Those
+ * interrupt channels can only be triggered by software, in order to run
+ * a handler on the in-band execution stage.
+ */
+
+static inline
+int pipeline_create_inband_sirq(irqreturn_t (*handler)(int irq, void *dev_id))
+{
+	/*
+	 * Allocate an IRQ from the synthetic interrupt domain then
+	 * trap it to @handler, to be fired from the in-band stage.
+	 */
+	int sirq, ret;
+
+	sirq = irq_create_direct_mapping(synthetic_irq_domain);
+	if (sirq == 0)
+		return -EAGAIN;
+
+	ret = __request_percpu_irq(sirq,
+			handler,
+			IRQF_NO_THREAD,
+			"Inband sirq",
+			&cobalt_machine_cpudata);
+
+	if (ret) {
+		irq_dispose_mapping(sirq);
+		return ret;
+	}
+
+	return sirq;
+}
+
+static inline
+void pipeline_delete_inband_sirq(int sirq)
+{
+	/*
+	 * Release the handler attached to the synthetic IRQ, then
+	 * return the descriptor to its originating domain.
+	 */
+	free_percpu_irq(sirq,
+		&cobalt_machine_cpudata);
+
+	irq_dispose_mapping(sirq);
+}
+
+static inline void pipeline_post_sirq(int sirq)
+{
+	/* Trigger the synthetic IRQ */
+	irq_post_inband(sirq);
+}
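+
+/*
+ * Lifecycle sketch for the wrappers above (the handler name is made
+ * up for illustration; error handling elided):
+ *
+ *   static irqreturn_t relay_handler(int sirq, void *dev_id)
+ *   {
+ *           (runs from the in-band stage)
+ *           return IRQ_HANDLED;
+ *   }
+ *
+ *   int sirq = pipeline_create_inband_sirq(relay_handler);
+ *   ...
+ *   pipeline_post_sirq(sirq);     (typically from the oob stage)
+ *   ...
+ *   pipeline_delete_inband_sirq(sirq);
+ */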
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_SIRQ_H */
+++ linux-patched/include/xenomai/pipeline/wrappers.h	2022-03-21 12:58:32.019863315 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/kevents.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _COBALT_KERNEL_DOVETAIL_WRAPPERS_H
+#define _COBALT_KERNEL_DOVETAIL_WRAPPERS_H
+
+/* No wrapper needed so far. */
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_WRAPPERS_H */
+++ linux-patched/include/xenomai/pipeline/kevents.h	2022-03-21 12:58:32.011863393 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/vdso_fallback.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_KEVENTS_H
+#define _COBALT_KERNEL_DOVETAIL_KEVENTS_H
+
+#define KEVENT_PROPAGATE   0
+#define KEVENT_STOP        1
+
+struct cobalt_process;
+struct cobalt_thread;
+
+static inline
+int pipeline_attach_process(struct cobalt_process *process)
+{
+	return 0;
+}
+
+static inline
+void pipeline_detach_process(struct cobalt_process *process)
+{ }
+
+int pipeline_prepare_current(void);
+
+void pipeline_attach_current(struct xnthread *thread);
+
+int pipeline_trap_kevents(void);
+
+void pipeline_enable_kevents(void);
+
+void pipeline_cleanup_process(void);
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_KEVENTS_H */
+++ linux-patched/include/xenomai/pipeline/vdso_fallback.h	2022-03-21 12:58:32.004863461 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/machine.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ * Copyright (c) Siemens AG, 2021
+ */
+
+#ifndef _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+#define _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/timer.h>
+#include <xenomai/posix/clock.h>
+
+#define is_clock_gettime(__nr)		((__nr) == __NR_clock_gettime)
+
+#ifndef __NR_clock_gettime64
+#define is_clock_gettime64(__nr)	0
+#else
+#define is_clock_gettime64(__nr)	((__nr) == __NR_clock_gettime64)
+#endif
+
+static __always_inline bool
+pipeline_handle_vdso_fallback(int nr, struct pt_regs *regs)
+{
+	struct __kernel_old_timespec __user *u_old_ts;
+	struct __kernel_timespec uts, __user *u_uts;
+	struct __kernel_old_timespec old_ts;
+	struct timespec64 ts64;
+	int clock_id, ret = 0;
+	unsigned long args[6];
+
+	if (!is_clock_gettime(nr) && !is_clock_gettime64(nr))
+		return false;
+
+	/*
+	 * We need to fetch the args again because not all archs use the same
+	 * calling convention for Linux and Xenomai syscalls.
+	 */
+	syscall_get_arguments(current, regs, args);
+
+	clock_id = (int)args[0];
+	switch (clock_id) {
+	case CLOCK_MONOTONIC:
+		ns2ts(&ts64, xnclock_read_monotonic(&nkclock));
+		break;
+	case CLOCK_REALTIME:
+		ns2ts(&ts64, xnclock_read_realtime(&nkclock));
+		break;
+	default:
+		return false;
+	}
+
+	if (is_clock_gettime(nr)) {
+		old_ts.tv_sec = (__kernel_old_time_t)ts64.tv_sec;
+		old_ts.tv_nsec = ts64.tv_nsec;
+		u_old_ts = (struct __kernel_old_timespec __user *)args[1];
+		if (raw_copy_to_user(u_old_ts, &old_ts, sizeof(old_ts)))
+			ret = -EFAULT;
+	} else if (is_clock_gettime64(nr)) {
+		uts.tv_sec = ts64.tv_sec;
+		uts.tv_nsec = ts64.tv_nsec;
+		u_uts = (struct __kernel_timespec __user *)args[1];
+		if (raw_copy_to_user(u_uts, &uts, sizeof(uts)))
+			ret = -EFAULT;
+	}
+
+	__xn_status_return(regs, ret);
+
+	return true;
+}
+
+#endif /* !_COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H */
+++ linux-patched/include/xenomai/pipeline/machine.h	2022-03-21 12:58:31.997863530 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/irq.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_MACHINE_H
+#define _COBALT_KERNEL_DOVETAIL_MACHINE_H
+
+#include <linux/percpu.h>
+
+#ifdef CONFIG_FTRACE
+#define boot_lat_trace_notice "[LTRACE]"
+#else
+#define boot_lat_trace_notice ""
+#endif
+
+struct vm_area_struct;
+
+struct cobalt_machine {
+	const char *name;
+	int (*init)(void);
+	int (*late_init)(void);
+	void (*cleanup)(void);
+	void (*prefault)(struct vm_area_struct *vma);
+	const char *const *fault_labels;
+};
+
+extern struct cobalt_machine cobalt_machine;
+
+struct cobalt_machine_cpudata {
+	unsigned int faults[32];
+};
+
+DECLARE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
+
+struct cobalt_pipeline {
+#ifdef CONFIG_SMP
+	cpumask_t supported_cpus;
+#endif
+};
+
+int pipeline_init(void);
+
+int pipeline_late_init(void);
+
+void pipeline_cleanup(void);
+
+extern struct cobalt_pipeline cobalt_pipeline;
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_MACHINE_H */
+++ linux-patched/include/xenomai/pipeline/irq.h	2022-03-21 12:58:31.989863608 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/tick.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_IRQ_H
+#define _COBALT_KERNEL_DOVETAIL_IRQ_H
+
+static inline void xnintr_init_proc(void)
+{
+	/* N/A */
+}
+
+static inline void xnintr_cleanup_proc(void)
+{
+	/* N/A */
+}
+
+static inline int xnintr_mount(void)
+{
+	/* N/A */
+	return 0;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_IRQ_H */
+++ linux-patched/include/xenomai/pipeline/tick.h	2022-03-21 12:58:31.982863676 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_TICK_H
+#define _COBALT_KERNEL_IPIPE_TICK_H
+
+int pipeline_install_tick_proxy(void);
+
+void pipeline_uninstall_tick_proxy(void);
+
+struct xnsched;
+
+bool pipeline_must_force_program_tick(struct xnsched *sched);
+
+#endif /* !_COBALT_KERNEL_IPIPE_TICK_H */
+++ linux-patched/include/xenomai/pipeline/thread.h	2022-03-21 12:58:31.974863754 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/inband_work.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_THREAD_H
+#define _COBALT_KERNEL_DOVETAIL_THREAD_H
+
+#include <linux/dovetail.h>
+
+struct xnthread;
+
+#define cobalt_threadinfo oob_thread_state
+
+static inline struct cobalt_threadinfo *pipeline_current(void)
+{
+	return dovetail_current_state();
+}
+
+static inline
+struct xnthread *pipeline_thread_from_task(struct task_struct *p)
+{
+	return dovetail_task_state(p)->thread;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_THREAD_H */
+++ linux-patched/include/xenomai/pipeline/inband_work.h	2022-03-21 12:58:31.967863822 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/lock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H
+#define _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H
+
+#include <linux/irq_work.h>
+
+/*
+ * This field must be named inband_work and appear first in the
+ * container work struct.
+ */
+struct pipeline_inband_work {
+	struct irq_work work;
+};
+
+#define PIPELINE_INBAND_WORK_INITIALIZER(__work, __handler)		\
+	{								\
+		.work = IRQ_WORK_INIT((void (*)(struct irq_work *))__handler), \
+	}
+
+#define pipeline_post_inband_work(__work)				\
+			irq_work_queue(&(__work)->inband_work.work)
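+
+/*
+ * Container sketch (the struct and handler below are hypothetical,
+ * only meant to illustrate the layout constraint stated above):
+ *
+ *   static void relay_handler(struct pipeline_inband_work *inband_work);
+ *
+ *   struct relay_work {
+ *           struct pipeline_inband_work inband_work;   (must be first)
+ *           int payload;
+ *   };
+ *
+ *   static struct relay_work work = {
+ *           .inband_work = PIPELINE_INBAND_WORK_INITIALIZER(work,
+ *                                                           relay_handler),
+ *   };
+ *
+ *   pipeline_post_inband_work(&work);
+ */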
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_INBAND_WORK_H */
+++ linux-patched/include/xenomai/pipeline/lock.h	2022-03-21 12:58:31.960863890 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/pipeline.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_LOCK_H
+#define _COBALT_KERNEL_DOVETAIL_LOCK_H
+
+#include <linux/spinlock.h>
+
+typedef hard_spinlock_t pipeline_spinlock_t;
+
+#define PIPELINE_SPIN_LOCK_UNLOCKED(__name)  __HARD_SPIN_LOCK_INITIALIZER(__name)
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+/* Disable UP-over-SMP kernel optimization in debug mode. */
+#define __locking_active__  1
+
+#else
+
+#ifdef CONFIG_SMP
+#define __locking_active__  1
+#else
+#define __locking_active__  IS_ENABLED(CONFIG_SMP)
+#endif
+
+#endif
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_LOCK_H */
+++ linux-patched/include/xenomai/pipeline/pipeline.h	2022-03-21 12:58:31.952863968 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/trace.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_PIPELINE_H
+#define _COBALT_KERNEL_DOVETAIL_PIPELINE_H
+
+#include <linux/irq_pipeline.h>
+#include <linux/cpumask.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/syscall.h>
+#include <pipeline/machine.h>
+
+typedef unsigned long spl_t;
+
+/*
+ * We only keep the LSB when testing in SMP mode in order to strip off
+ * the recursion marker (0x2) the nklock may store there.
+ */
+#define splhigh(x)  ((x) = oob_irq_save() & 1)
+#ifdef CONFIG_SMP
+#define splexit(x)  oob_irq_restore(x & 1)
+#else /* !CONFIG_SMP */
+#define splexit(x)  oob_irq_restore(x)
+#endif /* !CONFIG_SMP */
+#define splmax()    oob_irq_disable()
+#define splnone()   oob_irq_enable()
+#define spltest()   oob_irqs_disabled()
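+
+/*
+ * Typical bracketing sketch for the helpers above:
+ *
+ *   spl_t s;
+ *
+ *   splhigh(s);
+ *   (section running with out-of-band interrupts disabled)
+ *   splexit(s);
+ */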
+
+#define is_secondary_domain()	running_inband()
+#define is_primary_domain()	running_oob()
+
+#ifdef CONFIG_SMP
+
+irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id);
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	if (num_possible_cpus() == 1)
+		return 0;
+
+	/* Trap the out-of-band rescheduling interrupt. */
+	return __request_percpu_irq(RESCHEDULE_OOB_IPI,
+			pipeline_reschedule_ipi_handler,
+			IRQF_OOB,
+			"Xenomai reschedule",
+			&cobalt_machine_cpudata);
+}
+
+static inline void pipeline_free_resched_ipi(void)
+{
+	if (num_possible_cpus() > 1)
+		/* Release the out-of-band rescheduling interrupt. */
+		free_percpu_irq(RESCHEDULE_OOB_IPI, &cobalt_machine_cpudata);
+}
+
+static inline void pipeline_send_resched_ipi(const struct cpumask *dest)
+{
+	/*
+	 * Trigger the out-of-band rescheduling interrupt on remote
+	 * CPU(s).
+	 */
+	irq_send_oob_ipi(RESCHEDULE_OOB_IPI, dest);
+}
+
+static inline void pipeline_send_timer_ipi(const struct cpumask *dest)
+{
+	/*
+	 * Trigger the out-of-band timer interrupt on remote CPU(s).
+	 */
+	irq_send_oob_ipi(TIMER_OOB_IPI, dest);
+}
+
+#else  /* !CONFIG_SMP */
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	return 0;
+}
+
+static inline void pipeline_free_resched_ipi(void)
+{
+}
+
+#endif	/* CONFIG_SMP */
+
+static inline void pipeline_prepare_panic(void)
+{
+	/* N/A */
+}
+
+static inline void pipeline_collect_features(struct cobalt_featinfo *f)
+{
+	f->clock_freq = 0;	/* N/A */
+}
+
+#ifndef pipeline_get_syscall_args
+static inline void pipeline_get_syscall_args(struct task_struct *task,
+					     struct pt_regs *regs,
+					     unsigned long *args)
+{
+	syscall_get_arguments(task, regs, args);
+}
+#endif	/* !pipeline_get_syscall_args */
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_PIPELINE_H */
+++ linux-patched/include/xenomai/pipeline/trace.h	2022-03-21 12:58:31.945864037 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/pipeline/clock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_DOVETAIL_TRACE_H
+#define _COBALT_KERNEL_DOVETAIL_TRACE_H
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <cobalt/uapi/kernel/trace.h>
+#include <trace/events/cobalt-core.h>
+#include <cobalt/kernel/assert.h>
+
+static inline int xntrace_max_begin(unsigned long v)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_max_end(unsigned long v)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_max_reset(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_user_start(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_user_stop(unsigned long v)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_user_freeze(unsigned long v, int once)
+{
+	trace_cobalt_trace_longval(0, v);
+	trace_cobalt_trigger("user-freeze");
+	return 0;
+}
+
+static inline void xntrace_latpeak_freeze(int delay)
+{
+	trace_cobalt_latpeak(delay);
+	trace_cobalt_trigger("latency-freeze");
+}
+
+static inline int xntrace_special(unsigned char id, unsigned long v)
+{
+	trace_cobalt_trace_longval(id, v);
+	return 0;
+}
+
+static inline int xntrace_special_u64(unsigned char id,
+				unsigned long long v)
+{
+	trace_cobalt_trace_longval(id, v);
+	return 0;
+}
+
+static inline int xntrace_pid(pid_t pid, short prio)
+{
+	trace_cobalt_trace_pid(pid, prio);
+	return 0;
+}
+
+static inline int xntrace_tick(unsigned long delay_ticks) /* ns */
+{
+	trace_cobalt_tick_shot(delay_ticks);
+	return 0;
+}
+
+static inline int xntrace_panic_freeze(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline int xntrace_panic_dump(void)
+{
+	TODO();
+	return 0;
+}
+
+static inline bool xntrace_enabled(void)
+{
+	return IS_ENABLED(CONFIG_DOVETAIL_TRACE);
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_TRACE_H */
+++ linux-patched/include/xenomai/pipeline/clock.h	2022-03-21 12:58:31.938864105 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/ipc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_CLOCK_H
+#define _COBALT_KERNEL_DOVETAIL_CLOCK_H
+
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/kernel/assert.h>
+#include <linux/ktime.h>
+#include <linux/errno.h>
+
+struct timespec64;
+
+static inline u64 pipeline_read_cycle_counter(void)
+{
+	/*
+	 * With Dovetail, our idea of time is directly based on a
+	 * refined count of nanoseconds since the epoch; the hardware
+	 * time counter is transparent to us. For this reason,
+	 * xnclock_ticks_to_ns() and xnclock_ns_to_ticks() are mere
+	 * identity (1:1) conversions when building for Dovetail.
+	 */
+	return ktime_get_mono_fast_ns();
+}
+
+static inline xnticks_t pipeline_read_wallclock(void)
+{
+	return ktime_get_real_fast_ns();
+}
+
+static inline int pipeline_set_wallclock(xnticks_t epoch_ns)
+{
+	return -EOPNOTSUPP;
+}
+
+void pipeline_set_timer_shot(unsigned long cycles);
+
+const char *pipeline_timer_name(void);
+
+static inline const char *pipeline_clock_name(void)
+{
+	/* Return the name of the current clock source. */
+	TODO();
+
+	return "?";
+}
+
+static inline int pipeline_get_host_time(struct timespec64 *tp)
+{
+	/* Convert ktime_get_real_fast_ns() to timespec. */
+	*tp = ktime_to_timespec64(ktime_get_real_fast_ns());
+
+	return 0;
+}
+
+static inline void pipeline_init_clock(void)
+{
+	/* N/A */
+}
+
+static inline xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks)
+{
+	return ticks;
+}
+
+static inline xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks)
+{
+	return ticks;
+}
+
+static inline xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns)
+{
+	return ns;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_CLOCK_H */
+++ linux-patched/include/xenomai/rtdm/uapi/ipc.h	2022-03-21 12:58:32.298860595 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/udd.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _RTDM_UAPI_IPC_H
+#define _RTDM_UAPI_IPC_H
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_ipc Real-time IPC
+ *
+ * @b Profile @b Revision: 1
+ * @n
+ * @n
+ * @par Device Characteristics
+ * @n
+ * @ref rtdm_driver_flags "Device Flags": @c RTDM_PROTOCOL_DEVICE @n
+ * @n
+ * @ref rtdm_driver.protocol_family "Protocol Family": @c PF_RTIPC @n
+ * @n
+ * @ref rtdm_driver.socket_type "Socket Type": @c SOCK_DGRAM @n
+ * @n
+ * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_RTIPC @n
+ * @n
+ * @{
+ *
+ * @anchor rtipc_operations @name Supported operations
+ * Standard socket operations supported by the RTIPC protocols.
+ * @{
+ */
+
+/**
+ * Create an endpoint for communication in the AF_RTIPC domain.
+ *
+ * @param[in] domain The communication domain. Must be AF_RTIPC.
+ *
+ * @param[in] type The socket type. Must be SOCK_DGRAM.
+ *
+ * @param [in] protocol Any of @ref IPCPROTO_XDDP, @ref IPCPROTO_IDDP,
+ * or @ref IPCPROTO_BUFP. @ref IPCPROTO_IPC is also valid, and refers
+ * to the default RTIPC protocol, namely @ref IPCPROTO_IDDP.
+ *
+ * @return In addition to the standard error codes for @c socket(2),
+ * the following specific error code may be returned:
+ * - -ENOPROTOOPT (Protocol is known, but not compiled into the RTIPC driver).
+ *   See @ref RTIPC_PROTO "RTIPC protocols"
+ *   for available protocols.
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int socket__AF_RTIPC(int domain =AF_RTIPC, int type =SOCK_DGRAM, int protocol);
+#endif
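+
+/*
+ * Creation sketch (illustrative only; the include paths assume a
+ * typical Xenomai userland build, and error handling is reduced to a
+ * bare check):
+ *
+ *   #include <sys/socket.h>
+ *   #include <rtdm/ipc.h>
+ *
+ *   int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+ *   if (s < 0)
+ *           (handle the error)
+ */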
+
+/**
+ * Close an RTIPC socket descriptor.
+ *
+ * Blocking calls to any of the @ref sendmsg__AF_RTIPC "sendmsg" or @ref
+ * recvmsg__AF_RTIPC "recvmsg" functions will be unblocked when the socket
+ * is closed and return with an error.
+ *
+ * @param[in] sockfd The socket descriptor to close.
+ *
+ * @return In addition to the standard error codes for @c close(2),
+ * the following specific error code may be returned:
+ * none
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int close__AF_RTIPC(int sockfd);
+#endif
+
+/**
+ * Bind an RTIPC socket to a port.
+ *
+ * Bind the socket to a destination port.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param [in] addr The address to bind the socket to (see struct
+ * sockaddr_ipc). The meaning of such address depends on the RTIPC
+ * protocol in use for the socket:
+ *
+ * - IPCPROTO_XDDP
+ *
+ *   This action creates an endpoint for channelling traffic between
+ *   the Xenomai and Linux domains.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and
+ *   CONFIG_XENO_OPT_PIPE_NRDEV-1.
+ *
+ *   If @em sipc_port is -1, a free port will be assigned automatically.
+ *
+ *   Upon success, the pseudo-device /dev/rtp@em N will be reserved
+ *   for this communication channel, where @em N is the assigned port
+ *   number. The non real-time side shall open this device to exchange
+ *   data over the bound socket.
+ *
+ * @anchor xddp_label_binding
+ *   If a label was assigned (see @ref XDDP_LABEL) prior to
+ *   binding the socket to a port, a registry link referring to the
+ *   created pseudo-device will be automatically set up as
+ *   @c /proc/xenomai/registry/rtipc/xddp/@em label, where @em label is the
+ *   label string passed to setsockopt() for the @ref XDDP_LABEL option.
+ *
+ * - IPCPROTO_IDDP
+ *
+ *   This action creates an endpoint for exchanging datagrams within
+ *   the Xenomai domain.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and
+ *   CONFIG_XENO_OPT_IDDP_NRPORT-1.
+ *
+ *   If @em sipc_port is -1, a free port will be assigned
+ *   automatically. The real-time peer shall connect to the same port
+ *   for exchanging data over the bound socket.
+ *
+ * @anchor iddp_label_binding
+ *   If a label was assigned (see @ref IDDP_LABEL) prior to binding
+ *   the socket to a port, a registry link referring to the assigned
+ *   port number will be automatically set up as @c
+ *   /proc/xenomai/registry/rtipc/iddp/@em label, where @em label is
+ *   the label string passed to setsockopt() for the @ref IDDP_LABEL
+ *   option.
+ *
+ * - IPCPROTO_BUFP
+ *
+ *   This action creates an endpoint for a one-way byte
+ *   stream within the Xenomai domain.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and CONFIG_XENO_OPT_BUFP_NRPORT-1.
+ *
+ *   If @em sipc_port is -1, an available port will be assigned
+ *   automatically. The real-time peer shall connect to the same port
+ *   for exchanging data over the bound socket.
+ *
+ * @anchor bufp_label_binding
+ *   If a label was assigned (see @ref BUFP_LABEL) prior to binding
+ *   the socket to a port, a registry link referring to the assigned
+ *   port number will be automatically set up as @c
+ *   /proc/xenomai/registry/rtipc/bufp/@em label, where @em label is
+ *   the label string passed to setsockopt() for the @a BUFP_LABEL
+ *   option.
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c
+ * bind(2), the following specific error code may be returned:
+ *   - -EFAULT (Invalid data address given)
+ *   - -ENOMEM (Not enough memory)
+ *   - -EINVAL (Invalid parameter)
+ *   - -EADDRINUSE (Socket already bound to a port, or no port available)
+ *   - -EAGAIN (no registry slot available, check/raise
+ *     CONFIG_XENO_OPT_REGISTRY_NRSLOTS) .
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int bind__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		   socklen_t addrlen);
+#endif
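+
+/*
+ * Binding sketch for an XDDP socket (the port number is arbitrary;
+ * binding to port 7 pairs the socket with /dev/rtp7 on the regular
+ * Linux side):
+ *
+ *   struct sockaddr_ipc saddr;
+ *
+ *   memset(&saddr, 0, sizeof(saddr));
+ *   saddr.sipc_family = AF_RTIPC;
+ *   saddr.sipc_port = 7;
+ *   if (bind(s, (struct sockaddr *)&saddr, sizeof(saddr)))
+ *           (handle the error)
+ */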
+
+/**
+ * Initiate a connection on an RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param [in] addr The address to connect the socket to (see struct
+ * sockaddr_ipc).
+ *
+ * - If sipc_port is a valid port for the protocol, it is used
+ * verbatim and the connection succeeds immediately, regardless of
+ * whether the destination is bound at the time of the call.
+ *
+ * - If sipc_port is -1 and a label was assigned to the socket,
+ * connect() blocks for the requested amount of time (see @ref
+ * SO_RCVTIMEO) until a socket is bound to the same label via @c
+ * bind(2) (see @ref XDDP_LABEL, @ref IDDP_LABEL, @ref BUFP_LABEL), in
+ * which case a connection is established between both endpoints.
+ *
+ * - If sipc_port is -1 and no label was assigned to the socket, the
+ * default destination address is cleared, meaning that any subsequent
+ * write to the socket will return -EDESTADDRREQ, until a valid
+ * destination address is set via @c connect(2) or @c bind(2).
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c connect(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int connect__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		      socklen_t addrlen);
+#endif
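+
+/*
+ * Connect-by-label sketch (illustrative; it assumes XDDP_LABEL and
+ * SO_RCVTIMEO were set on the socket beforehand, as documented
+ * above):
+ *
+ *   struct sockaddr_ipc daddr;
+ *
+ *   memset(&daddr, 0, sizeof(daddr));
+ *   daddr.sipc_family = AF_RTIPC;
+ *   daddr.sipc_port = -1;    (resolve the peer port from the label)
+ *   if (connect(s, (struct sockaddr *)&daddr, sizeof(daddr)))
+ *           (handle the error)
+ */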
+
+/**
+ * Set options on RTIPC sockets.
+ *
+ * This service allows setting various socket options.
+ * Supported Levels and Options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * setsockopt(2), option-specific error codes may be returned;
+ * follow the option links above.
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int setsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 const void *optval, socklen_t optlen);
+#endif
+/**
+ * Get options on RTIPC sockets.
+ *
+ * This service allows getting various socket options.
+ * Supported Levels and Options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * getsockopt(2), option-specific error codes may be returned;
+ * follow the option links above.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 void *optval, socklen_t *optlen);
+#endif
+
+/**
+ * Send a message on an RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[in] msg The address of the message header conveying the
+ * datagram.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_OOB Send out-of-band message.  For all RTIPC protocols except
+ *   @ref IPCPROTO_BUFP, sending out-of-band data actually means
+ *   pushing them to the head of the receiving queue, so that the
+ *   reader will always receive them before normal messages. @ref
+ *   IPCPROTO_BUFP does not support out-of-band sending.
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ *   blocked whenever the message cannot be sent immediately at the
+ *   time of the call (e.g. memory shortage), but will rather return
+ *   with -EWOULDBLOCK. Unlike other RTIPC protocols, @ref
+ *   IPCPROTO_XDDP accepts but never considers MSG_DONTWAIT since
+ *   writing to a real-time XDDP endpoint is inherently a non-blocking
+ *   operation.
+ *
+ * - MSG_MORE Accumulate data before sending. This flag is accepted by
+ *   the @ref IPCPROTO_XDDP protocol only, and tells the send service
+ *   to accumulate the outgoing data into an internal streaming
+ *   buffer, instead of issuing a datagram immediately for it. See
+ *   @ref XDDP_BUFSZ for more.
+ *
+ * @note No RTIPC protocol allows for short writes, and only complete
+ * messages are sent to the peer.
+ *
+ * @return In addition to the standard error codes for @c sendmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t sendmsg__AF_RTIPC(int sockfd, const struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Receive a message from an RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[out] msg The address the message header will be copied at.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ *   blocked whenever no message is immediately available for receipt
+ *   at the time of the call, but will rather return with
+ *   -EWOULDBLOCK.
+ *
+ * @note @ref IPCPROTO_BUFP does not allow for short reads and always
+ * returns the requested number of bytes, except in one situation:
+ * when a writer is blocked on a buffer-full condition while the
+ * caller would otherwise have to wait to receive a
+ * complete message.  This is usually the sign of a pathological use
+ * of the BUFP socket, like defining an incorrect buffer size via @ref
+ * BUFP_BUFSZ. In that case, a short read is allowed to prevent a
+ * deadlock.
+ *
+ * @return In addition to the standard error codes for @c recvmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t recvmsg__AF_RTIPC(int sockfd, struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Get socket name.
+ *
+ * The name of the local endpoint for the socket is copied back (see
+ * struct sockaddr_ipc).
+ *
+ * @return In addition to the standard error codes for @c getsockname(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getsockname__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen);
+#endif
+
+/**
+ * Get socket peer.
+ *
+ * The name of the remote endpoint for the socket is copied back (see
+ * struct sockaddr_ipc). This is the default destination address for
+ * messages sent on the socket. It can be set either explicitly via @c
+ * connect(2), or implicitly via @c bind(2) if no @c connect(2) was
+ * called prior to binding the socket to a port, in which case both
+ * the local and remote names are equal.
+ *
+ * @return In addition to the standard error codes for @c getpeername(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getpeername__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen);
+#endif
+
+/** @} */
+
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/uapi/kernel/pipe.h>
+#include <rtdm/rtdm.h>
+
+/* Address family */
+#define AF_RTIPC		111
+
+/* Protocol family */
+#define PF_RTIPC		AF_RTIPC
+
+/**
+ * @anchor RTIPC_PROTO @name RTIPC protocol list
+ * protocols for the PF_RTIPC protocol family
+ *
+ * @{ */
+enum {
+/** Default protocol (IDDP) */
+	IPCPROTO_IPC  = 0,
+/**
+ * Cross-domain datagram protocol (RT <-> non-RT).
+ *
+ * Real-time Xenomai threads and regular Linux threads may want to
+ * exchange data in a way that does not require the former to leave
+ * the real-time domain (i.e. primary mode). The RTDM-based XDDP
+ * protocol is available for this purpose.
+ *
+ * On the Linux domain side, pseudo-device files named /dev/rtp@em \<minor\>
+ * give regular POSIX threads access to non real-time communication
+ * endpoints, via the standard character-based I/O interface. On the
+ * Xenomai domain side, sockets may be bound to XDDP ports, which act
+ * as proxies to send and receive data to/from the associated
+ * pseudo-device files. Ports and pseudo-device minor numbers are
+ * paired, meaning that e.g. socket port 7 will proxy the traffic to/from
+ * /dev/rtp7.
+ *
+ * All data sent through a bound/connected XDDP socket via @c
+ * sendto(2) or @c write(2) will be passed to the peer endpoint in the
+ * Linux domain, and made available for reading via the standard @c
+ * read(2) system call. Conversely, all data sent using @c write(2)
+ * through the non real-time endpoint will be conveyed to the
+ * real-time socket endpoint, and made available to the @c recvfrom(2)
+ * or @c read(2) system calls.
+ */
+	IPCPROTO_XDDP = 1,
+/**
+ * Intra-domain datagram protocol (RT <-> RT).
+ *
+ * The RTDM-based IDDP protocol enables real-time threads to exchange
+ * datagrams within the Xenomai domain, via socket endpoints.
+ */
+	IPCPROTO_IDDP = 2,
+/**
+ * Buffer protocol (RT <-> RT, byte-oriented).
+ *
+ * The RTDM-based BUFP protocol implements a lightweight,
+ * byte-oriented, one-way Producer-Consumer data path. All messages
+ * written are buffered into a single memory area in strict FIFO
+ * order, until read by the consumer.
+ *
+ * This protocol always prevents short writes, and only allows short
+ * reads when a potential deadlock situation arises (i.e. readers and
+ * writers waiting for each other indefinitely).
+ */
+	IPCPROTO_BUFP = 3,
+	IPCPROTO_MAX
+};
+/** @} */
+
+/**
+ * Port number type for the RTIPC address family.
+ */
+typedef int16_t rtipc_port_t;
+
+/**
+ * Port label information structure.
+ */
+struct rtipc_port_label {
+	/** Port label string, null-terminated. */
+	char label[XNOBJECT_NAME_LEN];
+};
+
+/**
+ * Socket address structure for the RTIPC address family.
+ */
+struct sockaddr_ipc {
+	/** RTIPC address family, must be @c AF_RTIPC */
+	sa_family_t sipc_family;
+	/** Port number. */
+	rtipc_port_t sipc_port;
+};
+
+#define SOL_XDDP		311
+/**
+ * @anchor sockopts_xddp @name XDDP socket options
+ * Setting and getting XDDP socket options.
+ * @{ */
+/**
+ * XDDP label assignment
+ *
+ * ASCII label strings can be attached to XDDP ports, so that opening
+ * the non-RT endpoint can be done by specifying this symbolic device
+ * name rather than referring to a raw pseudo-device entry
+ * (i.e. /dev/rtp@em N).
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref xddp_label_binding
+ * "XDDP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_LABEL		1
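+
+/*
+ * Label assignment sketch (the label string is illustrative):
+ *
+ *   struct rtipc_port_label plabel;
+ *
+ *   strcpy(plabel.label, "xddp-demo");
+ *   if (setsockopt(s, SOL_XDDP, XDDP_LABEL, &plabel, sizeof(plabel)))
+ *           (handle the error)
+ */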
+/**
+ * XDDP local pool size configuration
+ *
+ * By default, the memory needed to convey the data is pulled from
+ * Xenomai's system pool. Setting a local pool size overrides this
+ * default for the socket.
+ *
+ * If a non-zero size was configured, a local pool is allocated at
+ * binding time. This pool will provide storage for pending datagrams.
+ *
+ * It is not allowed to configure a local pool size after the socket
+ * was bound. However, multiple configuration calls are allowed prior
+ * to the binding; the last value set will be used.
+ *
+ * @note: the pool memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_POOLSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the local pool to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_POOLSZ		2
+/**
+ * XDDP streaming buffer size configuration
+ *
+ * In addition to sending datagrams, real-time threads may stream data
+ * in a byte-oriented mode through the port as well. This increases
+ * the bandwidth and reduces the overhead when the overall data to
+ * send to the Linux domain is collected bit by bit, and keeping the
+ * message boundaries is not required.
+ *
+ * This feature is enabled when a non-zero buffer size is set for the
+ * socket. In that case, the real-time data accumulates into the
+ * streaming buffer when MSG_MORE is passed to any of the @ref
+ * sendmsg__AF_RTIPC "send functions", until:
+ *
+ * - the receiver from the Linux domain wakes up and consumes it,
+ * - a different source port attempts to send data to the same
+ *   destination port,
+ * - MSG_MORE is absent from the send flags,
+ * - the buffer is full,
+ * .
+ * whichever comes first.
+ *
+ * Setting *@a optval to zero disables the streaming buffer, in which
+ * case all writes are conveyed in separate datagrams, regardless of
+ * MSG_MORE.
+ *
+ * @note only a single streaming buffer exists per socket. When this
+ * buffer is full, the real-time data stops accumulating and sending
+ * operations resume in mere datagram mode. Accumulation may happen
+ * again after some or all data in the streaming buffer is consumed
+ * from the Linux domain endpoint.
+ *
+ * The streaming buffer size may be adjusted multiple times during the
+ * socket lifetime; the latest configuration change will take effect
+ * when the accumulation resumes after the previous buffer was
+ * flushed.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_BUFSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the streaming buffer
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -ENOMEM (Not enough memory)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_BUFSZ		3
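+
+/*
+ * Streaming sketch (the buffer size and payload variables are
+ * illustrative):
+ *
+ *   size_t bufsz = 4096;
+ *
+ *   setsockopt(s, SOL_XDDP, XDDP_BUFSZ, &bufsz, sizeof(bufsz));
+ *   ...
+ *   send(s, buf, len, MSG_MORE);   (accumulate into the stream buffer)
+ *   send(s, buf, len, 0);          (flush as a single datagram)
+ */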
+/**
+ * XDDP monitoring callback
+ *
+ * Other RTDM drivers may install a user-defined callback via the @ref
+ * rtdm_setsockopt call from the inter-driver API, in order to collect
+ * particular events occurring on the channel.
+ *
+ * This notification mechanism is particularly useful to monitor a
+ * channel asynchronously while performing other tasks.
+ *
+ * The user-provided routine will be passed the RTDM file descriptor
+ * of the socket receiving the event, the event code, and an optional
+ * argument.  Four events are currently defined, see @ref XDDP_EVENTS.
+ *
+ * The XDDP_EVTIN and XDDP_EVTOUT events are fired on behalf of a
+ * fully atomic context; therefore, care must be taken to keep their
+ * overhead low. In those cases, the Xenomai services that may be
+ * called from the callback are restricted to the set allowed to a
+ * real-time interrupt handler.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_MONITOR
+ * @param [in] optval Pointer to a pointer to function of type int
+ *             (*)(int fd, int event, long arg), containing the address of the
+ *             user-defined callback. Passing a NULL callback pointer
+ *             in @a optval disables monitoring.
+ * @param [in] optlen sizeof(int (*)(int fd, int event, long arg))
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EPERM (Operation not allowed from user-space)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT, kernel space only
+ */
+#define XDDP_MONITOR		4
+/** @} */
+
+/**
+ * @anchor XDDP_EVENTS @name XDDP events
+ * Specific events occurring on XDDP channels, which can be monitored
+ * via the @ref XDDP_MONITOR socket option.
+ *
+ * @{ */
+/**
+ * @ref XDDP_MONITOR "Monitor" writes to the non real-time endpoint.
+ *
+ * XDDP_EVTIN is sent when data is written to the non real-time
+ * endpoint the socket is bound to (i.e. via /dev/rtp@em N), which
+ * means that some input is pending for the real-time endpoint. The
+ * argument is the size of the incoming message.
+ */
+#define XDDP_EVTIN		1
+/**
+ * @ref XDDP_MONITOR "Monitor" reads from the non real-time endpoint.
+ *
+ * XDDP_EVTOUT is sent when the non real-time endpoint successfully
+ * reads a complete message (i.e. via /dev/rtp@em N). The argument is
+ * the size of the outgoing message.
+ */
+#define XDDP_EVTOUT		2
+/**
+ * @ref XDDP_MONITOR "Monitor" close from the non real-time endpoint.
+ *
+ * XDDP_EVTDOWN is sent when the non real-time endpoint is closed. The
+ * argument is always 0.
+ */
+#define XDDP_EVTDOWN		3
+/**
+ * @ref XDDP_MONITOR "Monitor" memory shortage for non real-time
+ * datagrams.
+ *
+ * XDDP_EVTNOBUF is sent when no memory is available from the pool to
+ * hold the message currently sent from the non real-time
+ * endpoint. The argument is the size of the failed allocation. Upon
+ * return from the callback, the caller will block and retry until
+ * enough space is available from the pool; during that process, the
+ * callback might be invoked multiple times, each time a new attempt
+ * to get the required memory fails.
+ */
+#define XDDP_EVTNOBUF		4
+/** @} */
+
+#define SOL_IDDP		312
+/**
+ * @anchor sockopts_iddp @name IDDP socket options
+ * Setting and getting IDDP socket options.
+ * @{ */
+/**
+ * IDDP label assignment
+ *
+ * ASCII label strings can be attached to IDDP ports, in order to
+ * connect sockets to them in a more descriptive way than using plain
+ * numeric port values.
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref iddp_label_binding
+ * "IDDP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_iddp "SOL_IDDP"
+ * @param [in] optname @b IDDP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define IDDP_LABEL		1
+/**
+ * IDDP local pool size configuration
+ *
+ * By default, the memory needed to convey the data is pulled from
+ * Xenomai's system pool. Setting a local pool size overrides this
+ * default for the socket.
+ *
+ * If a non-zero size was configured, a local pool is allocated at
+ * binding time. This pool will provide storage for pending datagrams.
+ *
+ * It is not allowed to configure a local pool size after the socket
+ * was bound. However, multiple configuration calls are allowed prior
+ * to the binding; the last value set will be used.
+ *
+ * @note: the pool memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_iddp "SOL_IDDP"
+ * @param [in] optname @b IDDP_POOLSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the local pool to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define IDDP_POOLSZ		2
+/** @} */
+
+#define SOL_BUFP		313
+/**
+ * @anchor sockopts_bufp @name BUFP socket options
+ * Setting and getting BUFP socket options.
+ * @{ */
+/**
+ * BUFP label assignment
+ *
+ * ASCII label strings can be attached to BUFP ports, in order to
+ * connect sockets to them in a more descriptive way than using plain
+ * numeric port values.
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref bufp_label_binding
+ * "BUFP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_bufp "SOL_BUFP"
+ * @param [in] optname @b BUFP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define BUFP_LABEL		1
+/**
+ * BUFP buffer size configuration
+ *
+ * All messages written to a BUFP socket are buffered in a single
+ * per-socket memory area. Configuring the size of such buffer prior
+ * to binding the socket to a destination port is mandatory.
+ *
+ * It is not allowed to configure a buffer size after the socket was
+ * bound. However, multiple configuration calls are allowed prior to
+ * the binding; the last value set will be used.
+ *
+ * @note: the buffer memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_bufp "SOL_BUFP"
+ * @param [in] optname @b BUFP_BUFSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the buffer to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define BUFP_BUFSZ		2
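+
+/*
+ * Sizing sketch for a BUFP socket (the size is illustrative); as
+ * stated above, this must be done before binding:
+ *
+ *   size_t bufsz = 8192;
+ *
+ *   if (setsockopt(s, SOL_BUFP, BUFP_BUFSZ, &bufsz, sizeof(bufsz)))
+ *           (handle the error)
+ */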
+/** @} */
+
+/**
+ * @anchor sockopts_socket @name Socket level options
+ * Setting and getting supported standard socket level options.
+ * @{ */
+/**
+ *
+ * @ref IPCPROTO_IDDP and @ref IPCPROTO_BUFP protocols support the
+ * standard SO_SNDTIMEO socket option, from the @c SOL_SOCKET level.
+ *
+ * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399/
+ */
+#ifdef DOXYGEN_CPP
+#define SO_SNDTIMEO defined_by_kernel_header_file
+#endif
+/**
+ *
+ * All RTIPC protocols support the standard SO_RCVTIMEO socket option,
+ * from the @c SOL_SOCKET level.
+ *
+ * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399/
+ */
+#ifdef DOXYGEN_CPP
+#define SO_RCVTIMEO defined_by_kernel_header_file
+#endif
+/** @} */
+
+/**
+ * @anchor rtdm_ipc_examples @name RTIPC examples
+ * @{ */
+/** @example bufp-readwrite.c */
+/** @example bufp-label.c */
+/** @example iddp-label.c */
+/** @example iddp-sendrecv.c */
+/** @example xddp-echo.c */
+/** @example xddp-label.c */
+/** @example xddp-stream.c */
+/** @} */
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_IPC_H */
+++ linux-patched/include/xenomai/rtdm/uapi/udd.h	2022-03-21 12:58:32.291860663 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/testing.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * This file is part of the Xenomai project.
+ *
+ * @author Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_UDD_H
+#define _RTDM_UAPI_UDD_H
+
+/**
+ * @addtogroup rtdm_udd
+ *
+ * @{
+ */
+
+/**
+ * @anchor udd_signotify
+ * @brief UDD event notification descriptor
+ *
+ * This structure shall be used to pass the information required to
+ * enable/disable the notification by signal upon interrupt receipt.
+ *
+ * If PID is zero or negative, the notification is disabled.
+ * Otherwise, the Cobalt thread whose PID is given will receive the
+ * Cobalt signal also mentioned, along with the count of interrupts at
+ * the time of the receipt stored in siginfo.si_int. A Cobalt thread
+ * must explicitly wait for notifications using the sigwaitinfo() or
+ * sigtimedwait() services (no asynchronous mode available).
+ */
+struct udd_signotify {
+	/**
+	 * PID of the Cobalt thread to notify upon interrupt
+	 * receipt. If @a pid is zero or negative, the notification is
+	 * disabled.
+	 */
+	pid_t pid;
+	/**
+	 * Signal number to send to PID for notifying, which must be
+	 * in the range [SIGRTMIN .. SIGRTMAX] inclusive. This value
+	 * is not considered if @a pid is zero or negative.
+	 */
+	int sig;
+};
+
+/**
+ * @anchor udd_ioctl_codes @name UDD_IOCTL
+ * IOCTL requests
+ *
+ * @{
+ */
+
+/**
+ * Enable the interrupt line. The UDD-class mini-driver should handle
+ * this request when received through its ->ioctl() handler if
+ * provided. Otherwise, the UDD core enables the interrupt line in the
+ * interrupt controller before returning to the caller.
+ */
+#define UDD_RTIOC_IRQEN		_IO(RTDM_CLASS_UDD, 0)
+/**
+ * Disable the interrupt line. The UDD-class mini-driver should handle
+ * this request when received through its ->ioctl() handler if
+ * provided. Otherwise, the UDD core disables the interrupt line in
+ * the interrupt controller before returning to the caller.
+ *
+ * @note The mini-driver must handle the UDD_RTIOC_IRQEN request for a
+ * custom IRQ from its ->ioctl() handler, otherwise such request
+ * receives -EIO from the UDD core.
+ */
+#define UDD_RTIOC_IRQDIS	_IO(RTDM_CLASS_UDD, 1)
+/**
+ * Enable/Disable signal notification upon interrupt event. A valid
+ * @ref udd_signotify "notification descriptor" must be passed along
+ * with this request, which is handled by the UDD core directly.
+ *
+ * @note The mini-driver must handle the UDD_RTIOC_IRQDIS request for
+ * a custom IRQ from its ->ioctl() handler, otherwise such request
+ * receives -EIO from the UDD core.
+ */
+#define UDD_RTIOC_IRQSIG	_IOW(RTDM_CLASS_UDD, 2, struct udd_signotify)
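+
+/*
+ * Notification setup sketch (the signal number is illustrative; fd
+ * refers to an open UDD device, and the calling single-threaded
+ * process registers itself for notification):
+ *
+ *   struct udd_signotify sn = {
+ *           .pid = getpid(),
+ *           .sig = SIGRTMIN + 1,
+ *   };
+ *
+ *   if (ioctl(fd, UDD_RTIOC_IRQSIG, &sn))
+ *           (handle the error)
+ */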
+
+/** @} */
+/** @} */
+
+#endif /* !_RTDM_UAPI_UDD_H */
+++ linux-patched/include/xenomai/rtdm/uapi/testing.h	2022-03-21 12:58:32.284860731 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/analogy.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, testing device profile header
+ *
+ * @note Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rttesting
+ */
+#ifndef _RTDM_UAPI_TESTING_H
+#define _RTDM_UAPI_TESTING_H
+
+#include <linux/types.h>
+
+#define RTTST_PROFILE_VER		2
+
+typedef struct rttst_bench_res {
+	__s32 avg;
+	__s32 min;
+	__s32 max;
+	__s32 overruns;
+	__s32 test_loops;
+} rttst_bench_res_t;
+
+typedef struct rttst_interm_bench_res {
+	struct rttst_bench_res last;
+	struct rttst_bench_res overall;
+} rttst_interm_bench_res_t;
+
+typedef struct rttst_overall_bench_res {
+	struct rttst_bench_res result;
+	__s32 *histogram_avg;
+	__s32 *histogram_min;
+	__s32 *histogram_max;
+} rttst_overall_bench_res_t;
+
+#define RTTST_TMBENCH_INVALID		-1 /* internal use only */
+#define RTTST_TMBENCH_TASK		0
+#define RTTST_TMBENCH_HANDLER		1
+
+typedef struct rttst_tmbench_config {
+	int mode;
+	int priority;
+	__u64 period;
+	int warmup_loops;
+	int histogram_size;
+	int histogram_bucketsize;
+	int freeze_max;
+} rttst_tmbench_config_t;
+
+struct rttst_swtest_task {
+	unsigned int index;
+	unsigned int flags;
+};
+
+/* Possible values for struct rttst_swtest_task::flags. */
+#define RTTST_SWTEST_FPU		0x1
+#define RTTST_SWTEST_USE_FPU		0x2 /* Only for kernel-space tasks. */
+#define RTTST_SWTEST_FREEZE		0x4 /* Only for kernel-space tasks. */
+
+struct rttst_swtest_dir {
+	unsigned int from;
+	unsigned int to;
+};
+
+struct rttst_swtest_error {
+	struct rttst_swtest_dir last_switch;
+	unsigned int fp_val;
+};
+
+#define RTTST_RTDM_NORMAL_CLOSE		0
+#define RTTST_RTDM_DEFER_CLOSE_CONTEXT	1
+
+#define RTTST_RTDM_MAGIC_PRIMARY	0xfefbfefb
+#define RTTST_RTDM_MAGIC_SECONDARY	0xa5b9a5b9
+
+#define RTTST_HEAPCHECK_ZEROOVRD   1
+#define RTTST_HEAPCHECK_SHUFFLE    2
+#define RTTST_HEAPCHECK_PATTERN    4
+#define RTTST_HEAPCHECK_HOT        8
+
+struct rttst_heap_parms {
+	__u64 heap_size;
+	__u64 block_size;
+	int flags;
+	int nrstats;
+};
+
+struct rttst_heap_stats {
+	__u64 heap_size;
+	__u64 user_size;
+	__u64 block_size;
+	__s64 alloc_avg_ns;
+	__s64 alloc_max_ns;
+	__s64 free_avg_ns;
+	__s64 free_max_ns;
+	__u64 maximum_free;
+	__u64 largest_free;
+	int nrblocks;
+	int flags;
+};
+
+struct rttst_heap_stathdr {
+	int nrstats;
+	struct rttst_heap_stats *buf;
+};
+
+#define RTIOC_TYPE_TESTING		RTDM_CLASS_TESTING
+
+/*!
+ * @name Sub-Classes of RTDM_CLASS_TESTING
+ * @{ */
+/** subclass name: "timerbench" */
+#define RTDM_SUBCLASS_TIMERBENCH	0
+/** subclass name: "irqbench" */
+#define RTDM_SUBCLASS_IRQBENCH		1
+/** subclass name: "switchtest" */
+#define RTDM_SUBCLASS_SWITCHTEST	2
+/** subclass name: "rtdm" */
+#define RTDM_SUBCLASS_RTDMTEST		3
+/** subclass name: "heapcheck" */
+#define RTDM_SUBCLASS_HEAPCHECK		4
+/** @} */
+
+/*!
+ * @anchor TSTIOCTLs @name IOCTLs
+ * Testing device IOCTLs
+ * @{ */
+#define RTTST_RTIOC_INTERM_BENCH_RES \
+	_IOWR(RTIOC_TYPE_TESTING, 0x00, struct rttst_interm_bench_res)
+
+#define RTTST_RTIOC_TMBENCH_START \
+	_IOW(RTIOC_TYPE_TESTING, 0x10, struct rttst_tmbench_config)
+
+#define RTTST_RTIOC_TMBENCH_STOP \
+	_IOWR(RTIOC_TYPE_TESTING, 0x11, struct rttst_overall_bench_res)
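+
+/*
+ * Usage sketch (illustrative): starting and stopping a timer benchmark
+ * from user space. The device path "/dev/rtdm/timerbench" and the
+ * parameter values are assumptions made for the example.
+ *
+ * @code
+ * struct rttst_tmbench_config cfg = {
+ *    .mode = RTTST_TMBENCH_TASK,
+ *    .priority = 99,
+ *    .period = 1000000,          // 1 ms, in nanoseconds
+ *    .warmup_loops = 1,
+ *    .histogram_size = 0,        // no histogram
+ *    .histogram_bucketsize = 1000,
+ *    .freeze_max = 0,
+ * };
+ * struct rttst_interm_bench_res res;
+ * struct rttst_overall_bench_res overall = { .histogram_avg = NULL };
+ * int fd = open("/dev/rtdm/timerbench", O_RDWR);
+ *
+ * ioctl(fd, RTTST_RTIOC_TMBENCH_START, &cfg);
+ * ioctl(fd, RTTST_RTIOC_INTERM_BENCH_RES, &res);   // poll periodically
+ * ioctl(fd, RTTST_RTIOC_TMBENCH_STOP, &overall);
+ * @endcode
+ */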
+
+#define RTTST_RTIOC_SWTEST_SET_TASKS_COUNT \
+	_IOW(RTIOC_TYPE_TESTING, 0x30, __u32)
+
+#define RTTST_RTIOC_SWTEST_SET_CPU \
+	_IOW(RTIOC_TYPE_TESTING, 0x31, __u32)
+
+#define RTTST_RTIOC_SWTEST_REGISTER_UTASK \
+	_IOW(RTIOC_TYPE_TESTING, 0x32, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_CREATE_KTASK \
+	_IOWR(RTIOC_TYPE_TESTING, 0x33, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_PEND \
+	_IOR(RTIOC_TYPE_TESTING, 0x34, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_SWITCH_TO \
+	_IOR(RTIOC_TYPE_TESTING, 0x35, struct rttst_swtest_dir)
+
+#define RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT \
+	_IOR(RTIOC_TYPE_TESTING, 0x36, __u32)
+
+#define RTTST_RTIOC_SWTEST_GET_LAST_ERROR \
+	_IOR(RTIOC_TYPE_TESTING, 0x37, struct rttst_swtest_error)
+
+#define RTTST_RTIOC_SWTEST_SET_PAUSE \
+	_IOW(RTIOC_TYPE_TESTING, 0x38, __u32)
+
+#define RTTST_RTIOC_RTDM_DEFER_CLOSE \
+	_IOW(RTIOC_TYPE_TESTING, 0x40, __u32)
+
+#define RTTST_RTIOC_RTDM_ACTOR_GET_CPU \
+	_IOR(RTIOC_TYPE_TESTING, 0x41, __u32)
+
+#define RTTST_RTIOC_RTDM_PING_PRIMARY \
+	_IOR(RTIOC_TYPE_TESTING, 0x42, __u32)
+
+#define RTTST_RTIOC_RTDM_PING_SECONDARY \
+	_IOR(RTIOC_TYPE_TESTING, 0x43, __u32)
+
+#define RTTST_RTIOC_HEAP_CHECK \
+	_IOR(RTIOC_TYPE_TESTING, 0x44, struct rttst_heap_parms)
+
+#define RTTST_RTIOC_HEAP_STAT_COLLECT \
+	_IOR(RTIOC_TYPE_TESTING, 0x45, int)
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_TESTING_H */
+++ linux-patched/include/xenomai/rtdm/uapi/analogy.h	2022-03-21 12:58:32.276860809 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/gpio.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, UAPI bits
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_ANALOGY_H
+#define _RTDM_UAPI_ANALOGY_H
+
+/* --- Misc precompilation constant --- */
+#define A4L_NAMELEN 20
+
+#define A4L_INFINITE 0
+#define A4L_NONBLOCK (-1)
+
+/* --- Common Analogy types --- */
+
+typedef unsigned short sampl_t;
+typedef unsigned long lsampl_t;
+
+/* MMAP ioctl argument structure */
+struct a4l_mmap_arg {
+	unsigned int idx_subd;
+	unsigned long size;
+	void *ptr;
+};
+typedef struct a4l_mmap_arg a4l_mmap_t;
+
+/* Constants related with buffer size
+   (might be used with BUFCFG ioctl) */
+#define A4L_BUF_MAXSIZE 0x1000000
+#define A4L_BUF_DEFSIZE 0x10000
+#define A4L_BUF_DEFMAGIC 0xffaaff55
+
+/* BUFCFG ioctl argument structure */
+struct a4l_buffer_config {
+	/* NOTE: with the latest buffer implementation, the field
+	   idx_subd became useless; buffers are now per-context, so the
+	   buffer size configuration is specific to an opened device.
+	   There is one exception: a default buffer size can be defined
+	   per device. So far, a hack is used to implement the
+	   configuration of the default buffer size. */
+	unsigned int idx_subd;
+	unsigned long buf_size;
+};
+typedef struct a4l_buffer_config a4l_bufcfg_t;
+
+/* BUFINFO ioctl argument structure */
+struct a4l_buffer_info {
+	unsigned int idx_subd;
+	unsigned long buf_size;
+	unsigned long rw_count;
+};
+typedef struct a4l_buffer_info a4l_bufinfo_t;
+
+/* BUFCFG2 / BUFINFO2 ioctl argument structure */
+struct a4l_buffer_config2 {
+	unsigned long wake_count;
+	unsigned long reserved[3];
+};
+typedef struct a4l_buffer_config2 a4l_bufcfg2_t;
+
+/* POLL ioctl argument structure */
+struct a4l_poll {
+	unsigned int idx_subd;
+	unsigned long arg;
+};
+typedef struct a4l_poll a4l_poll_t;
+
+/* DEVCFG ioctl argument structure */
+struct a4l_link_desc {
+	unsigned char bname_size;
+	char *bname;
+	unsigned int opts_size;
+	void *opts;
+};
+typedef struct a4l_link_desc a4l_lnkdesc_t;
+
+/* DEVINFO ioctl argument structure */
+struct a4l_dev_info {
+	char board_name[A4L_NAMELEN];
+	char driver_name[A4L_NAMELEN];
+	int nb_subd;
+	int idx_read_subd;
+	int idx_write_subd;
+};
+typedef struct a4l_dev_info a4l_dvinfo_t;
+
+#define CIO 'd'
+#define A4L_DEVCFG _IOW(CIO,0,a4l_lnkdesc_t)
+#define A4L_DEVINFO _IOR(CIO,1,a4l_dvinfo_t)
+#define A4L_SUBDINFO _IOR(CIO,2,a4l_sbinfo_t)
+#define A4L_CHANINFO _IOR(CIO,3,a4l_chinfo_arg_t)
+#define A4L_RNGINFO _IOR(CIO,4,a4l_rnginfo_arg_t)
+#define A4L_CMD _IOWR(CIO,5,a4l_cmd_t)
+#define A4L_CANCEL _IOR(CIO,6,unsigned int)
+#define A4L_INSNLIST _IOR(CIO,7,unsigned int)
+#define A4L_INSN _IOR(CIO,8,unsigned int)
+#define A4L_BUFCFG _IOR(CIO,9,a4l_bufcfg_t)
+#define A4L_BUFINFO _IOWR(CIO,10,a4l_bufinfo_t)
+#define A4L_POLL _IOR(CIO,11,unsigned int)
+#define A4L_MMAP _IOWR(CIO,12,unsigned int)
+#define A4L_NBCHANINFO _IOR(CIO,13,a4l_chinfo_arg_t)
+#define A4L_NBRNGINFO _IOR(CIO,14,a4l_rnginfo_arg_t)
+
+/* These IOCTLs are bound to be merged with A4L_BUFCFG and A4L_BUFINFO
+   at the next major release */
+#define A4L_BUFCFG2 _IOR(CIO,15,a4l_bufcfg_t)
+#define A4L_BUFINFO2 _IOWR(CIO,16,a4l_bufcfg_t)
+
+/*!
+ * @addtogroup analogy_lib_async1
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_CMD_xxx @name ANALOGY_CMD_xxx
+ * @brief Common command flags definitions
+ * @{
+ */
+
+/**
+ * Do not execute the command, just check it
+ */
+#define A4L_CMD_SIMUL 0x1
+/**
+ * Perform data recovery / transmission in bulk mode
+ */
+#define A4L_CMD_BULK 0x2
+/**
+ * Perform a command which will write data to the device
+ */
+#define A4L_CMD_WRITE 0x4
+
+	  /*! @} ANALOGY_CMD_xxx */
+
+/*!
+ * @anchor TRIG_xxx @name TRIG_xxx
+ * @brief Command triggers flags definitions
+ * @{
+ */
+
+/**
+ * Never trigger
+ */
+#define TRIG_NONE	0x00000001
+/**
+ * Trigger now + N ns
+ */
+#define TRIG_NOW	0x00000002
+/**
+ * Trigger on next lower level trig
+ */
+#define TRIG_FOLLOW	0x00000004
+/**
+ * Trigger at time N ns
+ */
+#define TRIG_TIME	0x00000008
+/**
+ * Trigger at rate N ns
+ */
+#define TRIG_TIMER	0x00000010
+/**
+ * Trigger when count reaches N
+ */
+#define TRIG_COUNT	0x00000020
+/**
+ * Trigger on external signal N
+ */
+#define TRIG_EXT	0x00000040
+/**
+ * Trigger on analogy-internal signal N
+ */
+#define TRIG_INT	0x00000080
+/**
+ * Driver defined trigger
+ */
+#define TRIG_OTHER	0x00000100
+/**
+ * Wake up on end-of-scan
+ */
+#define TRIG_WAKE_EOS	0x0020
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_MASK 0x00030000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_NEAREST 0x00000000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_DOWN 0x00010000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_UP 0x00020000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_UP_NEXT 0x00030000
+
+	  /*! @} TRIG_xxx */
+
+/*!
+ * @anchor CHAN_RNG_AREF @name Channel macros
+ * @brief Precompilation macros and constants useful for the channel
+ * descriptors array located in the command structure
+ * @{
+ */
+
+/**
+ * Channel indication macro
+ */
+#define CHAN(a) ((a) & 0xffff)
+/**
+ * Range definition macro
+ */
+#define RNG(a) (((a) & 0xff) << 16)
+/**
+ * Reference definition macro
+ */
+#define AREF(a) (((a) & 0x03) << 24)
+/**
+ * Flags definition macro
+ */
+#define FLAGS(a) ((a) & CR_FLAGS_MASK)
+/**
+ * Channel + range + reference definition macro
+ */
+#define PACK(a, b, c) (a | RNG(b) | AREF(c))
+/**
+ * Channel + range + reference + flags definition macro
+ */
+#define PACK_FLAGS(a, b, c, d) (PACK(a, b, c) | FLAGS(d))
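+
+/*
+ * Usage sketch (illustrative): building one entry of the channel
+ * descriptors array with the packing macros above. The channel, range
+ * and reference indices are arbitrary values chosen for the example.
+ *
+ * @code
+ * unsigned int chan_descs[1];
+ *
+ * // channel 3, range index 0, differential reference
+ * chan_descs[0] = PACK(3, 0, AREF_DIFF);
+ * @endcode
+ */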
+
+/**
+ * Analog reference is analog ground
+ */
+#define AREF_GROUND 0x00
+/**
+ * Analog reference is analog common
+ */
+#define AREF_COMMON 0x01
+/**
+ * Analog reference is differential
+ */
+#define AREF_DIFF 0x02
+/**
+ * Analog reference is undefined
+ */
+#define AREF_OTHER 0x03
+
+	  /*! @} CHAN_RNG_AREF */
+
+#if !defined(DOXYGEN_CPP)
+
+#define CR_FLAGS_MASK 0xfc000000
+#define CR_ALT_FILTER (1<<26)
+#define CR_DITHER CR_ALT_FILTER
+#define CR_DEGLITCH CR_ALT_FILTER
+#define CR_ALT_SOURCE (1<<27)
+#define CR_EDGE	(1<<30)
+#define CR_INVERT (1<<31)
+
+#endif /* !DOXYGEN_CPP */
+
+/*!
+ * @brief Structure describing the asynchronous instruction
+ * @see a4l_snd_command()
+ */
+
+struct a4l_cmd_desc {
+	unsigned char idx_subd;
+			       /**< Subdevice to which the command will be applied. */
+
+	unsigned long flags;
+			       /**< Command flags */
+
+	/* Command trigger characteristics */
+	unsigned int start_src;
+			       /**< Start trigger type */
+	unsigned int start_arg;
+			       /**< Start trigger argument */
+	unsigned int scan_begin_src;
+			       /**< Scan begin trigger type */
+	unsigned int scan_begin_arg;
+			       /**< Scan begin trigger argument */
+	unsigned int convert_src;
+			       /**< Convert trigger type */
+	unsigned int convert_arg;
+			       /**< Convert trigger argument */
+	unsigned int scan_end_src;
+			       /**< Scan end trigger type */
+	unsigned int scan_end_arg;
+			       /**< Scan end trigger argument */
+	unsigned int stop_src;
+			       /**< Stop trigger type */
+	unsigned int stop_arg;
+			   /**< Stop trigger argument */
+
+	unsigned char nb_chan;
+			   /**< Count of channels related to the command */
+	unsigned int *chan_descs;
+			    /**< Array containing the channel descriptors */
+
+	/* Driver specific fields */
+	unsigned int valid_simul_stages;
+			   /**< Command simulation valid stages (driver dependent) */
+
+	unsigned int data_len;
+			   /**< Driver specific buffer size */
+	sampl_t *data;
+	                   /**< Driver specific buffer pointer */
+};
+typedef struct a4l_cmd_desc a4l_cmd_t;
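+
+/*
+ * Usage sketch (illustrative): describing a periodic asynchronous
+ * acquisition on an analog input subdevice. The subdevice index, the
+ * 100 us scan period and the channel list are assumptions made for the
+ * example; the command is submitted with a4l_snd_command().
+ *
+ * @code
+ * unsigned int chans[2] = { PACK(0, 0, AREF_GROUND),
+ *                           PACK(1, 0, AREF_GROUND) };
+ * struct a4l_cmd_desc cmd = {
+ *    .idx_subd = 0,
+ *    .flags = 0,
+ *    .start_src = TRIG_NOW,
+ *    .start_arg = 0,
+ *    .scan_begin_src = TRIG_TIMER,
+ *    .scan_begin_arg = 100000,   // scan period, in ns
+ *    .convert_src = TRIG_NOW,
+ *    .convert_arg = 0,
+ *    .scan_end_src = TRIG_COUNT,
+ *    .scan_end_arg = 2,          // one scan = 2 conversions
+ *    .stop_src = TRIG_NONE,
+ *    .stop_arg = 0,
+ *    .nb_chan = 2,
+ *    .chan_descs = chans,
+ * };
+ * @endcode
+ */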
+
+/*! @} analogy_lib_async1 */
+
+/* --- Range section --- */
+
+/** Constant for internal use only (must not be used by driver
+    developer).  */
+#define A4L_RNG_FACTOR 1000000
+
+/**
+ * Volt unit range flag
+ */
+#define A4L_RNG_VOLT_UNIT 0x0
+/**
+ * MilliAmpere unit range flag
+ */
+#define A4L_RNG_MAMP_UNIT 0x1
+/**
+ * No unit range flag
+ */
+#define A4L_RNG_NO_UNIT 0x2
+/**
+ * External unit range flag
+ */
+#define A4L_RNG_EXT_UNIT 0x4
+
+/**
+ * Macro to retrieve the range unit from the range flags
+ */
+#define A4L_RNG_UNIT(x) (x & (A4L_RNG_VOLT_UNIT |	\
+			      A4L_RNG_MAMP_UNIT |	\
+			      A4L_RNG_NO_UNIT |		\
+			      A4L_RNG_EXT_UNIT))
+
+/* --- Subdevice flags desc stuff --- */
+
+/* TODO: replace ANALOGY_SUBD_AI with ANALOGY_SUBD_ANALOG
+   and ANALOGY_SUBD_INPUT */
+
+/* Subdevice types masks */
+#define A4L_SUBD_MASK_READ 0x80000000
+#define A4L_SUBD_MASK_WRITE 0x40000000
+#define A4L_SUBD_MASK_SPECIAL 0x20000000
+
+/*!
+ * @addtogroup analogy_subdevice
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_SUBD_xxx @name Subdevices types
+ * @brief Flags to define the subdevice type
+ * @{
+ */
+
+/**
+ * Unused subdevice
+ */
+#define A4L_SUBD_UNUSED (A4L_SUBD_MASK_SPECIAL|0x1)
+/**
+ * Analog input subdevice
+ */
+#define A4L_SUBD_AI (A4L_SUBD_MASK_READ|0x2)
+/**
+ * Analog output subdevice
+ */
+#define A4L_SUBD_AO (A4L_SUBD_MASK_WRITE|0x4)
+/**
+ * Digital input subdevice
+ */
+#define A4L_SUBD_DI (A4L_SUBD_MASK_READ|0x8)
+/**
+ * Digital output subdevice
+ */
+#define A4L_SUBD_DO (A4L_SUBD_MASK_WRITE|0x10)
+/**
+ * Digital input/output subdevice
+ */
+#define A4L_SUBD_DIO (A4L_SUBD_MASK_SPECIAL|0x20)
+/**
+ * Counter subdevice
+ */
+#define A4L_SUBD_COUNTER (A4L_SUBD_MASK_SPECIAL|0x40)
+/**
+ * Timer subdevice
+ */
+#define A4L_SUBD_TIMER (A4L_SUBD_MASK_SPECIAL|0x80)
+/**
+ * Memory, EEPROM, DPRAM
+ */
+#define A4L_SUBD_MEMORY (A4L_SUBD_MASK_SPECIAL|0x100)
+/**
+ * Calibration subdevice  DACs
+ */
+#define A4L_SUBD_CALIB (A4L_SUBD_MASK_SPECIAL|0x200)
+/**
+ * Processor, DSP
+ */
+#define A4L_SUBD_PROC (A4L_SUBD_MASK_SPECIAL|0x400)
+/**
+ * Serial IO subdevice
+ */
+#define A4L_SUBD_SERIAL (A4L_SUBD_MASK_SPECIAL|0x800)
+/**
+ * Mask which gathers all the types
+ */
+#define A4L_SUBD_TYPES (A4L_SUBD_UNUSED |	 \
+			   A4L_SUBD_AI |	 \
+			   A4L_SUBD_AO |	 \
+			   A4L_SUBD_DI |	 \
+			   A4L_SUBD_DO |	 \
+			   A4L_SUBD_DIO |	 \
+			   A4L_SUBD_COUNTER | \
+			   A4L_SUBD_TIMER |	 \
+			   A4L_SUBD_MEMORY |	 \
+			   A4L_SUBD_CALIB |	 \
+			   A4L_SUBD_PROC |	 \
+			   A4L_SUBD_SERIAL)
+
+/*! @} ANALOGY_SUBD_xxx */
+
+/*!
+ * @anchor ANALOGY_SUBD_FT_xxx @name Subdevice features
+ * @brief Flags to define the subdevice's capabilities
+ * @{
+ */
+
+/* Subdevice capabilities */
+/**
+ * The subdevice can handle commands (i.e. it can perform asynchronous
+ * acquisitions)
+ */
+#define A4L_SUBD_CMD 0x1000
+/**
+ * The subdevice supports mmap operations (technically, any driver can
+ * do it; however, the developer might want his driver to be accessed
+ * only through read / write)
+ */
+#define A4L_SUBD_MMAP 0x8000
+
+/*! @} ANALOGY_SUBD_FT_xxx */
+
+/*!
+ * @anchor ANALOGY_SUBD_ST_xxx @name Subdevice status
+ * @brief Flags to define the subdevice's status
+ * @{
+ */
+
+/* Subdevice status flag(s) */
+/**
+ * The subdevice is busy, a synchronous or an asynchronous acquisition
+ * is occurring
+ */
+#define A4L_SUBD_BUSY_NR 0
+#define A4L_SUBD_BUSY (1 << A4L_SUBD_BUSY_NR)
+
+/**
+ * The subdevice is about to be cleaned in the middle of the detach
+ * procedure
+ */
+#define A4L_SUBD_CLEAN_NR 1
+#define A4L_SUBD_CLEAN (1 << A4L_SUBD_CLEAN_NR)
+
+
+/*! @} ANALOGY_SUBD_ST_xxx */
+
+/* --- Subdevice related IOCTL arguments structures --- */
+
+/* SUBDINFO IOCTL argument */
+struct a4l_subd_info {
+	unsigned long flags;
+	unsigned long status;
+	unsigned char nb_chan;
+};
+typedef struct a4l_subd_info a4l_sbinfo_t;
+
+/* CHANINFO / NBCHANINFO IOCTL arguments */
+struct a4l_chan_info {
+	unsigned long chan_flags;
+	unsigned char nb_rng;
+	unsigned char nb_bits;
+};
+typedef struct a4l_chan_info a4l_chinfo_t;
+
+struct a4l_chinfo_arg {
+	unsigned int idx_subd;
+	void *info;
+};
+typedef struct a4l_chinfo_arg a4l_chinfo_arg_t;
+
+/* RNGINFO / NBRNGINFO IOCTL arguments */
+struct a4l_rng_info {
+	long min;
+	long max;
+	unsigned long flags;
+};
+typedef struct a4l_rng_info a4l_rnginfo_t;
+
+struct a4l_rng_info_arg {
+	unsigned int idx_subd;
+	unsigned int idx_chan;
+	void *info;
+};
+typedef struct a4l_rng_info_arg a4l_rnginfo_arg_t;
+
+/*! @} */
+
+#define A4L_INSN_MASK_READ 0x8000000
+#define A4L_INSN_MASK_WRITE 0x4000000
+#define A4L_INSN_MASK_SPECIAL 0x2000000
+
+/*!
+ * @addtogroup analogy_lib_sync1
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_INSN_xxx @name Instruction type
+ * @brief Flags to define the type of instruction
+ * @{
+ */
+
+/**
+ * Read instruction
+ */
+#define A4L_INSN_READ (0 | A4L_INSN_MASK_READ)
+/**
+ * Write instruction
+ */
+#define A4L_INSN_WRITE (1 | A4L_INSN_MASK_WRITE)
+/**
+ * "Bits" instruction
+ */
+#define A4L_INSN_BITS (2 | A4L_INSN_MASK_READ | \
+		       A4L_INSN_MASK_WRITE)
+/**
+ * Configuration instruction
+ */
+#define A4L_INSN_CONFIG (3 | A4L_INSN_MASK_READ | \
+			 A4L_INSN_MASK_WRITE)
+/**
+ * Get time instruction
+ */
+#define A4L_INSN_GTOD (4 | A4L_INSN_MASK_READ | \
+		       A4L_INSN_MASK_SPECIAL)
+/**
+ * Wait instruction
+ */
+#define A4L_INSN_WAIT (5 | A4L_INSN_MASK_WRITE | \
+		       A4L_INSN_MASK_SPECIAL)
+/**
+ * Trigger instruction (to start asynchronous acquisition)
+ */
+#define A4L_INSN_INTTRIG (6 | A4L_INSN_MASK_WRITE | \
+			  A4L_INSN_MASK_SPECIAL)
+
+	  /*! @} ANALOGY_INSN_xxx */
+
+/**
+ * Maximal wait duration
+ */
+#define A4L_INSN_WAIT_MAX 100000
+
+/*!
+ * @anchor INSN_CONFIG_xxx @name Configuration instruction type
+ * @brief Values to define the type of configuration instruction
+ * @{
+ */
+
+#define A4L_INSN_CONFIG_DIO_INPUT		0
+#define A4L_INSN_CONFIG_DIO_OUTPUT		1
+#define A4L_INSN_CONFIG_DIO_OPENDRAIN		2
+#define A4L_INSN_CONFIG_ANALOG_TRIG		16
+#define A4L_INSN_CONFIG_ALT_SOURCE		20
+#define A4L_INSN_CONFIG_DIGITAL_TRIG		21
+#define A4L_INSN_CONFIG_BLOCK_SIZE		22
+#define A4L_INSN_CONFIG_TIMER_1			23
+#define A4L_INSN_CONFIG_FILTER			24
+#define A4L_INSN_CONFIG_CHANGE_NOTIFY		25
+#define A4L_INSN_CONFIG_SERIAL_CLOCK		26
+#define A4L_INSN_CONFIG_BIDIRECTIONAL_DATA	27
+#define A4L_INSN_CONFIG_DIO_QUERY		28
+#define A4L_INSN_CONFIG_PWM_OUTPUT		29
+#define A4L_INSN_CONFIG_GET_PWM_OUTPUT		30
+#define A4L_INSN_CONFIG_ARM			31
+#define A4L_INSN_CONFIG_DISARM			32
+#define A4L_INSN_CONFIG_GET_COUNTER_STATUS	33
+#define A4L_INSN_CONFIG_RESET			34
+#define A4L_INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR	1001	/* Use CTR as single pulse generator */
+#define A4L_INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR	1002	/* Use CTR as pulse train generator */
+#define A4L_INSN_CONFIG_GPCT_QUADRATURE_ENCODER	1003	/* Use the counter as encoder */
+#define A4L_INSN_CONFIG_SET_GATE_SRC		2001	/* Set gate source */
+#define A4L_INSN_CONFIG_GET_GATE_SRC		2002	/* Get gate source */
+#define A4L_INSN_CONFIG_SET_CLOCK_SRC		2003	/* Set master clock source */
+#define A4L_INSN_CONFIG_GET_CLOCK_SRC		2004	/* Get master clock source */
+#define A4L_INSN_CONFIG_SET_OTHER_SRC		2005	/* Set other source */
+#define A4L_INSN_CONFIG_SET_COUNTER_MODE	4097
+#define A4L_INSN_CONFIG_SET_ROUTING		4099
+#define A4L_INSN_CONFIG_GET_ROUTING		4109
+
+/*! @} INSN_CONFIG_xxx */
+
+/*!
+ * @anchor ANALOGY_COUNTER_xxx @name Counter status bits
+ * @brief Status bits for INSN_CONFIG_GET_COUNTER_STATUS
+ * @{
+ */
+
+#define A4L_COUNTER_ARMED		0x1
+#define A4L_COUNTER_COUNTING		0x2
+#define A4L_COUNTER_TERMINAL_COUNT	0x4
+
+	  /*! @} ANALOGY_COUNTER_xxx */
+
+/*!
+ * @anchor ANALOGY_IO_DIRECTION @name IO direction
+ * @brief Values to define the IO polarity
+ * @{
+ */
+
+#define A4L_INPUT	0
+#define A4L_OUTPUT	1
+#define A4L_OPENDRAIN	2
+
+	  /*! @} ANALOGY_IO_DIRECTION */
+
+
+/*!
+ * @anchor ANALOGY_EV_xxx @name Event types
+ * @brief Values to define the Analogy events. They might be used to send
+ * some specific events through the instruction interface.
+ * @{
+ */
+
+#define A4L_EV_START		0x00040000
+#define A4L_EV_SCAN_BEGIN	0x00080000
+#define A4L_EV_CONVERT		0x00100000
+#define A4L_EV_SCAN_END		0x00200000
+#define A4L_EV_STOP		0x00400000
+
+/*! @} ANALOGY_EV_xxx */
+
+/*!
+ * @brief Structure describing the synchronous instruction
+ * @see a4l_snd_insn()
+ */
+
+struct a4l_instruction {
+	unsigned int type;
+		       /**< Instruction type */
+	unsigned int idx_subd;
+			   /**< Subdevice to which the instruction will be applied. */
+	unsigned int chan_desc;
+			    /**< Channel descriptor */
+	unsigned int data_size;
+			    /**< Size of the instruction data */
+	void *data;
+		    /**< Instruction data */
+};
+typedef struct a4l_instruction a4l_insn_t;
+
+/*!
+ * @brief Structure describing the list of synchronous instructions
+ * @see a4l_snd_insnlist()
+ */
+
+struct a4l_instruction_list {
+	unsigned int count;
+			/**< Instructions count */
+	a4l_insn_t *insns;
+			  /**< Array containing the instruction pointers */
+};
+typedef struct a4l_instruction_list a4l_insnlst_t;
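+
+/*
+ * Usage sketch (illustrative): a single synchronous read instruction
+ * acquiring one sample from channel 0 of subdevice 0. The indices are
+ * assumptions made for the example; the instruction is submitted with
+ * a4l_snd_insn(), or grouped in an a4l_insnlst_t and sent with
+ * a4l_snd_insnlist().
+ *
+ * @code
+ * sampl_t sample;
+ * a4l_insn_t insn = {
+ *    .type = A4L_INSN_READ,
+ *    .idx_subd = 0,
+ *    .chan_desc = PACK(0, 0, AREF_GROUND),
+ *    .data_size = sizeof(sample),
+ *    .data = &sample,
+ * };
+ * @endcode
+ */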
+
+/*! @} analogy_lib_sync1 */
+
+struct a4l_calibration_subdev {
+	a4l_sbinfo_t *info;
+	char *name;
+	int slen;
+	int idx;
+};
+
+struct a4l_calibration_subdev_data {
+	int index;
+	int channel;
+	int range;
+	int expansion;
+	int nb_coeff;
+	double *coeff;
+
+};
+
+struct a4l_calibration_data {
+	char *driver_name;
+	char *board_name;
+	int nb_ai;
+	struct a4l_calibration_subdev_data *ai;
+	int nb_ao;
+	struct a4l_calibration_subdev_data *ao;
+};
+
+struct a4l_polynomial {
+	int expansion;
+	int order;
+	int nb_coeff;
+	double *coeff;
+};
+
+
+#endif /* _RTDM_UAPI_ANALOGY_H */
+++ linux-patched/include/xenomai/rtdm/uapi/gpio.h	2022-03-21 12:58:32.269860877 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/serial.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_GPIO_H
+#define _RTDM_UAPI_GPIO_H
+
+struct rtdm_gpio_readout {
+	nanosecs_abs_t timestamp;
+	__s32 value;
+};
+
+#define GPIO_RTIOC_DIR_OUT	_IOW(RTDM_CLASS_GPIO, 0, int)
+#define GPIO_RTIOC_DIR_IN	_IO(RTDM_CLASS_GPIO, 1)
+#define GPIO_RTIOC_IRQEN	_IOW(RTDM_CLASS_GPIO, 2, int) /* GPIO trigger */
+#define GPIO_RTIOC_IRQDIS	_IO(RTDM_CLASS_GPIO, 3)
+#define GPIO_RTIOC_REQS		_IO(RTDM_CLASS_GPIO, 4)
+#define GPIO_RTIOC_RELS		_IO(RTDM_CLASS_GPIO, 5)
+#define GPIO_RTIOC_TS_MONO	_IOR(RTDM_CLASS_GPIO, 7, int)
+#define GPIO_RTIOC_TS_REAL	_IOR(RTDM_CLASS_GPIO, 8, int)
+#define GPIO_RTIOC_TS		GPIO_RTIOC_TS_REAL
+
+#define GPIO_TRIGGER_NONE		0x0 /* unspecified */
+#define GPIO_TRIGGER_EDGE_RISING	0x1
+#define GPIO_TRIGGER_EDGE_FALLING	0x2
+#define GPIO_TRIGGER_LEVEL_HIGH		0x4
+#define GPIO_TRIGGER_LEVEL_LOW		0x8
+#define GPIO_TRIGGER_MASK		0xf
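+
+/*
+ * Usage sketch (illustrative): configuring a GPIO pin for rising-edge
+ * interrupts and reading timestamped events. The device path
+ * "/dev/rtdm/mygpiochip/gpio12" and the exact argument conventions of
+ * the timestamp toggle are assumptions made for the example.
+ *
+ * @code
+ * struct rtdm_gpio_readout rdo;
+ * int on = 1, trigger = GPIO_TRIGGER_EDGE_RISING;
+ * int fd = open("/dev/rtdm/mygpiochip/gpio12", O_RDWR);
+ *
+ * ioctl(fd, GPIO_RTIOC_TS_MONO, &on);      // timestamp on the monotonic clock
+ * ioctl(fd, GPIO_RTIOC_IRQEN, &trigger);   // rising-edge interrupts
+ * read(fd, &rdo, sizeof(rdo));             // blocks until the next edge
+ * @endcode
+ */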
+
+#endif /* !_RTDM_UAPI_GPIO_H */
+++ linux-patched/include/xenomai/rtdm/uapi/serial.h	2022-03-21 12:58:32.262860946 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/rtdm.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, serial device profile header
+ *
+ * @note Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rtserial
+ */
+#ifndef _RTDM_UAPI_SERIAL_H
+#define _RTDM_UAPI_SERIAL_H
+
+#define RTSER_PROFILE_VER		3
+
+/*!
+ * @anchor RTSER_DEF_BAUD   @name RTSER_DEF_BAUD
+ * Default baud rate
+ * @{ */
+#define RTSER_DEF_BAUD			9600
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_PARITY   @name RTSER_xxx_PARITY
+ * Number of parity bits
+ * @{ */
+#define RTSER_NO_PARITY			0x00
+#define RTSER_ODD_PARITY		0x01
+#define RTSER_EVEN_PARITY		0x03
+#define RTSER_DEF_PARITY		RTSER_NO_PARITY
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_BITS   @name RTSER_xxx_BITS
+ * Number of data bits
+ * @{ */
+#define RTSER_5_BITS			0x00
+#define RTSER_6_BITS			0x01
+#define RTSER_7_BITS			0x02
+#define RTSER_8_BITS			0x03
+#define RTSER_DEF_BITS			RTSER_8_BITS
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_STOPB   @name RTSER_xxx_STOPB
+ * Number of stop bits
+ * @{ */
+#define RTSER_1_STOPB			0x00
+/** valid only in combination with 5 data bits */
+#define RTSER_1_5_STOPB			0x01
+#define RTSER_2_STOPB			0x01
+#define RTSER_DEF_STOPB			RTSER_1_STOPB
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_HAND   @name RTSER_xxx_HAND
+ * Handshake mechanisms
+ * @{ */
+#define RTSER_NO_HAND			0x00
+#define RTSER_RTSCTS_HAND		0x01
+#define RTSER_DEF_HAND			RTSER_NO_HAND
+/** @} */
+
+/*!
+ * @anchor RTSER_RS485_xxx   @name RTSER_RS485_xxx
+ * RS485 mode with automatic RTS handling
+ * @{ */
+#define RTSER_RS485_DISABLE		0x00
+#define RTSER_RS485_ENABLE		0x01
+#define RTSER_DEF_RS485			RTSER_RS485_DISABLE
+/** @} */
+
+/*!
+ * @anchor RTSER_FIFO_xxx   @name RTSER_FIFO_xxx
+ * Reception FIFO interrupt threshold
+ * @{ */
+#define RTSER_FIFO_DEPTH_1		0x00
+#define RTSER_FIFO_DEPTH_4		0x40
+#define RTSER_FIFO_DEPTH_8		0x80
+#define RTSER_FIFO_DEPTH_14		0xC0
+#define RTSER_DEF_FIFO_DEPTH		RTSER_FIFO_DEPTH_1
+/** @} */
+
+/*!
+ * @anchor RTSER_TIMEOUT_xxx   @name RTSER_TIMEOUT_xxx
+ * Special timeout values, see also @ref RTDM_TIMEOUT_xxx
+ * @{ */
+#define RTSER_TIMEOUT_INFINITE		RTDM_TIMEOUT_INFINITE
+#define RTSER_TIMEOUT_NONE		RTDM_TIMEOUT_NONE
+#define RTSER_DEF_TIMEOUT		RTDM_TIMEOUT_INFINITE
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_TIMESTAMP_HISTORY   @name RTSER_xxx_TIMESTAMP_HISTORY
+ * Timestamp history control
+ * @{ */
+#define RTSER_RX_TIMESTAMP_HISTORY	0x01
+#define RTSER_DEF_TIMESTAMP_HISTORY	0x00
+/** @} */
+
+/*!
+ * @anchor RTSER_EVENT_xxx   @name RTSER_EVENT_xxx
+ * Events bits
+ * @{ */
+#define RTSER_EVENT_RXPEND		0x01
+#define RTSER_EVENT_ERRPEND		0x02
+#define RTSER_EVENT_MODEMHI		0x04
+#define RTSER_EVENT_MODEMLO		0x08
+#define RTSER_EVENT_TXEMPTY		0x10
+#define RTSER_DEF_EVENT_MASK		0x00
+/** @} */
+
+
+/*!
+ * @anchor RTSER_SET_xxx   @name RTSER_SET_xxx
+ * Configuration mask bits
+ * @{ */
+#define RTSER_SET_BAUD			0x0001
+#define RTSER_SET_PARITY		0x0002
+#define RTSER_SET_DATA_BITS		0x0004
+#define RTSER_SET_STOP_BITS		0x0008
+#define RTSER_SET_HANDSHAKE		0x0010
+#define RTSER_SET_FIFO_DEPTH		0x0020
+#define RTSER_SET_TIMEOUT_RX		0x0100
+#define RTSER_SET_TIMEOUT_TX		0x0200
+#define RTSER_SET_TIMEOUT_EVENT		0x0400
+#define RTSER_SET_TIMESTAMP_HISTORY	0x0800
+#define RTSER_SET_EVENT_MASK		0x1000
+#define RTSER_SET_RS485			0x2000
+/** @} */
+
+
+/*!
+ * @anchor RTSER_LSR_xxx   @name RTSER_LSR_xxx
+ * Line status bits
+ * @{ */
+#define RTSER_LSR_DATA			0x01
+#define RTSER_LSR_OVERRUN_ERR		0x02
+#define RTSER_LSR_PARITY_ERR		0x04
+#define RTSER_LSR_FRAMING_ERR		0x08
+#define RTSER_LSR_BREAK_IND		0x10
+#define RTSER_LSR_THR_EMTPY		0x20
+#define RTSER_LSR_TRANSM_EMPTY		0x40
+#define RTSER_LSR_FIFO_ERR		0x80
+#define RTSER_SOFT_OVERRUN_ERR		0x0100
+/** @} */
+
+
+/*!
+ * @anchor RTSER_MSR_xxx   @name RTSER_MSR_xxx
+ * Modem status bits
+ * @{ */
+#define RTSER_MSR_DCTS			0x01
+#define RTSER_MSR_DDSR			0x02
+#define RTSER_MSR_TERI			0x04
+#define RTSER_MSR_DDCD			0x08
+#define RTSER_MSR_CTS			0x10
+#define RTSER_MSR_DSR			0x20
+#define RTSER_MSR_RI			0x40
+#define RTSER_MSR_DCD			0x80
+/** @} */
+
+
+/*!
+ * @anchor RTSER_MCR_xxx   @name RTSER_MCR_xxx
+ * Modem control bits
+ * @{ */
+#define RTSER_MCR_DTR			0x01
+#define RTSER_MCR_RTS			0x02
+#define RTSER_MCR_OUT1			0x04
+#define RTSER_MCR_OUT2			0x08
+#define RTSER_MCR_LOOP			0x10
+/** @} */
+
+
+/*!
+ * @anchor RTSER_BREAK_xxx   @name RTSER_BREAK_xxx
+ * Break control
+ * @{ */
+#define RTSER_BREAK_CLR			0x00
+#define RTSER_BREAK_SET			0x01
+
+
+/**
+ * Serial device configuration
+ */
+typedef struct rtser_config {
+	/** mask specifying valid fields, see @ref RTSER_SET_xxx */
+	int		config_mask;
+
+	/** baud rate, default @ref RTSER_DEF_BAUD */
+	int		baud_rate;
+
+	/** number of parity bits, see @ref RTSER_xxx_PARITY */
+	int		parity;
+
+	/** number of data bits, see @ref RTSER_xxx_BITS */
+	int		data_bits;
+
+	/** number of stop bits, see @ref RTSER_xxx_STOPB */
+	int		stop_bits;
+
+	/** handshake mechanisms, see @ref RTSER_xxx_HAND */
+	int		handshake;
+
+	/** reception FIFO interrupt threshold, see @ref RTSER_FIFO_xxx */
+	int		fifo_depth;
+
+	int		reserved;
+
+	/** reception timeout, see @ref RTSER_TIMEOUT_xxx for special
+	 *  values */
+	nanosecs_rel_t	rx_timeout;
+
+	/** transmission timeout, see @ref RTSER_TIMEOUT_xxx for special
+	 *  values */
+	nanosecs_rel_t	tx_timeout;
+
+	/** event timeout, see @ref RTSER_TIMEOUT_xxx for special values */
+	nanosecs_rel_t	event_timeout;
+
+	/** enable timestamp history, see @ref RTSER_xxx_TIMESTAMP_HISTORY */
+	int		timestamp_history;
+
+	/** event mask to be used with @ref RTSER_RTIOC_WAIT_EVENT, see
+	 *  @ref RTSER_EVENT_xxx */
+	int		event_mask;
+
+	/** enable RS485 mode, see @ref RTSER_RS485_xxx */
+	int		rs485;
+} rtser_config_t;
+
+/**
+ * Serial device status
+ */
+typedef struct rtser_status {
+	/** line status register, see @ref RTSER_LSR_xxx */
+	int		line_status;
+
+	/** modem status register, see @ref RTSER_MSR_xxx */
+	int		modem_status;
+} rtser_status_t;
+
+/**
+ * Additional information about serial device events
+ */
+typedef struct rtser_event {
+	/** signalled events, see @ref RTSER_EVENT_xxx */
+	int		events;
+
+	/** number of pending input characters */
+	int		rx_pending;
+
+	/** last interrupt timestamp */
+	nanosecs_abs_t	last_timestamp;
+
+	/** reception timestamp of oldest character in input queue */
+	nanosecs_abs_t	rxpend_timestamp;
+} rtser_event_t;
+
+
+#define RTIOC_TYPE_SERIAL		RTDM_CLASS_SERIAL
+
+
+/*!
+ * @name Sub-Classes of RTDM_CLASS_SERIAL
+ * @{ */
+#define RTDM_SUBCLASS_16550A		0
+/** @} */
+
+
+/*!
+ * @anchor SERIOCTLs @name IOCTLs
+ * Serial device IOCTLs
+ * @{ */
+
+/**
+ * Get serial device configuration
+ *
+ * @param[out] arg Pointer to configuration buffer (struct rtser_config)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_GET_CONFIG	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x00, struct rtser_config)
+
+/**
+ * Set serial device configuration
+ *
+ * @param[in] arg Pointer to configuration buffer (struct rtser_config)
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EPERM is returned if the caller's context is invalid, see note below.
+ *
+ * - -ENOMEM is returned if a new history buffer for timestamps cannot be
+ * allocated.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note If rtser_config contains a valid timestamp_history and the
+ * addressed device has been opened in non-real-time context, this IOCTL must
+ * be issued in non-real-time context as well. Otherwise, this command will
+ * fail.
+ */
+#define RTSER_RTIOC_SET_CONFIG	\
+	_IOW(RTIOC_TYPE_SERIAL, 0x01, struct rtser_config)
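+
+/*
+ * Usage sketch (illustrative): switching an already opened port to
+ * 115200 bps while leaving all other parameters untouched. The device
+ * path "/dev/rtdm/rtser0" is an assumption made for the example.
+ *
+ * @code
+ * struct rtser_config cfg = {
+ *    .config_mask = RTSER_SET_BAUD,
+ *    .baud_rate = 115200,
+ * };
+ * int fd = open("/dev/rtdm/rtser0", O_RDWR);
+ *
+ * if (ioctl(fd, RTSER_RTIOC_SET_CONFIG, &cfg))
+ *    perror("RTSER_RTIOC_SET_CONFIG");
+ * @endcode
+ */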
+
+/**
+ * Get serial device status
+ *
+ * @param[out] arg Pointer to status buffer (struct rtser_status)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note The error states @c RTSER_LSR_OVERRUN_ERR, @c RTSER_LSR_PARITY_ERR,
+ * @c RTSER_LSR_FRAMING_ERR, and @c RTSER_SOFT_OVERRUN_ERR that may have
+ * occurred during previous read accesses to the device will be saved for being
+ * reported via this IOCTL. Upon return from @c RTSER_RTIOC_GET_STATUS, the
+ * saved state will be cleared.
+ */
+#define RTSER_RTIOC_GET_STATUS	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x02, struct rtser_status)
+
+/**
+ * Get serial device's modem control register
+ *
+ * @param[out] arg Pointer to variable receiving the content (int, see
+ *             @ref RTSER_MCR_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_GET_CONTROL	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x03, int)
+
+/**
+ * Set serial device's modem control register
+ *
+ * @param[in] arg New control register content (int, see @ref RTSER_MCR_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_SET_CONTROL	\
+	_IOW(RTIOC_TYPE_SERIAL, 0x04, int)
+
+/**
+ * Wait on serial device events according to previously set mask
+ *
+ * @param[out] arg Pointer to event information buffer (struct rtser_event)
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EBUSY is returned if another task is already waiting on events of this
+ * device.
+ *
+ * - -EBADF is returned if the file descriptor is invalid or the device has
+ * just been closed.
+ *
+ * @coretags{mode-unrestricted}
+ */
+#define RTSER_RTIOC_WAIT_EVENT	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x05, struct rtser_event)
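+
+/*
+ * Usage sketch (illustrative): waiting for reception events on an
+ * already opened serial device fd. The event mask is assumed to have
+ * been set beforehand via RTSER_RTIOC_SET_CONFIG with
+ * RTSER_SET_EVENT_MASK and RTSER_EVENT_RXPEND.
+ *
+ * @code
+ * struct rtser_event ev;
+ *
+ * if (ioctl(fd, RTSER_RTIOC_WAIT_EVENT, &ev) == 0)
+ *    printf("%d character(s) pending\n", ev.rx_pending);
+ * @endcode
+ */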
+/** @} */
+
+/**
+ * Set or clear break on UART output line
+ *
+ * @param[in] arg @c RTSER_BREAK_SET or @c RTSER_BREAK_CLR (int)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note A set break condition may also be cleared on UART line
+ * reconfiguration.
+ */
+#define RTSER_RTIOC_BREAK_CTL	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x06, int)
+/** @} */
+
+/*!
+ * @anchor SERutils @name RT Serial example and utility programs
+ * @{ */
+/** @example cross-link.c */
+/** @} */
+
+#endif /* !_RTDM_UAPI_SERIAL_H */
+++ linux-patched/include/xenomai/rtdm/uapi/rtdm.h	2022-03-21 12:58:32.254861024 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/gpiopwm.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, user API header.
+ *
+ * @note Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de>
+ * @note Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ * @ingroup rtdm_user_api
+ */
+#ifndef _RTDM_UAPI_RTDM_H
+#define _RTDM_UAPI_RTDM_H
+
+/*!
+ * @addtogroup rtdm
+ * @{
+ */
+
+/*!
+ * @anchor rtdm_api_versioning @name API Versioning
+ * @{ */
+/** Common user and driver API version */
+#define RTDM_API_VER			9
+
+/** Minimum API revision compatible with the current release */
+#define RTDM_API_MIN_COMPAT_VER		9
+/** @} API Versioning */
+
+/** RTDM type for representing absolute dates. Its base type is a 64 bit
+ *  unsigned integer. The unit is 1 nanosecond. */
+typedef uint64_t nanosecs_abs_t;
+
+/** RTDM type for representing relative intervals. Its base type is a 64 bit
+ *  signed integer. The unit is 1 nanosecond. Relative intervals can also
+ *  encode the special timeouts "infinite" and "non-blocking", see
+ *  @ref RTDM_TIMEOUT_xxx. */
+typedef int64_t nanosecs_rel_t;
+
+/*!
+ * @anchor RTDM_TIMEOUT_xxx @name RTDM_TIMEOUT_xxx
+ * Special timeout values
+ * @{ */
+/** Block forever. */
+#define RTDM_TIMEOUT_INFINITE		0
+
+/** Any negative timeout means non-blocking. */
+#define RTDM_TIMEOUT_NONE		(-1)
+/** @} RTDM_TIMEOUT_xxx */
+/** @} rtdm */
+
+/*!
+ * @addtogroup rtdm_profiles
+ * @{
+ */
+
+/*!
+ * @anchor RTDM_CLASS_xxx   @name RTDM_CLASS_xxx
+ * Device classes
+ * @{ */
+#define RTDM_CLASS_PARPORT		1
+#define RTDM_CLASS_SERIAL		2
+#define RTDM_CLASS_CAN			3
+#define RTDM_CLASS_NETWORK		4
+#define RTDM_CLASS_RTMAC		5
+#define RTDM_CLASS_TESTING		6
+#define RTDM_CLASS_RTIPC		7
+#define RTDM_CLASS_COBALT		8
+#define RTDM_CLASS_UDD			9
+#define RTDM_CLASS_MEMORY		10
+#define RTDM_CLASS_GPIO			11
+#define RTDM_CLASS_SPI			12
+#define RTDM_CLASS_PWM			13
+
+#define RTDM_CLASS_MISC			223
+#define RTDM_CLASS_EXPERIMENTAL		224
+#define RTDM_CLASS_MAX			255
+/** @} RTDM_CLASS_xxx */
+
+#define RTDM_SUBCLASS_GENERIC		(-1)
+
+#define RTIOC_TYPE_COMMON		0
+
+/*!
+ * @anchor device_naming    @name Device Naming
+ * Maximum length of device names (excluding the final null character)
+ * @{
+ */
+#define RTDM_MAX_DEVNAME_LEN		31
+/** @} Device Naming */
+
+/**
+ * Device information
+ */
+typedef struct rtdm_device_info {
+	/** Device flags, see @ref dev_flags "Device Flags" for details */
+	int device_flags;
+
+	/** Device class ID, see @ref RTDM_CLASS_xxx */
+	int device_class;
+
+	/** Device sub-class, either RTDM_SUBCLASS_GENERIC or a
+	 *  RTDM_SUBCLASS_xxx definition of the related @ref rtdm_profiles
+	 *  "Device Profile" */
+	int device_sub_class;
+
+	/** Supported device profile version */
+	int profile_version;
+} rtdm_device_info_t;
+
+/*!
+ * @anchor RTDM_PURGE_xxx_BUFFER    @name RTDM_PURGE_xxx_BUFFER
+ * Flags selecting buffers to be purged
+ * @{ */
+#define RTDM_PURGE_RX_BUFFER		0x0001
+#define RTDM_PURGE_TX_BUFFER		0x0002
+/** @} RTDM_PURGE_xxx_BUFFER*/
+
+/*!
+ * @anchor common_IOCTLs    @name Common IOCTLs
+ * The following IOCTLs are common to all device rtdm_profiles.
+ * @{
+ */
+
+/**
+ * Retrieve information about a device or socket.
+ * @param[out] arg Pointer to information buffer (struct rtdm_device_info)
+ */
+#define RTIOC_DEVICE_INFO \
+	_IOR(RTIOC_TYPE_COMMON, 0x00, struct rtdm_device_info)
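+
+/*
+ * Usage sketch (illustrative): querying the class of an open RTDM file
+ * descriptor. fd is assumed to refer to an already opened RTDM device
+ * or socket.
+ *
+ * @code
+ * struct rtdm_device_info info;
+ *
+ * if (ioctl(fd, RTIOC_DEVICE_INFO, &info) == 0 &&
+ *     info.device_class == RTDM_CLASS_SERIAL)
+ *    printf("serial device, profile v%d\n", info.profile_version);
+ * @endcode
+ */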
+
+/**
+ * Purge internal device or socket buffers.
+ * @param[in] arg Purge mask, see @ref RTDM_PURGE_xxx_BUFFER
+ */
+#define RTIOC_PURGE		_IOW(RTIOC_TYPE_COMMON, 0x10, int)
+/** @} Common IOCTLs */
+/** @} rtdm */
+
+/* Internally used for mapping socket functions on IOCTLs */
+struct _rtdm_getsockopt_args {
+	int level;
+	int optname;
+	void *optval;
+	socklen_t *optlen;
+};
+
+struct _rtdm_setsockopt_args {
+	int level;
+	int optname;
+	const void *optval;
+	socklen_t optlen;
+};
+
+struct _rtdm_getsockaddr_args {
+	struct sockaddr *addr;
+	socklen_t *addrlen;
+};
+
+struct _rtdm_setsockaddr_args {
+	const struct sockaddr *addr;
+	socklen_t addrlen;
+};
+
+#define _RTIOC_GETSOCKOPT	_IOW(RTIOC_TYPE_COMMON, 0x20,		\
+				     struct _rtdm_getsockopt_args)
+#define _RTIOC_SETSOCKOPT	_IOW(RTIOC_TYPE_COMMON, 0x21,		\
+				     struct _rtdm_setsockopt_args)
+#define _RTIOC_BIND		_IOW(RTIOC_TYPE_COMMON, 0x22,		\
+				     struct _rtdm_setsockaddr_args)
+#define _RTIOC_CONNECT		_IOW(RTIOC_TYPE_COMMON, 0x23,		\
+				     struct _rtdm_setsockaddr_args)
+#define _RTIOC_LISTEN		_IOW(RTIOC_TYPE_COMMON, 0x24,		\
+				     int)
+#define _RTIOC_ACCEPT		_IOW(RTIOC_TYPE_COMMON, 0x25,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_GETSOCKNAME	_IOW(RTIOC_TYPE_COMMON, 0x26,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_GETPEERNAME	_IOW(RTIOC_TYPE_COMMON, 0x27,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_SHUTDOWN		_IOW(RTIOC_TYPE_COMMON, 0x28,		\
+				     int)
+
+/* Internally used for mmap() */
+struct _rtdm_mmap_request {
+	__u64 offset;
+	size_t length;
+	int prot;
+	int flags;
+};
+
+#endif /* !_RTDM_UAPI_RTDM_H */
+++ linux-patched/include/xenomai/rtdm/uapi/gpiopwm.h	2022-03-21 12:58:32.247861092 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/can.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, pwm header
+ *
+ * @note Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rttesting
+ */
+#ifndef _RTDM_UAPI_PWM_H
+#define _RTDM_UAPI_PWM_H
+
+#include <linux/types.h>
+
+#define RTPWM_PROFILE_VER			1
+
+struct gpiopwm {
+	unsigned int duty_cycle;
+	unsigned int range_min;
+	unsigned int range_max;
+	unsigned int period;
+	unsigned int gpio;
+};
+
+#define RTIOC_TYPE_PWM		RTDM_CLASS_PWM
+
+#define GPIOPWM_RTIOC_SET_CONFIG \
+	_IOW(RTIOC_TYPE_PWM, 0x00, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_GET_CONFIG \
+	_IOR(RTIOC_TYPE_PWM, 0x10, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_START \
+	_IO(RTIOC_TYPE_PWM, 0x20)
+
+#define GPIOPWM_RTIOC_STOP \
+	_IO(RTIOC_TYPE_PWM, 0x30)
+
+#define GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE \
+	_IOW(RTIOC_TYPE_PWM, 0x40, unsigned int)
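+
+/*
+ * Usage sketch (illustrative): configuring and starting a PWM channel.
+ * The device path "/dev/rtdm/gpiopwm0" and the parameter values,
+ * including the interpretation of duty_cycle and the range bounds, are
+ * assumptions made for the example.
+ *
+ * @code
+ * struct gpiopwm conf = {
+ *    .gpio = 18,
+ *    .period = 1000000,      // assumed: 1 ms period, in ns
+ *    .range_min = 950,
+ *    .range_max = 2050,
+ *    .duty_cycle = 50,       // assumed: percent within [range_min, range_max]
+ * };
+ * int fd = open("/dev/rtdm/gpiopwm0", O_RDWR);
+ *
+ * ioctl(fd, GPIOPWM_RTIOC_SET_CONFIG, &conf);
+ * ioctl(fd, GPIOPWM_RTIOC_START);
+ * @endcode
+ */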
+
+
+#endif /* !_RTDM_UAPI_PWM_H */
+++ linux-patched/include/xenomai/rtdm/uapi/can.h	2022-03-21 12:58:32.240861160 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/net.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for RT-Socket-CAN, CAN device profile header
+ *
+ * @note Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * @note Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This RTDM CAN device profile header is based on:
+ *
+ * include/linux/can.h, include/linux/socket.h, net/can/pf_can.h in
+ * linux-can.patch, a CAN socket framework for Linux
+ *
+ * Copyright (C) 2004, 2005,
+ * Robert Schwebel, Benedikt Spranger, Marc Kleine-Budde, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_CAN_H
+#define _RTDM_UAPI_CAN_H
+
+/**
+ * @addtogroup rtdm_can
+ * @{
+ */
+
+#define RTCAN_PROFILE_VER  2
+
+#ifndef AF_CAN
+
+/** CAN address family */
+#define AF_CAN	29
+
+/** CAN protocol family */
+#define PF_CAN	AF_CAN
+
+#endif
+
+/** CAN socket levels
+ *
+ *  Used for @ref Sockopts for the particular protocols.
+ */
+#define SOL_CAN_RAW  103
+
+/** Type of CAN id (see @ref CAN_xxx_MASK and @ref CAN_xxx_FLAG) */
+typedef uint32_t can_id_t;
+typedef uint32_t canid_t;
+
+/** Type of CAN error mask */
+typedef can_id_t can_err_mask_t;
+
+/*!
+ * @anchor CAN_xxx_MASK @name CAN ID masks
+ * Bit masks for masking CAN IDs
+ * @{ */
+
+/** Bit mask for extended CAN IDs */
+#define CAN_EFF_MASK  0x1FFFFFFF
+
+/** Bit mask for standard CAN IDs */
+#define CAN_SFF_MASK  0x000007FF
+
+/** @} */
+
+/*!
+ * @anchor CAN_xxx_FLAG @name CAN ID flags
+ * Flags within a CAN ID indicating special CAN frame attributes
+ * @{ */
+/** Extended frame */
+#define CAN_EFF_FLAG  0x80000000
+/** Remote transmission frame */
+#define CAN_RTR_FLAG  0x40000000
+/** Error frame (see @ref Errors), not valid in struct can_filter */
+#define CAN_ERR_FLAG  0x20000000
+/** Invert CAN filter definition, only valid in struct can_filter */
+#define CAN_INV_FILTER CAN_ERR_FLAG
+
+/** @} */
+
+/*!
+ * @anchor CAN_PROTO @name Particular CAN protocols
+ * Possible protocols for the PF_CAN protocol family
+ *
+ * Currently only the RAW protocol is supported.
+ * @{ */
+/** Raw protocol of @c PF_CAN, applicable to socket type @c SOCK_RAW */
+#define CAN_RAW  1
+/** @} */
+
+#define CAN_BAUDRATE_UNKNOWN       ((uint32_t)-1)
+#define CAN_BAUDRATE_UNCONFIGURED  0
+
+/**
+ * Baudrate definition in bits per second
+ */
+typedef uint32_t can_baudrate_t;
+
+/**
+ * Supported CAN bit-time types
+ */
+enum CAN_BITTIME_TYPE {
+	/** Standard bit-time definition according to Bosch */
+	CAN_BITTIME_STD,
+	/** Hardware-specific BTR bit-time definition */
+	CAN_BITTIME_BTR
+};
+
+/**
+ * See @ref CAN_BITTIME_TYPE
+ */
+typedef enum CAN_BITTIME_TYPE can_bittime_type_t;
+
+/**
+ * Standard bit-time parameters according to Bosch
+ */
+struct can_bittime_std {
+	uint32_t brp;		/**< Baud rate prescaler */
+	uint8_t prop_seg;	/**< from 1 to 8 */
+	uint8_t phase_seg1;	/**< from 1 to 8 */
+	uint8_t phase_seg2;	/**< from 1 to 8 */
+	uint8_t sjw:7;		/**< from 1 to 4 */
+	uint8_t sam:1;		/**< 1 - enable triple sampling */
+};
+
+/**
+ * Hardware-specific BTR bit-times
+ */
+struct can_bittime_btr {
+
+	uint8_t btr0;		/**< Bus timing register 0 */
+	uint8_t btr1;		/**< Bus timing register 1 */
+};
+
+/**
+ * Custom CAN bit-time definition
+ */
+struct can_bittime {
+	/** Type of bit-time definition */
+	can_bittime_type_t type;
+
+	union {
+		/** Standard bit-time */
+		struct can_bittime_std std;
+		/** Hardware-specific BTR bit-time */
+		struct can_bittime_btr btr;
+	};
+};
+
+/*!
+ * @anchor CAN_MODE @name CAN operation modes
+ * Modes into which CAN controllers can be set
+ * @{ */
+enum CAN_MODE {
+	/*! Set controller in Stop mode (no reception / transmission possible) */
+	CAN_MODE_STOP = 0,
+
+	/*! Set controller into normal operation. @n
+	 *  Coming from stopped mode or bus off, the controller begins with no
+	 *  errors in @ref CAN_STATE_ACTIVE. */
+	CAN_MODE_START,
+
+	/*! Set controller into Sleep mode. @n
+	 *  This is only possible if the controller is not stopped or bus-off. @n
+	 *  Notice that sleep mode will only be entered when there is no bus
+	 *  activity. If the controller detects bus activity while "sleeping"
+	 *  it will go into operating mode again. @n
+	 *  To actively leave sleep mode again trigger @c CAN_MODE_START. */
+	CAN_MODE_SLEEP
+};
+/** @} */
+
+/** See @ref CAN_MODE */
+typedef enum CAN_MODE can_mode_t;
+
+/*!
+ * @anchor CAN_CTRLMODE @name CAN controller modes
+ * Special CAN controllers modes, which can be or'ed together.
+ *
+ * @note These modes are hardware-dependent. Please consult the hardware
+ * manual of the CAN controller for more detailed information.
+ *
+ * @{ */
+
+/*! Listen-Only mode
+ *
+ *  In this mode the CAN controller gives no acknowledgement on the CAN bus,
+ *  even if a message is received successfully, and no messages are
+ *  transmitted. This mode might be useful for bus-monitoring, hot-plugging
+ *  or throughput analysis. */
+#define CAN_CTRLMODE_LISTENONLY 0x1
+
+/*! Loopback mode
+ *
+ * In this mode the CAN controller does an internal loop-back, a message is
+ * transmitted and simultaneously received. That mode can be used for self
+ * test operation. */
+#define CAN_CTRLMODE_LOOPBACK   0x2
+
+/*! Triple sampling mode
+ *
+ * In this mode the CAN controller uses Triple sampling. */
+#define CAN_CTRLMODE_3_SAMPLES  0x4
+
+/** @} */
+
+/** See @ref CAN_CTRLMODE */
+typedef int can_ctrlmode_t;
+
+/*!
+ * @anchor CAN_STATE @name CAN controller states
+ * States a CAN controller can be in.
+ * @{ */
+enum CAN_STATE {
+	/** CAN controller is error active */
+	CAN_STATE_ERROR_ACTIVE = 0,
+	/** CAN controller is active */
+	CAN_STATE_ACTIVE = 0,
+
+	/** CAN controller is error active, warning level is reached */
+	CAN_STATE_ERROR_WARNING = 1,
+	/** CAN controller is error active, warning level is reached */
+	CAN_STATE_BUS_WARNING = 1,
+
+	/** CAN controller is error passive */
+	CAN_STATE_ERROR_PASSIVE = 2,
+	/** CAN controller is error passive */
+	CAN_STATE_BUS_PASSIVE = 2,
+
+	/** CAN controller went into Bus Off */
+	CAN_STATE_BUS_OFF,
+
+	/** CAN controller is scanning to get the baudrate */
+	CAN_STATE_SCANNING_BAUDRATE,
+
+	/** CAN controller is in stopped mode */
+	CAN_STATE_STOPPED,
+
+	/** CAN controller is in Sleep mode */
+	CAN_STATE_SLEEPING,
+};
+/** @} */
+
+/** See @ref CAN_STATE */
+typedef enum CAN_STATE can_state_t;
+
+#define CAN_STATE_OPERATING(state) ((state) < CAN_STATE_BUS_OFF)
+
+/**
+ * Filter for reception of CAN messages.
+ *
+ * This filter works as follows:
+ * A received CAN ID is AND'ed bitwise with @c can_mask and then compared to
+ * @c can_id. This also includes the @ref CAN_EFF_FLAG and @ref CAN_RTR_FLAG
+ * of @ref CAN_xxx_FLAG. If this comparison is true, the message will be
+ * received by the socket. The logic can be inverted with the @c can_id flag
+ * @ref CAN_INV_FILTER :
+ *
+ * @code
+ * if (can_id & CAN_INV_FILTER) {
+ *    if ((received_can_id & can_mask) != (can_id & ~CAN_INV_FILTER))
+ *       accept-message;
+ * } else {
+ *    if ((received_can_id & can_mask) == can_id)
+ *       accept-message;
+ * }
+ * @endcode
+ *
+ * Multiple filters can be arranged in a filter list and set with
+ * @ref Sockopts. If one of these filters matches a CAN ID upon reception
+ * of a CAN frame, this frame is accepted.
+ *
+ */
+typedef struct can_filter {
+	/** CAN ID which must match with incoming IDs after passing the mask.
+	 *  The filter logic can be inverted with the flag @ref CAN_INV_FILTER. */
+	uint32_t can_id;
+
+	/** Mask which is applied to incoming IDs. See @ref CAN_xxx_MASK
+	 *  "CAN ID masks" if exactly one CAN ID should come through. */
+	uint32_t can_mask;
+} can_filter_t;
+
+/**
+ * Socket address structure for the CAN address family
+ */
+struct sockaddr_can {
+	/** CAN address family, must be @c AF_CAN */
+	sa_family_t can_family;
+
+	/** Interface index of CAN controller. See @ref SIOCGIFINDEX. */
+	int can_ifindex;
+};
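+
+/*
+ * Usage sketch (illustrative): creating a raw CAN socket and binding
+ * it to "rtcan0". The interface name is an assumption made for the
+ * example; the interface index is resolved with SIOCGIFINDEX (see the
+ * IOCTL section below).
+ *
+ * @code
+ * struct sockaddr_can addr = { .can_family = AF_CAN };
+ * struct can_ifreq ifr;
+ * int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+ *
+ * strncpy(ifr.ifr_ifrn.ifrn_name, "rtcan0", IFNAMSIZ);
+ * ioctl(s, SIOCGIFINDEX, &ifr);
+ * addr.can_ifindex = ifr.ifr_ifru.ifru_ivalue;
+ * bind(s, (struct sockaddr *)&addr, sizeof(addr));
+ * @endcode
+ */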
+
+/**
+ * Raw CAN frame
+ *
+ * Central structure for receiving and sending CAN frames.
+ */
+typedef struct can_frame {
+	/** CAN ID of the frame
+	 *
+	 *  See @ref CAN_xxx_FLAG "CAN ID flags" for special bits.
+	 */
+	can_id_t can_id;
+
+	/** Size of the payload in bytes */
+	uint8_t can_dlc;
+
+	/** Payload data bytes */
+	uint8_t data[8] __attribute__ ((aligned(8)));
+} can_frame_t;
+
+/**
+ * CAN interface request descriptor
+ *
+ * Parameter block for submitting CAN control requests.
+ */
+struct can_ifreq {
+	union {
+		char	ifrn_name[IFNAMSIZ];
+	} ifr_ifrn;
+	
+	union {
+		struct can_bittime bittime;
+		can_baudrate_t baudrate;
+		can_ctrlmode_t ctrlmode;
+		can_mode_t mode;
+		can_state_t state;
+		int ifru_ivalue;
+	} ifr_ifru;
+};
+
+/*!
+ * @anchor RTCAN_TIMESTAMPS   @name Timestamp switches
+ * Arguments to pass to @ref RTCAN_RTIOC_TAKE_TIMESTAMP
+ * @{ */
+#define RTCAN_TAKE_NO_TIMESTAMPS	0  /**< Switch off taking timestamps */
+#define RTCAN_TAKE_TIMESTAMPS		1  /**< Do take timestamps */
+/** @} */
+
+#define RTIOC_TYPE_CAN  RTDM_CLASS_CAN
+
+/*!
+ * @anchor Rawsockopts @name RAW socket options
+ * Setting and getting CAN RAW socket options.
+ * @{ */
+
+/**
+ * CAN filter definition
+ *
+ * A CAN raw filter list with elements of struct can_filter can be installed
+ * with @c setsockopt. This list is used upon reception of CAN frames to
+ * decide whether the bound socket will receive a frame. An empty filter list
+ * can also be defined using optlen = 0, which is recommended for write-only
+ * sockets.
+ * @n
+ * If the socket was already bound with @ref Bind, the old filter list
+ * gets replaced with the new one. Be aware that already received, but
+ * not read out CAN frames may stay in the socket buffer.
+ * @n
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_FILTER
+ *
+ * @param [in] optval Pointer to array of struct can_filter.
+ *
+ * @param [in] optlen Size of filter list: count * sizeof(struct can_filter).
+ * @n
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -ENOMEM (Not enough memory to fulfill the operation)
+ * - -EINVAL (Invalid length "optlen")
+ * - -ENOSPC (No space to store filter list, check RT-Socket-CAN kernel
+ *            parameters)
+ * .
+ */
+#define CAN_RAW_FILTER		0x1
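+
+/*
+ * Usage sketch (illustrative): installing a two-element filter list on
+ * a bound raw CAN socket s. The IDs are arbitrary values chosen for
+ * the example.
+ *
+ * @code
+ * struct can_filter flist[2] = {
+ *    { .can_id = 0x123, .can_mask = CAN_SFF_MASK },          // only 0x123
+ *    { .can_id = 0x600, .can_mask = CAN_SFF_MASK & ~0xff },  // 0x600-0x6ff
+ * };
+ *
+ * setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, flist, sizeof(flist));
+ * @endcode
+ */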
+
+/**
+ * CAN error mask
+ *
+ * A CAN error mask (see @ref Errors) can be set with @c setsockopt. This
+ * mask is then used to decide if error frames are delivered to this socket
+ * in case of error conditions. The error frames are marked with the
+ * @ref CAN_ERR_FLAG of @ref CAN_xxx_FLAG and must be handled by the
+ * application properly. A detailed description of the errors can be
+ * found in the @c can_id and the @c data fields of struct can_frame
+ * (see @ref Errors for further details).
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_ERR_FILTER
+ *
+ * @param [in] optval Pointer to error mask of type can_err_mask_t.
+ *
+ * @param [in] optlen Size of error mask: sizeof(can_err_mask_t).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * .
+ */
+#define CAN_RAW_ERR_FILTER	0x2
+
+/**
+ * CAN TX loopback
+ *
+ * The TX loopback to other local sockets can be selected with this
+ * @c setsockopt.
+ *
+ * @note The TX loopback feature must be enabled in the kernel and then
+ * the loopback to other local TX sockets is enabled by default.
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_LOOPBACK
+ *
+ * @param [in] optval Pointer to integer value.
+ *
+ * @param [in] optlen Size of int: sizeof(int).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * - -EOPNOTSUPP (not supported, check RT-Socket-CAN kernel parameters).
+ */
+#define CAN_RAW_LOOPBACK	0x3
+
+/**
+ * CAN receive own messages
+ *
+ * Not supported by RT-Socket-CAN, but defined for compatibility with
+ * Socket-CAN.
+ */
+#define CAN_RAW_RECV_OWN_MSGS   0x4
+
+/** @} */
+
+/*!
+ * @anchor CANIOCTLs @name IOCTLs
+ * CAN device IOCTLs
+ *
+ * @deprecated Passing \c struct \c ifreq as a request descriptor
+ * for CAN IOCTLs is still accepted for backward compatibility,
+ * however it is recommended to switch to \c struct \c can_ifreq at
+ * the first opportunity.
+ *
+ * @{ */
+
+/**
+ * Get CAN interface index by name
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                     (<TT>struct can_ifreq</TT>). If
+ *                     <TT>ifr_name</TT> holds a valid CAN interface
+ *                     name <TT>ifr_ifindex</TT> will be filled with
+ *                     the corresponding interface index.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ *
+ * @coretags{task-unrestricted}
+ */
+#ifdef DOXYGEN_CPP /* For Doxygen only, already defined by kernel headers */
+#define SIOCGIFINDEX defined_by_kernel_header_file
+#endif
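+
+/*
+ * Lookup and bind sketch (illustrative only): the interface name
+ * "rtcan0" is a placeholder, Cobalt POSIX symbol wrapping is assumed so
+ * that socket(), ioctl() and bind() reach RTDM, and error checking is
+ * omitted.
+ *
+ *	struct can_ifreq ifr;
+ *	struct sockaddr_can addr;
+ *	int s;
+ *
+ *	s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+ *	strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ *	ioctl(s, SIOCGIFINDEX, &ifr);
+ *
+ *	addr.can_family = AF_CAN;
+ *	addr.can_ifindex = ifr.ifr_ifindex;
+ *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
+ */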
+
+/**
+ * Set baud rate
+ *
+ * The baudrate must be specified in bits per second. The driver will
+ * try to calculate reasonable CAN bit-timing parameters. You can use
+ * @ref SIOCSCANCUSTOMBITTIME to set custom bit-timing.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EDOM  : Baud rate not possible.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the baud rate is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANBAUDRATE	_IOW(RTIOC_TYPE_CAN, 0x01, struct can_ifreq)
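+
+/*
+ * Baud rate setup sketch (illustrative only): assumes that the ifr_ifru
+ * union of struct can_ifreq exposes a baudrate member as declared
+ * earlier in this header, that "s" is any RT CAN socket, and that error
+ * checking is omitted.
+ *
+ *	struct can_ifreq ifr;
+ *
+ *	strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ *	ifr.ifr_ifru.baudrate = 500000;
+ *	ioctl(s, SIOCSCANBAUDRATE, &ifr);
+ */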
+
+/**
+ * Get baud rate
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANBAUDRATE	_IOWR(RTIOC_TYPE_CAN, 0x02, struct can_ifreq)
+
+/**
+ * Set custom bit time parameter
+ *
+ * Custom bit-times can be defined in various formats (see
+ * struct can_bittime).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the bit-time is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCUSTOMBITTIME	_IOW(RTIOC_TYPE_CAN, 0x03, struct can_ifreq)
+
+/**
+ * Get custom bit-time parameters
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANCUSTOMBITTIME	_IOWR(RTIOC_TYPE_CAN, 0x04, struct can_ifreq)
+
+/**
+ * Set operation mode of CAN controller
+ *
+ * See @ref CAN_MODE "CAN controller modes" for available modes.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_mode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EAGAIN: (@ref CAN_MODE_START, @ref CAN_MODE_STOP) Could not successfully
+ *            set mode, hardware is busy. Try again.
+ * - -EINVAL: (@ref CAN_MODE_START) Cannot start controller,
+ *            set baud rate first.
+ * - -ENETDOWN: (@ref CAN_MODE_SLEEP) Cannot go into sleep mode because
+ *              controller is stopped or bus off.
+ * - -EOPNOTSUPP: unknown mode
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting a CAN controller into normal operation after a bus-off can
+ * take some time (128 occurrences of 11 consecutive recessive bits).
+ * In such a case, although this IOCTL will return immediately with success
+ * and @ref SIOCGCANSTATE will report @ref CAN_STATE_ACTIVE,
+ * bus-off recovery may still be in progress. @n
+ * If a controller is bus-off, setting it into stop mode will return no error
+ * but the controller remains bus-off.
+ */
+#define SIOCSCANMODE		_IOW(RTIOC_TYPE_CAN, 0x05, struct can_ifreq)
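+
+/*
+ * Controller start sketch (illustrative only; same assumptions as for
+ * SIOCSCANBAUDRATE above, with a mode member assumed in ifr_ifru):
+ *
+ *	struct can_ifreq ifr;
+ *
+ *	strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ *	ifr.ifr_ifru.mode = CAN_MODE_START;
+ *	ioctl(s, SIOCSCANMODE, &ifr);
+ */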
+
+/**
+ * Get current state of CAN controller
+ *
+ * States are divided into main states and additional error indicators. A CAN
+ * controller is always in exactly one main state. CAN bus errors are
+ * registered by the CAN hardware and collected by the driver. There is one
+ * error indicator (bit) per error type. If this IOCTL is triggered the error
+ * types which occurred since the last call of this IOCTL are reported and
+ * thereafter the error indicators are cleared. See also
+ * @ref CAN_STATE "CAN controller states".
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    @ref can_state_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANSTATE		_IOWR(RTIOC_TYPE_CAN, 0x06, struct can_ifreq)
+
+/**
+ * Set special controller modes
+ *
+ * Various special controller modes can be OR'ed together (see
+ * @ref CAN_CTRLMODE for further information).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting special controller modes is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCTRLMODE	_IOW(RTIOC_TYPE_CAN, 0x07, struct can_ifreq)
+
+/**
+ * Get special controller modes
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANCTRLMODE	_IOWR(RTIOC_TYPE_CAN, 0x08, struct can_ifreq)
+
+/**
+ * Enable or disable storing a high precision timestamp upon reception of
+ * a CAN frame.
+ *
+ * A newly created socket takes no timestamps by default.
+ *
+ * @param [in] arg int variable, see @ref RTCAN_TIMESTAMPS "Timestamp switches"
+ *
+ * @return 0 on success.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Enabling timestamps only affects CAN messages received from the bus
+ * after the change. Frames already queued in the socket buffer carry no
+ * timestamp if timestamping was disabled when they arrived. See
+ * @ref Recv "Receive" for more details.
+ */
+#define RTCAN_RTIOC_TAKE_TIMESTAMP _IOW(RTIOC_TYPE_CAN, 0x09, int)
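+
+/*
+ * Timestamping sketch (illustrative only): RTCAN_TAKE_TIMESTAMPS refers
+ * to the enabling switch from the "Timestamp switches" group defined
+ * earlier in this header, and the switch is assumed to be passed by
+ * value.
+ *
+ *	ioctl(s, RTCAN_RTIOC_TAKE_TIMESTAMP, RTCAN_TAKE_TIMESTAMPS);
+ */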
+
+/**
+ * Specify a reception timeout for a socket
+ *
+ * Defines a timeout for all receive operations via a
+ * socket which will take effect when one of the @ref Recv "receive functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before receiving messages from the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. The value is
+ *                interpreted as relative timeout in nanoseconds in case
+ *                of a positive value.
+ *                See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_RCV_TIMEOUT	_IOW(RTIOC_TYPE_CAN, 0x0A, nanosecs_rel_t)
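+
+/*
+ * Receive timeout sketch (illustrative only; "s" is an RT CAN socket):
+ *
+ *	nanosecs_rel_t timeout = 100000000;	(100 ms)
+ *
+ *	ioctl(s, RTCAN_RTIOC_RCV_TIMEOUT, &timeout);
+ */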
+
+/**
+ * Specify a transmission timeout for a socket
+ *
+ * Defines a timeout for all send operations via a
+ * socket which will take effect when one of the @ref Send "send functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before sending messages to the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. The value is
+ *                interpreted as relative timeout in nanoseconds in case
+ *                of a positive value.
+ *                See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_SND_TIMEOUT	_IOW(RTIOC_TYPE_CAN, 0x0B, nanosecs_rel_t)
+/** @} */
+
+#define CAN_ERR_DLC  8	/* dlc for error frames */
+
+/*!
+ * @anchor Errors @name Error mask
+ * Error class (mask) in @c can_id field of struct can_frame to
+ * be used with @ref CAN_RAW_ERR_FILTER.
+ *
+ * @b Note: Error reporting is hardware dependent and most CAN controllers
+ * report less detailed error conditions than the SJA1000.
+ *
+ * @b Note: In case of a bus-off error condition (@ref CAN_ERR_BUSOFF), the
+ * CAN controller is @b not restarted automatically. It is the application's
+ * responsibility to react appropriately, e.g. by calling @ref CAN_MODE_START
+ * (see also the handling sketch after this block).
+ *
+ * @b Note: Bus error interrupts (@ref CAN_ERR_BUSERROR) are enabled when an
+ * application calls a @ref Recv function on a socket listening
+ * for bus errors (using @ref CAN_RAW_ERR_FILTER). After one bus error has
+ * occurred, the interrupt is disabled to give the application time for
+ * error processing and to avoid bus error interrupt flooding.
+ * @{ */
+
+/** TX timeout (netdevice driver) */
+#define CAN_ERR_TX_TIMEOUT	0x00000001U
+
+/** Lost arbitration (see @ref Error0 "data[0]") */
+#define CAN_ERR_LOSTARB		0x00000002U
+
+/** Controller problems (see @ref Error1 "data[1]") */
+#define CAN_ERR_CRTL		0x00000004U
+
+/** Protocol violations (see @ref Error2 "data[2]",
+			     @ref Error3 "data[3]") */
+#define CAN_ERR_PROT		0x00000008U
+
+/** Transceiver status (see @ref Error4 "data[4]")    */
+#define CAN_ERR_TRX		0x00000010U
+
+/** Received no ACK on transmission */
+#define CAN_ERR_ACK		0x00000020U
+
+/** Bus off */
+#define CAN_ERR_BUSOFF		0x00000040U
+
+/** Bus error (may flood!) */
+#define CAN_ERR_BUSERROR	0x00000080U
+
+/** Controller restarted */
+#define CAN_ERR_RESTARTED	0x00000100U
+
+/** Omit EFF, RTR, ERR flags */
+#define CAN_ERR_MASK		0x1FFFFFFFU
+
+/** @} */
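+
+/*
+ * Error frame handling sketch (illustrative only): "frame" is a struct
+ * can_frame returned by one of the receive functions on a socket
+ * subscribed via CAN_RAW_ERR_FILTER; restart_controller() and
+ * handle_rx_overflow() are hypothetical application helpers.
+ *
+ *	if (frame.can_id & CAN_ERR_FLAG) {
+ *		if (frame.can_id & CAN_ERR_BUSOFF)
+ *			restart_controller();
+ *		if ((frame.can_id & CAN_ERR_CRTL) &&
+ *		    (frame.data[1] & CAN_ERR_CRTL_RX_OVERFLOW))
+ *			handle_rx_overflow();
+ *	}
+ */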
+
+/*!
+ * @anchor Error0 @name Arbitration lost error
+ * Error in the data[0] field of struct can_frame.
+ * @{ */
+/* arbitration lost in bit ... / data[0] */
+#define CAN_ERR_LOSTARB_UNSPEC	0x00 /**< unspecified */
+				     /**< else bit number in bitstream */
+/** @} */
+
+/*!
+ * @anchor Error1 @name Controller problems
+ * Error in the data[1] field of struct can_frame.
+ * @{ */
+/* error status of CAN-controller / data[1] */
+#define CAN_ERR_CRTL_UNSPEC	 0x00 /**< unspecified */
+#define CAN_ERR_CRTL_RX_OVERFLOW 0x01 /**< RX buffer overflow */
+#define CAN_ERR_CRTL_TX_OVERFLOW 0x02 /**< TX buffer overflow */
+#define CAN_ERR_CRTL_RX_WARNING	 0x04 /**< reached warning level for RX errors */
+#define CAN_ERR_CRTL_TX_WARNING	 0x08 /**< reached warning level for TX errors */
+#define CAN_ERR_CRTL_RX_PASSIVE	 0x10 /**< reached passive level for RX errors */
+#define CAN_ERR_CRTL_TX_PASSIVE	 0x20 /**< reached passive level for TX errors */
+/** @} */
+
+/*!
+ * @anchor Error2 @name Protocol error type
+ * Error in the data[2] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (type) / data[2] */
+#define CAN_ERR_PROT_UNSPEC	0x00 /**< unspecified */
+#define CAN_ERR_PROT_BIT	0x01 /**< single bit error */
+#define CAN_ERR_PROT_FORM	0x02 /**< frame format error */
+#define CAN_ERR_PROT_STUFF	0x04 /**< bit stuffing error */
+#define CAN_ERR_PROT_BIT0	0x08 /**< unable to send dominant bit */
+#define CAN_ERR_PROT_BIT1	0x10 /**< unable to send recessive bit */
+#define CAN_ERR_PROT_OVERLOAD	0x20 /**< bus overload */
+#define CAN_ERR_PROT_ACTIVE	0x40 /**< active error announcement */
+#define CAN_ERR_PROT_TX		0x80 /**< error occurred on transmission */
+/** @} */
+
+/*!
+ * @anchor Error3 @name Protocol error location
+ * Error in the data[3] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (location) / data[3] */
+#define CAN_ERR_PROT_LOC_UNSPEC	 0x00 /**< unspecified */
+#define CAN_ERR_PROT_LOC_SOF	 0x03 /**< start of frame */
+#define CAN_ERR_PROT_LOC_ID28_21 0x02 /**< ID bits 28 - 21 (SFF: 10 - 3) */
+#define CAN_ERR_PROT_LOC_ID20_18 0x06 /**< ID bits 20 - 18 (SFF: 2 - 0) */
+#define CAN_ERR_PROT_LOC_SRTR	 0x04 /**< substitute RTR (SFF: RTR) */
+#define CAN_ERR_PROT_LOC_IDE	 0x05 /**< identifier extension */
+#define CAN_ERR_PROT_LOC_ID17_13 0x07 /**< ID bits 17-13 */
+#define CAN_ERR_PROT_LOC_ID12_05 0x0F /**< ID bits 12-5 */
+#define CAN_ERR_PROT_LOC_ID04_00 0x0E /**< ID bits 4-0 */
+#define CAN_ERR_PROT_LOC_RTR	 0x0C /**< RTR */
+#define CAN_ERR_PROT_LOC_RES1	 0x0D /**< reserved bit 1 */
+#define CAN_ERR_PROT_LOC_RES0	 0x09 /**< reserved bit 0 */
+#define CAN_ERR_PROT_LOC_DLC	 0x0B /**< data length code */
+#define CAN_ERR_PROT_LOC_DATA	 0x0A /**< data section */
+#define CAN_ERR_PROT_LOC_CRC_SEQ 0x08 /**< CRC sequence */
+#define CAN_ERR_PROT_LOC_CRC_DEL 0x18 /**< CRC delimiter */
+#define CAN_ERR_PROT_LOC_ACK	 0x19 /**< ACK slot */
+#define CAN_ERR_PROT_LOC_ACK_DEL 0x1B /**< ACK delimiter */
+#define CAN_ERR_PROT_LOC_EOF	 0x1A /**< end of frame */
+#define CAN_ERR_PROT_LOC_INTERM	 0x12 /**< intermission */
+/** @} */
+
+/*!
+ * @anchor Error4 @name Transceiver status
+ * Error in the data[4] field of struct can_frame.
+ * @{ */
+/* error status of CAN-transceiver / data[4] */
+/*                                               CANH CANL */
+#define CAN_ERR_TRX_UNSPEC		0x00 /**< 0000 0000 */
+#define CAN_ERR_TRX_CANH_NO_WIRE	0x04 /**< 0000 0100 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_BAT	0x05 /**< 0000 0101 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_VCC	0x06 /**< 0000 0110 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_GND	0x07 /**< 0000 0111 */
+#define CAN_ERR_TRX_CANL_NO_WIRE	0x40 /**< 0100 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_BAT	0x50 /**< 0101 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_VCC	0x60 /**< 0110 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_GND	0x70 /**< 0111 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_CANH	0x80 /**< 1000 0000 */
+/** @} */
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_CAN_H */
+++ linux-patched/include/xenomai/rtdm/uapi/net.h	2022-03-21 12:58:32.232861238 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/spi.h	1970-01-01 01:00:00.000000000 +0100
+/***
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  As a special exception to the GNU General Public license, the RTnet
+ *  project allows you to use this header file in unmodified form to produce
+ *  application programs executing in user-space which use RTnet services by
+ *  normal system calls. The resulting executable will not be covered by the
+ *  GNU General Public License merely as a result of this header file use.
+ *  Instead, this header file use will be considered normal use of RTnet and
+ *  not a "derived work" in the sense of the GNU General Public License.
+ *
+ *  This exception does not apply when the application code is built as a
+ *  static or dynamically loadable portion of the Linux kernel nor does the
+ *  exception override other reasons justifying application of the GNU General
+ *  Public License.
+ *
+ *  This exception applies only to the code released by the RTnet project
+ *  under the name RTnet and bearing this exception notice. If you copy code
+ *  from other sources into a copy of RTnet, the exception does not apply to
+ *  the code that you add in this way.
+ *
+ */
+
+#ifndef _RTDM_UAPI_NET_H
+#define _RTDM_UAPI_NET_H
+
+/* sub-classes: RTDM_CLASS_NETWORK */
+#define RTDM_SUBCLASS_RTNET     0
+
+#define RTIOC_TYPE_NETWORK      RTDM_CLASS_NETWORK
+
+/* RTnet-specific IOCTLs */
+#define RTNET_RTIOC_XMITPARAMS  _IOW(RTIOC_TYPE_NETWORK, 0x10, unsigned int)
+#define RTNET_RTIOC_PRIORITY    RTNET_RTIOC_XMITPARAMS  /* legacy */
+#define RTNET_RTIOC_TIMEOUT     _IOW(RTIOC_TYPE_NETWORK, 0x11, int64_t)
+/* RTNET_RTIOC_CALLBACK         _IOW(RTIOC_TYPE_NETWORK, 0x12, ...
+ * IOCTL only usable inside the kernel. */
+/* RTNET_RTIOC_NONBLOCK         _IOW(RTIOC_TYPE_NETWORK, 0x13, unsigned int)
+ * This IOCTL is no longer supported (and it was buggy anyway).
+ * Use RTNET_RTIOC_TIMEOUT with any negative timeout value instead. */
+#define RTNET_RTIOC_EXTPOOL     _IOW(RTIOC_TYPE_NETWORK, 0x14, unsigned int)
+#define RTNET_RTIOC_SHRPOOL     _IOW(RTIOC_TYPE_NETWORK, 0x15, unsigned int)
+
+/* socket transmission priorities */
+#define SOCK_MAX_PRIO           0
+#define SOCK_DEF_PRIO           SOCK_MAX_PRIO + \
+				    (SOCK_MIN_PRIO-SOCK_MAX_PRIO+1)/2
+#define SOCK_MIN_PRIO           SOCK_NRT_PRIO - 1
+#define SOCK_NRT_PRIO           31
+
+/* socket transmission channels */
+#define SOCK_DEF_RT_CHANNEL     0           /* default rt xmit channel     */
+#define SOCK_DEF_NRT_CHANNEL    1           /* default non-rt xmit channel */
+#define SOCK_USER_CHANNEL       2           /* first user-defined channel  */
+
+/* argument construction for RTNET_RTIOC_XMITPARAMS */
+#define SOCK_XMIT_PARAMS(priority, channel) ((priority) | ((channel) << 16))
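+
+/*
+ * Transmission priority sketch (illustrative only): "s" is an RTnet
+ * socket, Cobalt POSIX symbol wrapping is assumed, and the parameter is
+ * assumed to be passed by reference.
+ *
+ *	unsigned int xmit_params =
+ *		SOCK_XMIT_PARAMS(SOCK_MAX_PRIO, SOCK_DEF_RT_CHANNEL);
+ *
+ *	ioctl(s, RTNET_RTIOC_XMITPARAMS, &xmit_params);
+ */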
+
+#endif  /* !_RTDM_UAPI_NET_H */
+++ linux-patched/include/xenomai/rtdm/uapi/spi.h	2022-03-21 12:58:32.225861306 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/uapi/autotune.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_SPI_H
+#define _RTDM_UAPI_SPI_H
+
+#include <linux/types.h>
+
+struct rtdm_spi_config {
+	__u32 speed_hz;
+	__u16 mode;
+	__u8 bits_per_word;
+};
+
+struct rtdm_spi_iobufs {
+	__u32 io_len;
+	__u32 i_offset;
+	__u32 o_offset;
+	__u32 map_len;
+};
+
+#define SPI_RTIOC_SET_CONFIG		_IOW(RTDM_CLASS_SPI, 0, struct rtdm_spi_config)
+#define SPI_RTIOC_GET_CONFIG		_IOR(RTDM_CLASS_SPI, 1, struct rtdm_spi_config)
+#define SPI_RTIOC_SET_IOBUFS		_IOR(RTDM_CLASS_SPI, 2, struct rtdm_spi_iobufs)
+#define SPI_RTIOC_TRANSFER		_IO(RTDM_CLASS_SPI, 3)
+#define SPI_RTIOC_TRANSFER_N		_IOR(RTDM_CLASS_SPI, 4, int)
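+
+/*
+ * Usage sketch (illustrative only): the flow below is inferred from the
+ * structure fields above and may not match every SPI master driver;
+ * "fd" refers to an open SPI slave device and error checking is
+ * omitted.
+ *
+ *	struct rtdm_spi_config config = {
+ *		.speed_hz = 1000000,
+ *		.mode = 0,
+ *		.bits_per_word = 8,
+ *	};
+ *	struct rtdm_spi_iobufs iobufs = { .io_len = 32 };
+ *	void *iobuf;
+ *
+ *	ioctl(fd, SPI_RTIOC_SET_CONFIG, &config);
+ *	ioctl(fd, SPI_RTIOC_SET_IOBUFS, &iobufs);
+ *	iobuf = mmap(NULL, iobufs.map_len, PROT_READ|PROT_WRITE,
+ *		     MAP_SHARED, fd, 0);
+ *	... fill iobuf + iobufs.o_offset with TX data ...
+ *	ioctl(fd, SPI_RTIOC_TRANSFER);
+ *	... read RX data from iobuf + iobufs.i_offset ...
+ */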
+
+#endif /* !_RTDM_UAPI_SPI_H */
+++ linux-patched/include/xenomai/rtdm/uapi/autotune.h	2022-03-21 12:58:32.217861384 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/ipc.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_AUTOTUNE_H
+#define _RTDM_UAPI_AUTOTUNE_H
+
+#include <linux/types.h>
+
+#define RTDM_CLASS_AUTOTUNE		RTDM_CLASS_MISC
+#define RTDM_SUBCLASS_AUTOTUNE		0
+
+struct autotune_setup {
+	__u32 period;
+	__u32 quiet;
+};
+
+#define AUTOTUNE_RTIOC_IRQ		_IOW(RTDM_CLASS_AUTOTUNE, 0, struct autotune_setup)
+#define AUTOTUNE_RTIOC_KERN		_IOW(RTDM_CLASS_AUTOTUNE, 1, struct autotune_setup)
+#define AUTOTUNE_RTIOC_USER		_IOW(RTDM_CLASS_AUTOTUNE, 2, struct autotune_setup)
+#define AUTOTUNE_RTIOC_PULSE		_IOW(RTDM_CLASS_AUTOTUNE, 3, __u64)
+#define AUTOTUNE_RTIOC_RUN		_IOR(RTDM_CLASS_AUTOTUNE, 4, __u32)
+#define AUTOTUNE_RTIOC_RESET		_IO(RTDM_CLASS_AUTOTUNE, 5)
+
+#endif /* !_RTDM_UAPI_AUTOTUNE_H */
+++ linux-patched/include/xenomai/rtdm/ipc.h	2022-03-21 12:58:31.927864212 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/udd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_RTDM_IPC_H
+#define _COBALT_RTDM_IPC_H
+
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/ipc.h>
+
+#endif /* !_COBALT_RTDM_IPC_H */
+++ linux-patched/include/xenomai/rtdm/udd.h	2022-03-21 12:58:31.920864280 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/testing.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_UDD_H
+#define _COBALT_RTDM_UDD_H
+
+#include <linux/list.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/udd.h>
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_udd User-space driver core
+ *
+ * This profile includes all mini-drivers sitting on top of the
+ * User-space Device Driver framework (UDD). The generic UDD core
+ * driver enables interrupt control and I/O memory access interfaces
+ * to user-space device drivers, as defined by the mini-drivers when
+ * registering.
+ *
+ * A mini-driver supplements the UDD core with ancillary functions for
+ * dealing with @ref udd_memory_region "memory mappings" and @ref
+ * udd_irq_handler "interrupt control" for a particular I/O
+ * card/device.
+ *
+ * UDD-compliant mini-drivers only have to provide the basic support
+ * for dealing with the interrupt sources present in the device, so
+ * that most of the device requests can be handled from a Xenomai
+ * application running in user-space. Typically, a mini-driver would
+ * handle the interrupt top-half, and the user-space application would
+ * handle the bottom-half.
+ *
+ * This profile is reminiscent of the UIO framework available with the
+ * Linux kernel, adapted to the dual kernel Cobalt environment.
+ *
+ * @{
+ */
+
+/**
+ * @anchor udd_irq_special
+ * Special IRQ values for udd_device.irq
+ *
+ * @{
+ */
+/**
+ * No IRQ managed. Passing this code implicitly disables all
+ * interrupt-related services, including control (disable/enable) and
+ * notification.
+ */
+#define UDD_IRQ_NONE     0
+/**
+ * IRQ directly managed from the mini-driver on top of the UDD
+ * core. The mini-driver is in charge of attaching the handler(s) to
+ * the IRQ(s) it manages, notifying the Cobalt threads waiting for IRQ
+ * events by calling the udd_notify_event() service.
+ */
+#define UDD_IRQ_CUSTOM   (-1)
+/** @} */
+
+/**
+ * @anchor udd_memory_types  @name Memory types for mapping
+ * Types of memory for mapping
+ *
+ * The UDD core implements a default ->mmap() handler which first
+ * attempts to hand over the request to the corresponding handler
+ * defined by the mini-driver. If not present, the UDD core
+ * establishes the mapping automatically, depending on the memory
+ * type defined for the region.
+ *
+ * @{
+ */
+/**
+ * No memory region. Use this type code to disable an entry in the
+ * array of memory mappings, i.e. udd_device.mem_regions[].
+ */
+#define UDD_MEM_NONE     0
+/**
+ * Physical I/O memory region. By default, the UDD core maps such
+ * memory to a virtual user range by calling the rtdm_mmap_iomem()
+ * service.
+ */
+#define UDD_MEM_PHYS     1
+/**
+ * Kernel logical memory region (e.g. kmalloc()). By default, the UDD
+ * core maps such memory to a virtual user range by calling the
+ * rtdm_mmap_kmem() service. */
+#define UDD_MEM_LOGICAL  2
+/**
+ * Virtual memory region with no direct physical mapping
+ * (e.g. vmalloc()). By default, the UDD core maps such memory to a
+ * virtual user range by calling the rtdm_mmap_vmem() service.
+ */
+#define UDD_MEM_VIRTUAL  3
+/** @} */
+
+#define UDD_NR_MAPS  5
+
+/**
+ * @anchor udd_memory_region
+ * UDD memory region descriptor.
+ *
+ * This descriptor defines the characteristics of a memory region
+ * declared to the UDD core by the mini-driver. All valid regions
+ * should be declared in the udd_device.mem_regions[] array,
+ * invalid/unassigned ones should bear the UDD_MEM_NONE type.
+ *
+ * The UDD core exposes each region via the mmap(2) interface to the
+ * application. To this end, a companion mapper device is created
+ * automatically when registering the mini-driver.
+ *
+ * The mapper device creates special files in the RTDM namespace for
+ * reaching the individual regions, which the application can open
+ * then map to its address space via the mmap(2) system call.
+ *
+ * For instance, declaring a region of physical memory at index #2 of
+ * the memory region array could be done as follows:
+ *
+ * @code
+ * static struct udd_device udd;
+ *
+ * static int foocard_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ * {
+ *      udd.device_name = "foocard";
+ *      ...
+ *      udd.mem_regions[2].name = "ADC";
+ *      udd.mem_regions[2].addr = pci_resource_start(dev, 1);
+ *      udd.mem_regions[2].len = pci_resource_len(dev, 1);
+ *      udd.mem_regions[2].type = UDD_MEM_PHYS;
+ *      ...
+ *      return udd_register_device(&udd);
+ * }
+ * @endcode
+ *
+ * This makes such a region accessible via the mapper device, through the
+ * default ->mmap() handler from the UDD core, using the following
+ * sequence of code (see note):
+ *
+ * @code
+ * int fd, fdm;
+ * void *p;
+ *
+ * fd = open("/dev/rtdm/foocard", O_RDWR);
+ * fdm = open("/dev/rtdm/foocard,mapper2", O_RDWR);
+ * p = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fdm, 0);
+ * @endcode
+ *
+ * If no valid region has been declared in the
+ * udd_device.mem_regions[] array, no mapper device is created.
+ *
+ * @note The example code assumes that @ref cobalt_api POSIX symbol
+ * wrapping is in effect, so that RTDM performs the memory mapping
+ * operation (not the regular kernel).
+ */
+struct udd_memregion {
+	/** Name of the region (informational but required) */
+	const char *name;
+	/**
+	 * Start address of the region. This may be a physical or
+	 * virtual address, depending on the @ref udd_memory_types
+	 * "memory type".
+	 */
+	unsigned long addr;
+	/**
+	 * Length (in bytes) of the region. This value must be
+	 * PAGE_SIZE aligned.
+	 */
+	size_t len;
+	/**
+	 * Type of the region. See the discussion about @ref
+	 * udd_memory_types "UDD memory types" for possible values.
+	 */
+	int type;
+};
+
+/**
+ * @anchor udd_device
+ * UDD device descriptor.
+ *
+ * This descriptor defines the characteristics of a UDD-based
+ * mini-driver when registering via a call to udd_register_device().
+ */
+struct udd_device {
+	/**
+	 * Name of the device managed by the mini-driver, appears
+	 * automatically in the /dev/rtdm namespace upon creation.
+	 */
+	const char *device_name;
+	/**
+	 * Additional device flags (e.g. RTDM_EXCLUSIVE;
+	 * RTDM_NAMED_DEVICE may be omitted).
+	 */
+	int device_flags;
+	/**
+	 * Subclass code of the device managed by the mini-driver (see
+	 * RTDM_SUBCLASS_xxx definition in the @ref rtdm_profiles
+	 * "Device Profiles"). The main class code is pre-set to
+	 * RTDM_CLASS_UDD.
+	 */
+	int device_subclass;
+	struct {
+		/**
+		 * Ancillary open() handler, optional. See
+		 * rtdm_open_handler().
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		int (*open)(struct rtdm_fd *fd, int oflags);
+		/**
+		 * Ancillary close() handler, optional. See
+		 * rtdm_close_handler().
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		void (*close)(struct rtdm_fd *fd);
+		/**
+		 * Ancillary ioctl() handler, optional. See
+		 * rtdm_ioctl_handler().
+		 *
+		 * If this routine returns -ENOSYS, the default action
+		 * implemented by the UDD core for the corresponding
+		 * request will be applied, as if no ioctl handler had
+		 * been defined.
+		 *
+		 * @note This handler is called from primary mode
+		 * only.
+		 */
+		int (*ioctl)(struct rtdm_fd *fd,
+			     unsigned int request, void *arg);
+		/**
+		 * Ancillary mmap() handler for the mapper device,
+		 * optional. See rtdm_mmap_handler(). The mapper
+		 * device operates on a valid region defined in the @a
+		 * mem_regions[] array. A pointer to the region 
+		 * can be obtained by a call to udd_get_region().
+		 *
+		 * If this handler is NULL, the UDD core establishes
+		 * the mapping automatically, depending on the memory
+		 * type defined for the region.
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		int (*mmap)(struct rtdm_fd *fd,
+			    struct vm_area_struct *vma);
+		/**
+		 * @anchor udd_irq_handler
+		 *
+		 * Ancillary handler for receiving interrupts. This
+		 * handler must be provided if the mini-driver hands
+		 * over IRQ handling to the UDD core, by setting the
+		 * @a irq field to a valid value, different from
+		 * UDD_IRQ_CUSTOM and UDD_IRQ_NONE.
+		 *
+		 * The ->interrupt() handler shall return one of the
+		 * following status codes:
+		 *
+		 * - RTDM_IRQ_HANDLED, if the mini-driver successfully
+		 * handled the IRQ. This flag can be combined with
+		 * RTDM_IRQ_DISABLE to prevent the Cobalt kernel from
+		 * re-enabling the interrupt line upon return,
+		 * otherwise it is re-enabled automatically.
+		 *
+		 * - RTDM_IRQ_NONE, if the interrupt does not match
+		 * any IRQ the mini-driver can handle.
+		 *
+		 * Once the ->interrupt() handler has returned, the
+		 * UDD core notifies user-space Cobalt threads waiting
+		 * for IRQ events (if any).
+		 *
+		 * @note This handler is called from primary mode
+		 * only.
+		 */
+		int (*interrupt)(struct udd_device *udd);
+	} ops;
+	/**
+	 * IRQ number. If valid, the UDD core manages the
+	 * corresponding interrupt line, installing a base handler.
+	 * Otherwise, a special value can be passed for declaring
+	 * @ref udd_irq_special "unmanaged IRQs".
+	 */
+	int irq;
+	/**
+	 * Array of memory regions defined by the device. The array
+	 * can be sparse, with some entries bearing the UDD_MEM_NONE
+	 * type interleaved with valid ones.  See the discussion about
+	 * @ref udd_memory_region "UDD memory regions".
+	 */
+	struct udd_memregion mem_regions[UDD_NR_MAPS];
+	/** Reserved to the UDD core. */
+	struct udd_reserved {
+		rtdm_irq_t irqh;
+		u32 event_count;
+		struct udd_signotify signfy;
+		struct rtdm_event pulse;
+		struct rtdm_driver driver;
+		struct rtdm_device device;
+		struct rtdm_driver mapper_driver;
+		struct udd_mapper {
+			struct udd_device *udd;
+			struct rtdm_device dev;
+		} mapdev[UDD_NR_MAPS];
+		char *mapper_name;
+		int nr_maps;
+	} __reserved;
+};
+
+int udd_register_device(struct udd_device *udd);
+
+int udd_unregister_device(struct udd_device *udd);
+
+struct udd_device *udd_get_device(struct rtdm_fd *fd);
+
+void udd_notify_event(struct udd_device *udd);
+
+void udd_enable_irq(struct udd_device *udd,
+		    rtdm_event_t *done);
+
+void udd_disable_irq(struct udd_device *udd,
+		     rtdm_event_t *done);
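+
+/*
+ * Registration sketch (illustrative only): the "foo" identifiers, the
+ * IRQ number and the subclass value are placeholders; memory regions
+ * are omitted here, see the udd_memregion example above.
+ *
+ *	static int foo_interrupt(struct udd_device *udd)
+ *	{
+ *		... acknowledge the event at device level ...
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ *
+ *	static struct udd_device foo_udd = {
+ *		.device_name = "foo",
+ *		.device_subclass = RTDM_SUBCLASS_GENERIC,
+ *		.irq = 42,
+ *		.ops = {
+ *			.interrupt = foo_interrupt,
+ *		},
+ *	};
+ *
+ *	ret = udd_register_device(&foo_udd);
+ */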
+
+/** @} */
+
+#endif /* !_COBALT_RTDM_UDD_H */
+++ linux-patched/include/xenomai/rtdm/testing.h	2022-03-21 12:58:31.912864359 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/gpio.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_TESTING_H
+#define _COBALT_RTDM_TESTING_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/testing.h>
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <rtdm/compat.h>
+
+struct compat_rttst_overall_bench_res {
+	struct rttst_bench_res result;
+	compat_uptr_t histogram_avg;
+	compat_uptr_t histogram_min;
+	compat_uptr_t histogram_max;
+};
+
+struct compat_rttst_heap_stathdr {
+	int nrstats;
+	compat_uptr_t buf;
+};
+
+#define RTTST_RTIOC_TMBENCH_STOP_COMPAT \
+	_IOWR(RTIOC_TYPE_TESTING, 0x11, struct compat_rttst_overall_bench_res)
+
+#endif	/* CONFIG_XENO_ARCH_SYS3264 */
+
+#endif /* !_COBALT_RTDM_TESTING_H */
+++ linux-patched/include/xenomai/rtdm/gpio.h	2022-03-21 12:58:31.905864427 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/compat.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_GPIO_H
+#define _COBALT_RTDM_GPIO_H
+
+#include <linux/list.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/gpio.h>
+
+struct class;
+struct device_node;
+struct gpio_desc;
+
+struct rtdm_gpio_pin {
+	struct rtdm_device dev;
+	struct list_head next;
+	rtdm_irq_t irqh;
+	rtdm_event_t event;
+	char *name;
+	struct gpio_desc *desc;
+	nanosecs_abs_t timestamp;
+	bool monotonic_timestamp;
+};
+
+struct rtdm_gpio_chip {
+	struct gpio_chip *gc;
+	struct rtdm_driver driver;
+	struct class *devclass;
+	struct list_head next;
+	rtdm_lock_t lock;
+	struct rtdm_gpio_pin pins[0];
+};
+
+int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc,
+		      struct gpio_chip *gc,
+		      int gpio_subclass);
+
+struct rtdm_gpio_chip *
+rtdm_gpiochip_alloc(struct gpio_chip *gc,
+		    int gpio_subclass);
+
+void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc);
+
+int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc,
+			      const char *label, int gpio_subclass);
+
+int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc,
+			     unsigned int offset);
+
+int rtdm_gpiochip_find(struct device_node *from, const char *label, int type);
+
+int rtdm_gpiochip_array_find(struct device_node *from, const char *label[],
+			     int nentries, int type);
+
+#ifdef CONFIG_OF
+
+int rtdm_gpiochip_scan_of(struct device_node *from,
+			  const char *compat, int type);
+
+int rtdm_gpiochip_scan_array_of(struct device_node *from,
+				const char *compat[],
+				int nentries, int type);
+#endif
+
+void rtdm_gpiochip_remove_by_type(int type);
+
+#endif /* !_COBALT_RTDM_GPIO_H */
+++ linux-patched/include/xenomai/rtdm/compat.h	2022-03-21 12:58:31.894864534 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/serial.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_COMPAT_H
+#define _COBALT_RTDM_COMPAT_H
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <cobalt/kernel/compat.h>
+#include <rtdm/rtdm.h>
+
+struct compat_rtdm_getsockopt_args {
+	int level;
+	int optname;
+	compat_uptr_t optval;
+	compat_uptr_t optlen;
+};
+
+struct compat_rtdm_setsockopt_args {
+	int level;
+	int optname;
+	const compat_uptr_t optval;
+	socklen_t optlen;
+};
+
+struct compat_rtdm_getsockaddr_args {
+	compat_uptr_t addr;
+	compat_uptr_t addrlen;
+};
+
+struct compat_rtdm_setsockaddr_args {
+	const compat_uptr_t addr;
+	socklen_t addrlen;
+};
+
+#define _RTIOC_GETSOCKOPT_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x20,	\
+					     struct compat_rtdm_getsockopt_args)
+#define _RTIOC_SETSOCKOPT_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x21,	\
+					     struct compat_rtdm_setsockopt_args)
+#define _RTIOC_BIND_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x22,	\
+					     struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_CONNECT_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x23,	\
+					     struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_ACCEPT_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x25,	\
+					     struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETSOCKNAME_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x26,	\
+					     struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETPEERNAME_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x27,	\
+					     struct compat_rtdm_getsockaddr_args)
+
+#define __COMPAT_CASE(__op)		: case __op
+
+#else	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+#define __COMPAT_CASE(__op)
+
+#endif	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+#define COMPAT_CASE(__op)	case __op __COMPAT_CASE(__op  ## _COMPAT)
+
+#endif /* !_COBALT_RTDM_COMPAT_H */
+++ linux-patched/include/xenomai/rtdm/serial.h	2022-03-21 12:58:31.887864602 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/driver.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_SERIAL_H
+#define _COBALT_RTDM_SERIAL_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/serial.h>
+
+#endif /* !_COBALT_RTDM_SERIAL_H */
+++ linux-patched/include/xenomai/rtdm/driver.h	2022-03-21 12:58:31.879864680 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/rtdm.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, driver API header
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * @ingroup driverapi
+ */
+#ifndef _COBALT_RTDM_DRIVER_H
+#define _COBALT_RTDM_DRIVER_H
+
+#include <asm/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <pipeline/lock.h>
+#include <pipeline/inband_work.h>
+#include <xenomai/version.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/init.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <cobalt/kernel/tree.h>
+#include <rtdm/fd.h>
+#include <rtdm/rtdm.h>
+
+/* debug support */
+#include <cobalt/kernel/assert.h>
+#include <trace/events/cobalt-rtdm.h>
+#ifdef CONFIG_PCI
+#include <asm-generic/xenomai/pci_ids.h>
+#endif /* CONFIG_PCI */
+#include <asm/xenomai/syscall.h>
+
+struct class;
+typedef struct xnselector rtdm_selector_t;
+enum rtdm_selecttype;
+
+/*!
+ * @addtogroup rtdm_device_register
+ * @{
+ */
+
+/*!
+ * @anchor dev_flags @name Device Flags
+ * Static flags describing a RTDM device
+ * @{
+ */
+/** If set, only a single instance of the device can be requested by an
+ *  application. */
+#define RTDM_EXCLUSIVE			0x0001
+
+/**
+ * Use fixed minor provided in the rtdm_device description for
+ * registering. If this flag is absent, the RTDM core assigns minor
+ * numbers to devices managed by a driver in order of registration.
+ */
+#define RTDM_FIXED_MINOR		0x0002
+
+/** If set, the device is addressed via a clear-text name. */
+#define RTDM_NAMED_DEVICE		0x0010
+
+/** If set, the device is addressed via a combination of protocol ID and
+ *  socket type. */
+#define RTDM_PROTOCOL_DEVICE		0x0020
+
+/** Mask selecting the device type. */
+#define RTDM_DEVICE_TYPE_MASK		0x00F0
+
+/** Flag indicating a secure variant of RTDM (not supported here) */
+#define RTDM_SECURE_DEVICE		0x80000000
+/** @} Device Flags */
+
+/** Maximum number of named devices per driver. */
+#define RTDM_MAX_MINOR	4096
+
+/** @} rtdm_device_register */
+
+/*!
+ * @addtogroup rtdm_sync
+ * @{
+ */
+
+/*!
+ * @anchor RTDM_SELECTTYPE_xxx   @name RTDM_SELECTTYPE_xxx
+ * Event types select can bind to
+ * @{
+ */
+enum rtdm_selecttype {
+	/** Select input data availability events */
+	RTDM_SELECTTYPE_READ = XNSELECT_READ,
+
+	/** Select output buffer availability events */
+	RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE,
+
+	/** Select exceptional events */
+	RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT
+};
+/** @} RTDM_SELECTTYPE_xxx */
+
+/** @} rtdm_sync */
+
+/**
+ * @brief Device context
+ *
+ * A device context structure is associated with every open device instance.
+ * RTDM takes care of its creation and destruction and passes it to the
+ * operation handlers when being invoked.
+ *
+ * Drivers can attach arbitrary data immediately after the official
+ * structure.  The size of this data is provided via
+ * rtdm_driver.context_size during device registration.
+ */
+struct rtdm_dev_context {
+	struct rtdm_fd fd;
+
+	/** Set of active device operation handlers */
+	/** Reference to owning device */
+	struct rtdm_device *device;
+
+	/** Begin of driver defined context data structure */
+	char dev_private[0];
+};
+
+static inline struct rtdm_dev_context *rtdm_fd_to_context(struct rtdm_fd *fd)
+{
+	return container_of(fd, struct rtdm_dev_context, fd);
+}
+
+/**
+ * Locate the driver private area associated to a device context structure
+ *
+ * @param[in] fd File descriptor structure associated with opened
+ * device instance
+ *
+ * @return The address of the private driver area associated to @a
+ * file descriptor.
+ */
+static inline void *rtdm_fd_to_private(struct rtdm_fd *fd)
+{
+	return &rtdm_fd_to_context(fd)->dev_private[0];
+}
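+
+/*
+ * Usage sketch (illustrative only): "struct foo_context" stands for a
+ * driver-defined structure whose size was passed via
+ * rtdm_driver.context_size, and FOO_IDLE is a made-up state value.
+ *
+ *	static int foo_open(struct rtdm_fd *fd, int oflags)
+ *	{
+ *		struct foo_context *ctx = rtdm_fd_to_private(fd);
+ *
+ *		ctx->state = FOO_IDLE;
+ *
+ *		return 0;
+ *	}
+ */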
+
+/**
+ * Locate a device file descriptor structure from its driver private area
+ *
+ * @param[in] dev_private Address of a private context area
+ *
+ * @return The address of the file descriptor structure defining @a
+ * dev_private.
+ */
+static inline struct rtdm_fd *rtdm_private_to_fd(void *dev_private)
+{
+	struct rtdm_dev_context *ctx;
+	ctx = container_of(dev_private, struct rtdm_dev_context, dev_private);
+	return &ctx->fd;
+}
+
+/**
+ * Tell whether the passed file descriptor belongs to an application.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return true if passed file descriptor belongs to an application,
+ * false otherwise.
+ */
+static inline bool rtdm_fd_is_user(struct rtdm_fd *fd)
+{
+	return rtdm_fd_owner(fd) != &cobalt_kernel_ppd;
+}
+
+/**
+ * Locate a device structure from a file descriptor.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return The address of the device structure to which this file
+ * descriptor is attached.
+ */
+static inline struct rtdm_device *rtdm_fd_device(struct rtdm_fd *fd)
+{
+	return rtdm_fd_to_context(fd)->device;
+}
+
+/**
+ * @brief RTDM profile information
+ *
+ * This descriptor details the profile information associated to a
+ * RTDM class of device managed by a driver.
+ *
+ * @anchor rtdm_profile_info
+ */
+struct rtdm_profile_info {
+	/** Device class name */
+	const char *name;
+	/** Device class ID, see @ref RTDM_CLASS_xxx */
+	int class_id;
+	/** Device sub-class, see RTDM_SUBCLASS_xxx definition in the
+	    @ref rtdm_profiles "Device Profiles" */
+	int subclass_id;
+	/** Supported device profile version */
+	int version;
+	/** Reserved */
+	unsigned int magic;
+	struct module *owner;
+	struct class *kdev_class;
+};
+
+struct rtdm_driver;
+
+/**
+ * @brief RTDM state management handlers
+ */
+struct rtdm_sm_ops {
+	/** Handler called upon transition to COBALT_STATE_WARMUP */ 
+	int (*start)(struct rtdm_driver *drv);
+	/** Handler called upon transition to COBALT_STATE_TEARDOWN */ 
+	int (*stop)(struct rtdm_driver *drv);
+};
+
+/**
+ * @brief RTDM driver
+ *
+ * This descriptor describes a RTDM device driver. The structure holds
+ * runtime data, therefore it must reside in writable memory.
+ */
+struct rtdm_driver {
+	/**
+	 * Class profile information. The RTDM_PROFILE_INFO() macro @b
+	 * must be used for filling up this field.
+	 * @anchor rtdm_driver_profile
+	 */
+	struct rtdm_profile_info profile_info;
+	/**
+	 * Device flags, see @ref dev_flags "Device Flags" for details
+	 * @anchor rtdm_driver_flags
+	 */
+	int device_flags;
+	/**
+	 * Size of the private memory area the core should
+	 * automatically allocate for each open file descriptor, which
+	 * is usable for storing the context data associated to each
+	 * connection. The allocated memory is zero-initialized. The
+	 * start of this area can be retrieved by a call to
+	 * rtdm_fd_to_private().
+	 */
+	size_t context_size;
+	/** Protocol device identification: protocol family (PF_xxx) */
+	int protocol_family;
+	/** Protocol device identification: socket type (SOCK_xxx) */
+	int socket_type;
+	/** I/O operation handlers */
+	struct rtdm_fd_ops ops;
+	/** State management handlers */
+	struct rtdm_sm_ops smops;
+	/**
+	 * Count of devices this driver manages. This value is used to
+	 * allocate a chrdev region for named devices.
+	 */
+	int device_count;
+	/** Base minor for named devices. */
+	int base_minor;
+	/** Reserved area */
+	struct {
+		union {
+			struct {
+				struct cdev cdev;
+				int major;
+			} named;
+		};
+		atomic_t refcount;
+		struct notifier_block nb_statechange;
+		DECLARE_BITMAP(minor_map, RTDM_MAX_MINOR);
+	};
+};
+
+#define RTDM_CLASS_MAGIC	0x8284636c
+
+/**
+ * @brief Initializer for class profile information.
+ *
+ * This macro must be used to fill in the @ref rtdm_profile_info
+ * "class profile information" field from a RTDM driver.
+ *
+ * @param __name Class name (unquoted).
+ *
+ * @param __id Class major identification number
+ * (profile_version.class_id).
+ *
+ * @param __subid Class minor identification number
+ * (profile_version.subclass_id).
+ *
+ * @param __version Profile version number.
+ *
+ * @note See @ref rtdm_profiles "Device Profiles".
+ */
+#define RTDM_PROFILE_INFO(__name, __id, __subid, __version)	\
+{								\
+	.name = ( # __name ),					\
+	.class_id = (__id),					\
+	.subclass_id = (__subid),				\
+	.version = (__version),					\
+	.magic = ~RTDM_CLASS_MAGIC,				\
+	.owner = THIS_MODULE,					\
+	.kdev_class = NULL,					\
+}
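+
+/*
+ * Descriptor sketch (illustrative only): the "foo" identifiers, the
+ * class/subclass/version numbers and the handler subset are
+ * placeholders; RTDM_CLASS_MISC merely serves as an example class here.
+ *
+ *	static struct rtdm_driver foo_driver = {
+ *		.profile_info = RTDM_PROFILE_INFO(foo, RTDM_CLASS_MISC,
+ *						  42, 1),
+ *		.device_flags = RTDM_NAMED_DEVICE,
+ *		.device_count = 1,
+ *		.context_size = sizeof(struct foo_context),
+ *		.ops = {
+ *			.open = foo_open,
+ *			.close = foo_close,
+ *		},
+ *	};
+ */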
+
+int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls);
+
+/**
+ * @brief RTDM device
+ *
+ * This descriptor describes a RTDM device instance. The structure
+ * holds runtime data, therefore it must reside in writable memory.
+ */
+struct rtdm_device {
+	/** Device driver. */
+	struct rtdm_driver *driver;
+	/** Driver definable device data */
+	void *device_data;
+	/**
+	 * Device label template for composing the device name. A
+	 * limited printf-like format string is assumed, with a
+	 * provision for replacing the first %d/%i placeholder found
+	 * in the string by the device minor number.  It is up to the
+	 * driver to actually mention this placeholder or not,
+	 * depending on the naming convention for its devices.  For
+	 * named devices, the corresponding device node will
+	 * automatically appear in the /dev/rtdm hierarchy with
+	 * hotplug-enabled device filesystems (DEVTMPFS).
+	 */
+	const char *label;
+	/**
+	 * Minor number of the device. If RTDM_FIXED_MINOR is present
+	 * in the driver flags, the value stored in this field is used
+	 * verbatim by rtdm_dev_register(). Otherwise, the RTDM core
+	 * automatically assigns minor numbers to all devices managed
+	 * by the driver referred to by @a driver, in order of
+	 * registration, storing the resulting values into this field.
+	 *
+	 * Device nodes created for named devices in the Linux /dev
+	 * hierarchy are assigned this minor number.
+	 *
+	 * The minor number of the current device handling an I/O
+	 * request can be retrieved by a call to rtdm_fd_minor().
+	 */
+	int minor;
+	/** Reserved area. */
+	struct {
+		unsigned int magic;
+		char *name;
+		union {
+			struct {
+				xnhandle_t handle;
+			} named;
+			struct {
+				struct xnid id;
+			} proto;
+		};
+		dev_t rdev;
+		struct device *kdev;
+		struct class *kdev_class;
+		atomic_t refcount;
+		struct rtdm_fd_ops ops;
+		wait_queue_head_t putwq;
+		struct list_head openfd_list;
+	};
+};
+
+/* --- device registration --- */
+
+int rtdm_dev_register(struct rtdm_device *device);
+
+void rtdm_dev_unregister(struct rtdm_device *device);
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+
+static inline struct device *rtdm_dev_to_kdev(struct rtdm_device *device)
+{
+	return device->kdev;
+}
+
+/* --- clock services --- */
+static inline nanosecs_abs_t rtdm_clock_read(void)
+{
+	return xnclock_read_realtime(&nkclock);
+}
+
+static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
+{
+	return xnclock_read_monotonic(&nkclock);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- timeout sequences */
+
+typedef nanosecs_abs_t rtdm_toseq_t;
+
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
+
+/*!
+ * @addtogroup rtdm_sync
+ * @{
+ */
+
+/*!
+ * @defgroup rtdm_sync_biglock Big dual kernel lock
+ * @{
+ */
+
+/**
+ * @brief Enter atomic section (dual kernel only)
+ *
+ * This call opens a fully atomic section, serializing execution with
+ * respect to all interrupt handlers (including for real-time IRQs)
+ * and Xenomai threads running on all CPUs.
+ *
+ * @param __context name of local variable to store the context
+ * in. This variable, updated by the real-time core, holds the
+ * information required to leave the atomic section properly.
+ *
+ * @note Atomic sections may be nested. The caller is allowed to sleep
+ * on a blocking Xenomai service from primary mode within an atomic
+ * section delimited by cobalt_atomic_enter/cobalt_atomic_leave calls.
+ * On the contrary, sleeping on a regular Linux kernel service while
+ * holding such lock is NOT valid.
+ *
+ * @note Since the strongest lock is acquired by this service, it can
+ * be used to synchronize real-time and non-real-time contexts.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases, mainly for the
+ * purpose of porting existing dual-kernel drivers which still depend
+ * on the obsolete RTDM_EXECUTE_ATOMICALLY() construct.
+ */
+#define cobalt_atomic_enter(__context)				\
+	do {							\
+		xnlock_get_irqsave(&nklock, (__context));	\
+		xnsched_lock();					\
+	} while (0)
+
+/**
+ * @brief Leave atomic section (dual kernel only)
+ *
+ * This call closes an atomic section previously opened by a call to
+ * cobalt_atomic_enter(), restoring the preemption and interrupt state
+ * which prevailed prior to entering the exited section.
+ *
+ * @param __context name of local variable which stored the context.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases.
+ */
+#define cobalt_atomic_leave(__context)				\
+	do {							\
+		xnsched_unlock();				\
+		xnlock_put_irqrestore(&nklock, (__context));	\
+	} while (0)
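+
+/*
+ * Illustrative sketch: serializing a short state update against all
+ * real-time activity with the big dual kernel lock. foo_state and
+ * foo_set_state() are made-up names; only cobalt_atomic_enter/leave
+ * and spl_t come from the core.
+ */
+static int foo_state;
+
+static void foo_set_state(int value)
+{
+	spl_t s;
+
+	cobalt_atomic_enter(s);
+	foo_state = value;	/* atomic wrt. real-time IRQs and threads */
+	cobalt_atomic_leave(s);
+}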
+
+/**
+ * @brief Execute code block atomically (DEPRECATED)
+ *
+ * Generally, it is illegal to suspend the current task by calling
+ * rtdm_task_sleep(), rtdm_event_wait(), etc. while holding a spinlock. In
+ * contrast, this macro allows several operations, including potentially
+ * rescheduling calls, to be combined into a code block which executes
+ * atomically with respect to other RTDM_EXECUTE_ATOMICALLY() blocks. The
+ * macro is a light-weight alternative to protecting code blocks with
+ * mutexes, and it can even be used to synchronize real-time and
+ * non-real-time contexts.
+ *
+ * @param code_block Commands to be executed atomically
+ *
+ * @note It is not allowed to leave the code block explicitly by using
+ * @c break, @c return, @c goto, etc. This would leave the global lock,
+ * which is held during the code block execution, in an inconsistent
+ * state. Moreover, do not embed complex operations into the code block.
+ * Consider that they will be executed under preemption lock with
+ * interrupts switched off. Also note that invoking rescheduling calls
+ * may break the atomicity until the task gains the CPU again.
+ *
+ * @coretags{unrestricted}
+ *
+ * @deprecated This construct will be phased out in Xenomai
+ * 3.0. Please use rtdm_waitqueue services instead.
+ *
+ * @see cobalt_atomic_enter().
+ */
+#ifdef DOXYGEN_CPP /* Beautify doxygen output */
+#define RTDM_EXECUTE_ATOMICALLY(code_block)	\
+{						\
+	<ENTER_ATOMIC_SECTION>			\
+	code_block;				\
+	<LEAVE_ATOMIC_SECTION>			\
+}
+#else /* This is how it really works */
+static inline __attribute__((deprecated)) void
+rtdm_execute_atomically(void) { }
+
+#define RTDM_EXECUTE_ATOMICALLY(code_block)		\
+{							\
+	spl_t __rtdm_s;					\
+							\
+	rtdm_execute_atomically();			\
+	xnlock_get_irqsave(&nklock, __rtdm_s);		\
+	xnsched_lock();					\
+	code_block;					\
+	xnsched_unlock();				\
+	xnlock_put_irqrestore(&nklock, __rtdm_s);	\
+}
+#endif
+
+/** @} Big dual kernel lock */
+
+/**
+ * @defgroup rtdm_sync_spinlock Spinlock with preemption deactivation
+ * @{
+ */
+
+/**
+ * Static lock initialisation
+ */
+#define RTDM_LOCK_UNLOCKED(__name)	PIPELINE_SPIN_LOCK_UNLOCKED(__name)
+
+#define DEFINE_RTDM_LOCK(__name)		\
+	rtdm_lock_t __name = RTDM_LOCK_UNLOCKED(__name)
+
+/** Lock variable */
+typedef pipeline_spinlock_t rtdm_lock_t;
+
+/** Variable to save the context while holding a lock */
+typedef unsigned long rtdm_lockctx_t;
+
+/**
+ * Dynamic lock initialisation
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{task-unrestricted}
+ */
+static inline void rtdm_lock_init(rtdm_lock_t *lock)
+{
+	raw_spin_lock_init(lock);
+}
+
+/**
+ * Acquire lock from non-preemptible contexts
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{unrestricted}
+ */
+static inline void rtdm_lock_get(rtdm_lock_t *lock)
+{
+	XENO_BUG_ON(COBALT, !spltest());
+	raw_spin_lock(lock);
+	xnsched_lock();
+}
+
+/**
+ * Release lock without preemption restoration
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+static inline void rtdm_lock_put(rtdm_lock_t *lock)
+{
+	raw_spin_unlock(lock);
+	xnsched_unlock();
+}
+
+/**
+ * Acquire lock and disable preemption by stalling the head domain.
+ *
+ * @param __lock Address of lock variable
+ * @param __context name of local variable to store the context in
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_get_irqsave(__lock, __context)	\
+	((__context) = __rtdm_lock_get_irqsave(__lock))
+
+static inline rtdm_lockctx_t __rtdm_lock_get_irqsave(rtdm_lock_t *lock)
+{
+	rtdm_lockctx_t context;
+
+	splhigh(context);
+	raw_spin_lock(lock);
+	xnsched_lock();
+
+	return context;
+}
+
+/**
+ * Release lock and restore preemption state
+ *
+ * @param lock Address of lock variable
+ * @param context name of local variable which stored the context
+ *
+ * @coretags{unrestricted}
+ */
+static inline
+void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t context)
+{
+	raw_spin_unlock(lock);
+	xnsched_unlock();
+	splexit(context);
+}
+
+/**
+ * Disable preemption locally
+ *
+ * @param __context name of local variable to store the context in
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_irqsave(__context)	\
+	splhigh(__context)
+
+/**
+ * Restore preemption state
+ *
+ * @param __context name of local variable which stored the context
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_irqrestore(__context)	\
+	splexit(__context)
+
+/** @} Spinlock with Preemption Deactivation */
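+
+/*
+ * Illustrative sketch: protecting driver data with an RTDM lock from
+ * both IRQ and task contexts. The foo_* names are assumptions; the
+ * locking calls are the ones defined above.
+ */
+static DEFINE_RTDM_LOCK(foo_lock);
+static unsigned int foo_counter;
+
+static void foo_account(unsigned int n)
+{
+	rtdm_lockctx_t ctx;
+
+	rtdm_lock_get_irqsave(&foo_lock, ctx);
+	foo_counter += n;
+	rtdm_lock_put_irqrestore(&foo_lock, ctx);
+}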
+
+#ifndef DOXYGEN_CPP
+
+struct rtdm_waitqueue {
+	struct xnsynch wait;
+};
+typedef struct rtdm_waitqueue rtdm_waitqueue_t;
+
+#define RTDM_WAITQUEUE_INITIALIZER(__name) {		 \
+	    .wait = XNSYNCH_WAITQUEUE_INITIALIZER((__name).wait), \
+	}
+
+#define DEFINE_RTDM_WAITQUEUE(__name)				\
+	struct rtdm_waitqueue __name = RTDM_WAITQUEUE_INITIALIZER(__name)
+
+#define DEFINE_RTDM_WAITQUEUE_ONSTACK(__name)	\
+	DEFINE_RTDM_WAITQUEUE(__name)
+
+static inline void rtdm_waitqueue_init(struct rtdm_waitqueue *wq)
+{
+	*wq = (struct rtdm_waitqueue)RTDM_WAITQUEUE_INITIALIZER(*wq);
+}
+
+static inline void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq)
+{
+	xnsynch_destroy(&wq->wait);
+}
+
+static inline int __rtdm_dowait(struct rtdm_waitqueue *wq,
+				nanosecs_rel_t timeout, xntmode_t timeout_mode)
+{
+	int ret;
+	
+	ret = xnsynch_sleep_on(&wq->wait, timeout, timeout_mode);
+	if (ret & XNBREAK)
+		return -EINTR;
+	if (ret & XNTIMEO)
+		return -ETIMEDOUT;
+	if (ret & XNRMID)
+		return -EIDRM;
+	return 0;
+}
+
+static inline int __rtdm_timedwait(struct rtdm_waitqueue *wq,
+				   nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+{
+	if (toseq && timeout > 0)
+		return __rtdm_dowait(wq, *toseq, XN_ABSOLUTE);
+
+	return __rtdm_dowait(wq, timeout, XN_RELATIVE);
+}
+
+#define rtdm_timedwait_condition_locked(__wq, __cond, __timeout, __toseq) \
+	({								\
+		int __ret = 0;						\
+		while (__ret == 0 && !(__cond))				\
+			__ret = __rtdm_timedwait(__wq, __timeout, __toseq); \
+		__ret;							\
+	})
+
+#define rtdm_wait_condition_locked(__wq, __cond)			\
+	({								\
+		int __ret = 0;						\
+		while (__ret == 0 && !(__cond))				\
+			__ret = __rtdm_dowait(__wq,			\
+					      XN_INFINITE, XN_RELATIVE); \
+		__ret;							\
+	})
+
+#define rtdm_timedwait_condition(__wq, __cond, __timeout, __toseq)	\
+	({								\
+		spl_t __s;						\
+		int __ret;						\
+		xnlock_get_irqsave(&nklock, __s);			\
+		__ret = rtdm_timedwait_condition_locked(__wq, __cond,	\
+					      __timeout, __toseq);	\
+		xnlock_put_irqrestore(&nklock, __s);			\
+		__ret;							\
+	})
+
+#define rtdm_timedwait(__wq, __timeout, __toseq)			\
+	__rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_timedwait_locked(__wq, __timeout, __toseq)			\
+	rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_wait_condition(__wq, __cond)				\
+	({								\
+		spl_t __s;						\
+		int __ret;						\
+		xnlock_get_irqsave(&nklock, __s);			\
+		__ret = rtdm_wait_condition_locked(__wq, __cond);	\
+		xnlock_put_irqrestore(&nklock, __s);			\
+		__ret;							\
+	})
+
+#define rtdm_wait(__wq)							\
+	__rtdm_dowait(__wq, XN_INFINITE, XN_RELATIVE)
+
+#define rtdm_wait_locked(__wq)  rtdm_wait(__wq)
+
+#define rtdm_waitqueue_lock(__wq, __context)  cobalt_atomic_enter(__context)
+
+#define rtdm_waitqueue_unlock(__wq, __context)  cobalt_atomic_leave(__context)
+
+#define rtdm_waitqueue_signal(__wq)					\
+	({								\
+		struct xnthread *__waiter;				\
+		__waiter = xnsynch_wakeup_one_sleeper(&(__wq)->wait);	\
+		xnsched_run();						\
+		__waiter != NULL;					\
+	})
+
+#define __rtdm_waitqueue_flush(__wq, __reason)				\
+	({								\
+		int __ret;						\
+		__ret = xnsynch_flush(&(__wq)->wait, __reason);		\
+		xnsched_run();						\
+		__ret == XNSYNCH_RESCHED;				\
+	})
+
+#define rtdm_waitqueue_broadcast(__wq)	\
+	__rtdm_waitqueue_flush(__wq, 0)
+
+#define rtdm_waitqueue_flush(__wq)	\
+	__rtdm_waitqueue_flush(__wq, XNBREAK)
+
+#define rtdm_waitqueue_wakeup(__wq, __waiter)				\
+	do {								\
+		xnsynch_wakeup_this_sleeper(&(__wq)->wait, __waiter);	\
+		xnsched_run();						\
+	} while (0)
+
+#define rtdm_for_each_waiter(__pos, __wq)		\
+	xnsynch_for_each_sleeper(__pos, &(__wq)->wait)
+
+#define rtdm_for_each_waiter_safe(__pos, __tmp, __wq)	\
+	xnsynch_for_each_sleeper_safe(__pos, __tmp, &(__wq)->wait)
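+
+/*
+ * Illustrative sketch: a simple producer/consumer pair built on the
+ * waitqueue services above. The foo_* names and the single ready flag
+ * are assumptions.
+ */
+static DEFINE_RTDM_WAITQUEUE(foo_wq);
+static int foo_data_ready;
+
+static int foo_consume(void)
+{
+	rtdm_lockctx_t ctx;
+	int ret;
+
+	rtdm_waitqueue_lock(&foo_wq, ctx);
+	ret = rtdm_wait_condition_locked(&foo_wq, foo_data_ready);
+	if (ret == 0)
+		foo_data_ready = 0;
+	rtdm_waitqueue_unlock(&foo_wq, ctx);
+
+	return ret;
+}
+
+static void foo_produce(void)
+{
+	rtdm_lockctx_t ctx;
+
+	rtdm_waitqueue_lock(&foo_wq, ctx);
+	foo_data_ready = 1;
+	rtdm_waitqueue_signal(&foo_wq);
+	rtdm_waitqueue_unlock(&foo_wq, ctx);
+}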
+
+#endif /* !DOXYGEN_CPP */
+
+/** @} rtdm_sync */
+
+/* --- Interrupt management services --- */
+/*!
+ * @addtogroup rtdm_irq
+ * @{
+ */
+
+typedef struct xnintr rtdm_irq_t;
+
+/*!
+ * @anchor RTDM_IRQTYPE_xxx   @name RTDM_IRQTYPE_xxx
+ * Interrupt registrations flags
+ * @{
+ */
+/** Enable IRQ-sharing with other real-time drivers */
+#define RTDM_IRQTYPE_SHARED		XN_IRQTYPE_SHARED
+/** Mark IRQ as edge-triggered, relevant for correct handling of shared
+ *  edge-triggered IRQs */
+#define RTDM_IRQTYPE_EDGE		XN_IRQTYPE_EDGE
+/** @} RTDM_IRQTYPE_xxx */
+
+/**
+ * Interrupt handler
+ *
+ * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 or a combination of @ref RTDM_IRQ_xxx flags
+ */
+typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);
+
+/*!
+ * @anchor RTDM_IRQ_xxx   @name RTDM_IRQ_xxx
+ * Return flags of interrupt handlers
+ * @{
+ */
+/** Unhandled interrupt */
+#define RTDM_IRQ_NONE			XN_IRQ_NONE
+/** Denote handled interrupt */
+#define RTDM_IRQ_HANDLED		XN_IRQ_HANDLED
+/** Request interrupt disabling on exit */
+#define RTDM_IRQ_DISABLE		XN_IRQ_DISABLE
+/** @} RTDM_IRQ_xxx */
+
+/**
+ * Retrieve IRQ handler argument
+ *
+ * @param irq_handle IRQ handle
+ * @param type Type of the pointer to return
+ *
+ * @return The argument pointer registered on rtdm_irq_request() is returned,
+ * type-casted to the specified @a type.
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_irq_get_arg(irq_handle, type)	((type *)irq_handle->cookie)
+/** @} rtdm_irq */
+
+int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
+		     rtdm_irq_handler_t handler, unsigned long flags,
+		     const char *device_name, void *arg);
+
+int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no,
+			    rtdm_irq_handler_t handler, unsigned long flags,
+			    const char *device_name, void *arg,
+			    const cpumask_t *cpumask);
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline int rtdm_irq_free(rtdm_irq_t *irq_handle)
+{
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+	xnintr_destroy(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle)
+{
+	xnintr_enable(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle)
+{
+	xnintr_disable(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle,
+					const cpumask_t *cpumask)
+{
+	return xnintr_affinity(irq_handle, cpumask);
+}
+#endif /* !DOXYGEN_CPP */
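+
+/*
+ * Illustrative sketch: a minimal interrupt handler and its
+ * registration. struct foo_card and foo_attach_irq() are made-up
+ * names; the RTDM calls and return flags are the ones declared above.
+ */
+struct foo_card {
+	rtdm_irq_t irq_handle;
+};
+
+static int foo_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct foo_card *card = rtdm_irq_get_arg(irq_handle, struct foo_card);
+
+	/* Acknowledge the hardware via card here, then report the IRQ. */
+	(void)card;
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int foo_attach_irq(struct foo_card *card, unsigned int irq_no)
+{
+	return rtdm_irq_request(&card->irq_handle, irq_no, foo_interrupt,
+				0, "foo", card);
+}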
+
+/* --- non-real-time signalling services --- */
+
+/*!
+ * @addtogroup rtdm_nrtsignal
+ * @{
+ */
+
+typedef struct rtdm_nrtsig rtdm_nrtsig_t;
+/**
+ * Non-real-time signal handler
+ *
+ * @param[in] nrt_sig Signal handle pointer as passed to rtdm_nrtsig_init()
+ * @param[in] arg Argument as passed to rtdm_nrtsig_init()
+ *
+ * @note The signal handler will run in the soft-IRQ context of the
+ * non-real-time subsystem. Mind the implications of this context, e.g.
+ * blocking operations must not be invoked.
+ */
+typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t *nrt_sig, void *arg);
+
+struct rtdm_nrtsig {
+	struct pipeline_inband_work inband_work; /* Must be first */
+	rtdm_nrtsig_handler_t handler;
+	void *arg;
+};
+
+void rtdm_schedule_nrt_work(struct work_struct *lostage_work);
+/** @} rtdm_nrtsignal */
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work);
+
+static inline void rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
+				    rtdm_nrtsig_handler_t handler, void *arg)
+{
+	nrt_sig->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*nrt_sig,
+						 __rtdm_nrtsig_execute);
+	nrt_sig->handler = handler;
+	nrt_sig->arg = arg;
+}
+
+static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig)
+{
+	nrt_sig->handler = NULL;
+	nrt_sig->arg = NULL;
+}
+
+void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig);
+#endif /* !DOXYGEN_CPP */
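+
+/*
+ * Illustrative sketch: deferring Linux-side work from real-time
+ * context through a non-real-time signal. The foo_* names are
+ * assumptions.
+ */
+static rtdm_nrtsig_t foo_nrtsig;
+
+static void foo_nrt_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+{
+	/* Runs in Linux soft-IRQ context; must not block. */
+	printk("foo: deferred event, arg=%p\n", arg);
+}
+
+static void foo_setup_nrtsig(void)
+{
+	rtdm_nrtsig_init(&foo_nrtsig, foo_nrt_handler, NULL);
+}
+
+static void foo_notify_linux(void)
+{
+	/* Safe to call from primary (real-time) context. */
+	rtdm_nrtsig_pend(&foo_nrtsig);
+}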
+
+/* --- timer services --- */
+
+/*!
+ * @addtogroup rtdm_timer
+ * @{
+ */
+
+typedef struct xntimer rtdm_timer_t;
+
+/**
+ * Timer handler
+ *
+ * @param[in] timer Timer handle as returned by rtdm_timer_init()
+ */
+typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer);
+
+/*!
+ * @anchor RTDM_TIMERMODE_xxx   @name RTDM_TIMERMODE_xxx
+ * Timer operation modes
+ * @{
+ */
+enum rtdm_timer_mode {
+	/** Monotonic timer with relative timeout */
+	RTDM_TIMERMODE_RELATIVE = XN_RELATIVE,
+
+	/** Monotonic timer with absolute timeout */
+	RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE,
+
+	/** Adjustable timer with absolute timeout */
+	RTDM_TIMERMODE_REALTIME = XN_REALTIME
+};
+/** @} RTDM_TIMERMODE_xxx */
+
+/** @} rtdm_timer */
+
+int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler,
+		    const char *name);
+
+void rtdm_timer_destroy(rtdm_timer_t *timer);
+
+int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+		     nanosecs_rel_t interval, enum rtdm_timer_mode mode);
+
+void rtdm_timer_stop(rtdm_timer_t *timer);
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer,
+					      nanosecs_abs_t expiry,
+					      nanosecs_rel_t interval,
+					      enum rtdm_timer_mode mode)
+{
+	return xntimer_start(timer, expiry, interval, (xntmode_t)mode);
+}
+
+static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer)
+{
+	xntimer_stop(timer);
+}
+#endif /* !DOXYGEN_CPP */
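+
+/*
+ * Illustrative sketch: a periodic core timer firing every millisecond,
+ * started relative to the current time. The foo_* names and the 1 ms
+ * period are arbitrary choices.
+ */
+static rtdm_timer_t foo_timer;
+
+static void foo_timer_handler(rtdm_timer_t *timer)
+{
+	/* Runs in real-time timer context on each expiry. */
+}
+
+static int foo_start_timer(void)
+{
+	int ret;
+
+	ret = rtdm_timer_init(&foo_timer, foo_timer_handler, "foo_timer");
+	if (ret)
+		return ret;
+
+	/* First expiry in 1 ms, then every 1 ms. */
+	return rtdm_timer_start(&foo_timer, 1000000ULL, 1000000ULL,
+				RTDM_TIMERMODE_RELATIVE);
+}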
+
+/* --- task services --- */
+/*!
+ * @addtogroup rtdm_task
+ * @{
+ */
+
+typedef struct xnthread rtdm_task_t;
+
+/**
+ * Real-time task procedure
+ *
+ * @param[in,out] arg argument as passed to rtdm_task_init()
+ */
+typedef void (*rtdm_task_proc_t)(void *arg);
+
+/**
+ * @anchor rtdmtaskprio @name Task Priority Range
+ * Maximum and minimum task priorities
+ * @{ */
+#define RTDM_TASK_LOWEST_PRIORITY	0
+#define RTDM_TASK_HIGHEST_PRIORITY	99
+/** @} Task Priority Range */
+
+/**
+ * @anchor rtdmchangetaskprio @name Task Priority Modification
+ * Raise or lower task priorities by one level
+ * @{ */
+#define RTDM_TASK_RAISE_PRIORITY	(+1)
+#define RTDM_TASK_LOWER_PRIORITY	(-1)
+/** @} Task Priority Modification */
+
+/** @} rtdm_task */
+
+int rtdm_task_init(rtdm_task_t *task, const char *name,
+		   rtdm_task_proc_t task_proc, void *arg,
+		   int priority, nanosecs_rel_t period);
+int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode);
+void rtdm_task_busy_sleep(nanosecs_rel_t delay);
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline void rtdm_task_destroy(rtdm_task_t *task)
+{
+	xnthread_cancel(task);
+	xnthread_join(task, true);
+}
+
+static inline int rtdm_task_should_stop(void)
+{
+	return xnthread_test_info(xnthread_current(), XNCANCELD);
+}
+
+void rtdm_task_join(rtdm_task_t *task);
+
+static inline void __deprecated rtdm_task_join_nrt(rtdm_task_t *task,
+						   unsigned int poll_delay)
+{
+	rtdm_task_join(task);
+}
+
+static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority)
+{
+	union xnsched_policy_param param = { .rt = { .prio = priority } };
+	spl_t s;
+
+	splhigh(s);
+	xnthread_set_schedparam(task, &xnsched_class_rt, &param);
+	xnsched_run();
+	splexit(s);
+}
+
+static inline int rtdm_task_set_period(rtdm_task_t *task,
+				       nanosecs_abs_t start_date,
+				       nanosecs_rel_t period)
+{
+	if (period < 0)
+		period = 0;
+	if (start_date == 0)
+		start_date = XN_INFINITE;
+
+	return xnthread_set_periodic(task, start_date, XN_ABSOLUTE, period);
+}
+
+static inline int rtdm_task_unblock(rtdm_task_t *task)
+{
+	spl_t s;
+	int res;
+
+	splhigh(s);
+	res = xnthread_unblock(task);
+	xnsched_run();
+	splexit(s);
+
+	return res;
+}
+
+static inline rtdm_task_t *rtdm_task_current(void)
+{
+	return xnthread_current();
+}
+
+static inline int rtdm_task_wait_period(unsigned long *overruns_r)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+	return xnthread_wait_period(overruns_r);
+}
+
+static inline int rtdm_task_sleep(nanosecs_rel_t delay)
+{
+	return __rtdm_task_sleep(delay, XN_RELATIVE);
+}
+
+static inline int
+rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode)
+{
+	/* For the sake of a consistent API usage... */
+	if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME)
+		return -EINVAL;
+	return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode);
+}
+
+/* rtdm_task_sleep_abs shall be used instead */
+static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
+{
+	return __rtdm_task_sleep(wakeup_time, XN_REALTIME);
+}
+
+#define rtdm_task_busy_wait(__condition, __spin_ns, __sleep_ns)			\
+	({									\
+		__label__ done;							\
+		nanosecs_abs_t __end;						\
+		int __ret = 0;							\
+		for (;;) {							\
+			__end = rtdm_clock_read_monotonic() + __spin_ns;	\
+			for (;;) {						\
+				if (__condition)				\
+					goto done;				\
+				if (rtdm_clock_read_monotonic() >= __end)	\
+					break;					\
+			}							\
+			__ret = rtdm_task_sleep(__sleep_ns);			\
+			if (__ret)						\
+				break;						\
+		}								\
+	done:									\
+		__ret;								\
+	})
+
+#define rtdm_wait_context	xnthread_wait_context
+
+static inline
+void rtdm_wait_complete(struct rtdm_wait_context *wc)
+{
+	xnthread_complete_wait(wc);
+}
+
+static inline
+int rtdm_wait_is_completed(struct rtdm_wait_context *wc)
+{
+	return xnthread_wait_complete_p(wc);
+}
+
+static inline void rtdm_wait_prepare(struct rtdm_wait_context *wc)
+{
+	xnthread_prepare_wait(wc);
+}
+
+static inline
+struct rtdm_wait_context *rtdm_wait_get_context(rtdm_task_t *task)
+{
+	return xnthread_get_wait_context(task);
+}
+
+#endif /* !DOXYGEN_CPP */
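+
+/*
+ * Illustrative sketch: a periodic real-time task which terminates
+ * cleanly once the driver cancels it. The foo_* names, the 1 ms period
+ * and the priority value are arbitrary choices; passing NULL to
+ * rtdm_task_wait_period() assumes the overrun count is not needed.
+ */
+static rtdm_task_t foo_task;
+
+static void foo_task_proc(void *arg)
+{
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_task_wait_period(NULL))
+			break;
+		/* Periodic work goes here. */
+	}
+}
+
+static int foo_spawn_task(void)
+{
+	return rtdm_task_init(&foo_task, "foo_task", foo_task_proc, NULL,
+			      RTDM_TASK_HIGHEST_PRIORITY / 2, 1000000);
+}
+
+static void foo_stop_task(void)
+{
+	rtdm_task_destroy(&foo_task);
+}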
+
+/* --- event services --- */
+
+typedef struct rtdm_event {
+	struct xnsynch synch_base;
+	DECLARE_XNSELECT(select_block);
+} rtdm_event_t;
+
+#define RTDM_EVENT_PENDING		XNSYNCH_SPARE1
+
+void rtdm_event_init(rtdm_event_t *event, unsigned long pending);
+int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector,
+		      enum rtdm_selecttype type, unsigned fd_index);
+int rtdm_event_wait(rtdm_event_t *event);
+int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq);
+void rtdm_event_signal(rtdm_event_t *event);
+
+void rtdm_event_clear(rtdm_event_t *event);
+
+void rtdm_event_pulse(rtdm_event_t *event);
+
+void rtdm_event_destroy(rtdm_event_t *event);
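+
+/*
+ * Illustrative sketch: waking up a waiting task from an interrupt
+ * handler through an event. The foo_* names are assumptions.
+ */
+static rtdm_event_t foo_event;
+
+static void foo_setup_event(void)
+{
+	rtdm_event_init(&foo_event, 0);
+}
+
+static int foo_wait_for_hw(nanosecs_rel_t timeout)
+{
+	/* 0 on success, or a negative error code such as -ETIMEDOUT. */
+	return rtdm_event_timedwait(&foo_event, timeout, NULL);
+}
+
+static void foo_hw_done(void)
+{
+	/* Typically called from the IRQ handler. */
+	rtdm_event_signal(&foo_event);
+}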
+
+/* --- semaphore services --- */
+
+typedef struct rtdm_sem {
+	unsigned long value;
+	struct xnsynch synch_base;
+	DECLARE_XNSELECT(select_block);
+} rtdm_sem_t;
+
+void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value);
+int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector,
+		    enum rtdm_selecttype type, unsigned fd_index);
+int rtdm_sem_down(rtdm_sem_t *sem);
+int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
+		       rtdm_toseq_t *timeout_seq);
+void rtdm_sem_up(rtdm_sem_t *sem);
+
+void rtdm_sem_destroy(rtdm_sem_t *sem);
+
+/* --- mutex services --- */
+
+typedef struct rtdm_mutex {
+	struct xnsynch synch_base;
+	atomic_t fastlock;
+} rtdm_mutex_t;
+
+void rtdm_mutex_init(rtdm_mutex_t *mutex);
+int rtdm_mutex_lock(rtdm_mutex_t *mutex);
+int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq);
+void rtdm_mutex_unlock(rtdm_mutex_t *mutex);
+void rtdm_mutex_destroy(rtdm_mutex_t *mutex);
+
+/* --- utility functions --- */
+
+#define rtdm_printk(format, ...)	printk(format, ##__VA_ARGS__)
+
+#define rtdm_printk_ratelimited(fmt, ...)  do {				\
+	if (xnclock_ratelimit())					\
+		printk(fmt, ##__VA_ARGS__);				\
+} while (0)
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline void *rtdm_malloc(size_t size)
+{
+	return xnmalloc(size);
+}
+
+static inline void rtdm_free(void *ptr)
+{
+	xnfree(ptr);
+}
+
+int rtdm_mmap_to_user(struct rtdm_fd *fd,
+		      void *src_addr, size_t len,
+		      int prot, void **pptr,
+		      struct vm_operations_struct *vm_ops,
+		      void *vm_private_data);
+
+int rtdm_iomap_to_user(struct rtdm_fd *fd,
+		       phys_addr_t src_addr, size_t len,
+		       int prot, void **pptr,
+		       struct vm_operations_struct *vm_ops,
+		       void *vm_private_data);
+
+int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va);
+
+int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va);
+
+int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa);
+
+int rtdm_munmap(void *ptr, size_t len);
+
+static inline int rtdm_read_user_ok(struct rtdm_fd *fd,
+				    const void __user *ptr, size_t size)
+{
+	return access_rok(ptr, size);
+}
+
+static inline int rtdm_rw_user_ok(struct rtdm_fd *fd,
+				  const void __user *ptr, size_t size)
+{
+	return access_wok(ptr, size);
+}
+
+static inline int rtdm_copy_from_user(struct rtdm_fd *fd,
+				      void *dst, const void __user *src,
+				      size_t size)
+{
+	return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0;
+}
+
+static inline int rtdm_safe_copy_from_user(struct rtdm_fd *fd,
+					   void *dst, const void __user *src,
+					   size_t size)
+{
+	return cobalt_copy_from_user(dst, src, size);
+}
+
+static inline int rtdm_copy_to_user(struct rtdm_fd *fd,
+				    void __user *dst, const void *src,
+				    size_t size)
+{
+	return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0;
+}
+
+static inline int rtdm_safe_copy_to_user(struct rtdm_fd *fd,
+					 void __user *dst, const void *src,
+					 size_t size)
+{
+	return cobalt_copy_to_user(dst, src, size);
+}
+
+static inline int rtdm_strncpy_from_user(struct rtdm_fd *fd,
+					 char *dst,
+					 const char __user *src, size_t count)
+{
+	return cobalt_strncpy_from_user(dst, src, count);
+}
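+
+/*
+ * Illustrative sketch: fetching an ioctl argument from user space with
+ * the checked copy helper. struct foo_config and the handler name are
+ * made-up; a real driver would also dispatch on the request code.
+ */
+struct foo_config {
+	int mode;
+};
+
+static int foo_ioctl_rt(struct rtdm_fd *fd, unsigned int request,
+			void __user *arg)
+{
+	struct foo_config config;
+	int ret;
+
+	ret = rtdm_safe_copy_from_user(fd, &config, arg, sizeof(config));
+	if (ret)
+		return ret;
+
+	/* Apply config.mode to the hardware here. */
+
+	return 0;
+}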
+
+static inline bool rtdm_available(void)
+{
+	return realtime_core_enabled();
+}
+
+static inline int rtdm_rt_capable(struct rtdm_fd *fd)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p()))
+		return 0;
+
+	if (!rtdm_fd_is_user(fd))
+		return !xnsched_root_p();
+
+	return xnthread_current() != NULL;
+}
+
+static inline int rtdm_in_rt_context(void)
+{
+	return is_primary_domain();
+}
+
+#define RTDM_IOV_FASTMAX  16
+
+int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast);
+
+int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast);
+
+static inline
+void rtdm_drop_iovec(struct iovec *iov, struct iovec *iov_fast)
+{
+	if (iov != iov_fast)
+		xnfree(iov);
+}
+
+ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen);
+
+#endif /* !DOXYGEN_CPP */
+
+#endif /* _COBALT_RTDM_DRIVER_H */
+++ linux-patched/include/xenomai/rtdm/rtdm.h	2022-03-21 12:58:31.872864749 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/gpiopwm.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_RTDM_H
+#define _COBALT_RTDM_RTDM_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+#include <linux/socket.h>
+#include <cobalt/kernel/ppd.h>
+#include <rtdm/fd.h>
+
+typedef __u32 socklen_t;
+
+#include <rtdm/uapi/rtdm.h>
+
+int __rtdm_dev_open(const char *path, int oflag);
+
+int __rtdm_dev_socket(int protocol_family,
+		      int socket_type, int protocol);
+
+static inline int rtdm_open(const char *path, int oflag, ...)
+{
+	return __rtdm_dev_open(path, oflag);
+}
+
+static inline int rtdm_socket(int protocol_family,
+			      int socket_type, int protocol)
+{
+	return __rtdm_dev_socket(protocol_family, socket_type, protocol);
+}
+
+static inline int rtdm_close(int fd)
+{
+	return rtdm_fd_close(fd, RTDM_FD_MAGIC);
+}
+
+#define rtdm_fcntl(__fd, __cmd, __args...)	\
+	rtdm_fd_fcntl(__fd, __cmd, ##__args)
+
+#define rtdm_ioctl(__fd, __request, __args...)	\
+	rtdm_fd_ioctl(__fd, __request, ##__args)
+
+static inline ssize_t rtdm_read(int fd, void *buf, size_t count)
+{
+	return rtdm_fd_read(fd, buf, count);
+}
+
+static inline ssize_t rtdm_write(int fd, const void *buf, size_t count)
+{
+	return rtdm_fd_write(fd, buf, count);
+}
+
+static inline ssize_t rtdm_recvmsg(int s, struct user_msghdr *msg, int flags)
+{
+	return rtdm_fd_recvmsg(s, msg, flags);
+}
+
+static inline ssize_t rtdm_sendmsg(int s, const struct user_msghdr *msg, int flags)
+{
+	return rtdm_fd_sendmsg(s, msg, flags);
+}
+
+static inline
+ssize_t rtdm_recvfrom(int s, void *buf, size_t len, int flags,
+		      struct sockaddr *from,
+		      socklen_t *fromlen)
+{
+	struct user_msghdr msg;
+	struct iovec iov;
+	ssize_t ret;
+
+	iov.iov_base = buf;
+	iov.iov_len = len;
+	msg.msg_name = from;
+	msg.msg_namelen = from ? *fromlen : 0;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+
+	ret = rtdm_recvmsg(s, &msg, flags);
+	if (ret < 0)
+		return ret;
+
+	if (from)
+		*fromlen = msg.msg_namelen;
+
+	return ret;
+}
+
+static inline ssize_t rtdm_recv(int s, void *buf, size_t len, int flags)
+{
+	return rtdm_recvfrom(s, buf, len, flags, NULL, NULL);
+}
+
+static inline ssize_t rtdm_sendto(int s, const void *buf, size_t len,
+				  int flags, const struct sockaddr *to,
+				  socklen_t tolen)
+{
+	struct user_msghdr msg;
+	struct iovec iov;
+
+	iov.iov_base = (void *)buf;
+	iov.iov_len = len;
+	msg.msg_name = (struct sockaddr *)to;
+	msg.msg_namelen = tolen;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+
+	return rtdm_sendmsg(s, &msg, flags);
+}
+
+static inline ssize_t rtdm_send(int s, const void *buf, size_t len, int flags)
+{
+	return rtdm_sendto(s, buf, len, flags, NULL, 0);
+}
+
+static inline int rtdm_getsockopt(int s, int level, int optname,
+				  void *optval, socklen_t *optlen)
+{
+	struct _rtdm_getsockopt_args args = {
+		level, optname, optval, optlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETSOCKOPT, &args);
+}
+
+static inline int rtdm_setsockopt(int s, int level, int optname,
+				  const void *optval, socklen_t optlen)
+{
+	struct _rtdm_setsockopt_args args = {
+		level, optname, (void *)optval, optlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_SETSOCKOPT, &args);
+}
+
+static inline int rtdm_bind(int s, const struct sockaddr *my_addr,
+			    socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = {
+		my_addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_BIND, &args);
+}
+
+static inline int rtdm_connect(int s, const struct sockaddr *serv_addr,
+			       socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = {
+		serv_addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_CONNECT, &args);
+}
+
+static inline int rtdm_listen(int s, int backlog)
+{
+	return rtdm_ioctl(s, _RTIOC_LISTEN, backlog);
+}
+
+static inline int rtdm_accept(int s, struct sockaddr *addr,
+			      socklen_t *addrlen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_ACCEPT, &args);
+}
+
+static inline int rtdm_getsockname(int s, struct sockaddr *name,
+				   socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		name, namelen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETSOCKNAME, &args);
+}
+
+static inline int rtdm_getpeername(int s, struct sockaddr *name,
+				   socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		name, namelen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETPEERNAME, &args);
+}
+
+static inline int rtdm_shutdown(int s, int how)
+{
+	return rtdm_ioctl(s, _RTIOC_SHUTDOWN, how);
+}
+
+#endif /* _COBALT_RTDM_RTDM_H */
+++ linux-patched/include/xenomai/rtdm/gpiopwm.h	2022-03-21 12:58:31.864864827 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/fd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_PWM_H
+#define _COBALT_RTDM_PWM_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/gpiopwm.h>
+
+#endif /* !_COBALT_RTDM_PWM_H */
+++ linux-patched/include/xenomai/rtdm/fd.h	2022-03-21 12:58:31.857864895 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/cobalt.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008,2013,2014 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_FD_H
+#define _COBALT_KERNEL_FD_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/file.h>
+#include <cobalt/kernel/tree.h>
+#include <asm-generic/xenomai/syscall.h>
+
+struct vm_area_struct;
+struct rtdm_fd;
+struct _rtdm_mmap_request;
+struct xnselector;
+struct cobalt_ppd;
+struct rtdm_device;
+
+/**
+ * @file
+ * @anchor File operation handlers
+ * @addtogroup rtdm_device_register
+ * @{
+ */
+
+/**
+ * Open handler for named devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] oflags Open flags as passed by the user
+ *
+ * The file descriptor carries the device minor number, which can be
+ * retrieved by a call to rtdm_fd_minor(fd). The minor number can be
+ * used to distinguish between devices managed by the same driver.
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c open() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_open_handler(struct rtdm_fd *fd, int oflags);
+
+/**
+ * Socket creation handler for protocol devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] protocol Protocol number as passed by the user
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c socket() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_socket_handler(struct rtdm_fd *fd, int protocol);
+
+/**
+ * Close handler
+ *
+ * @param[in] fd File descriptor associated with opened
+ * device instance.
+ *
+ * @see @c close() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+void rtdm_close_handler(struct rtdm_fd *fd);
+
+/**
+ * IOCTL handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] request Request number as passed by the user
+ * @param[in,out] arg Request argument as passed by the user
+ *
+ * @return A positive value or 0 on success. On failure return either
+ * -ENOSYS, to request that the function be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c ioctl() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_ioctl_handler(struct rtdm_fd *fd, unsigned int request, void __user *arg);
+
+/**
+ * Read handler
+ *
+ * @param[in] fd File descriptor
+ * @param[out] buf Input buffer as passed by the user
+ * @param[in] size Number of bytes the user requests to read
+ *
+ * @return On success, the number of bytes read. On failure return either
+ * -ENOSYS, to request that this handler be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c read() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_read_handler(struct rtdm_fd *fd, void __user *buf, size_t size);
+
+/**
+ * Write handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] buf Output buffer as passed by the user
+ * @param[in] size Number of bytes the user requests to write
+ *
+ * @return On success, the number of bytes written. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c write() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_write_handler(struct rtdm_fd *fd, const void __user *buf, size_t size);
+
+/**
+ * Receive message handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in,out] msg Message descriptor as passed by the user, automatically
+ * mirrored to safe kernel memory in case of a user mode call
+ * @param[in] flags Message flags as passed by the user
+ *
+ * @return On success, the number of bytes received. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c recvmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_recvmsg_handler(struct rtdm_fd *fd, struct user_msghdr *msg, int flags);
+
+/**
+ * Transmit message handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] msg Message descriptor as passed by the user, automatically
+ * mirrored to safe kernel memory in case of a user mode call
+ * @param[in] flags Message flags as passed by the user
+ *
+ * @return On success, the number of bytes transmitted. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c sendmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_sendmsg_handler(struct rtdm_fd *fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * Select handler
+ *
+ * @param[in] fd File descriptor
+ * @param selector Pointer to the selector structure
+ * @param type Type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
+ * XNSELECT_EXCEPT)
+ * @param index Index of the file descriptor
+ *
+ * @return 0 on success. On failure, a negative error code is
+ * returned.
+ *
+ * @see @c select() in POSIX.1-2001,
+ * http://pubs.opengroup.org/onlinepubs/007908799/xsh/select.html
+ */
+int rtdm_select_handler(struct rtdm_fd *fd, struct xnselector *selector,
+			unsigned int type, unsigned int index);
+
+/**
+ * Memory mapping handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] vma Virtual memory area descriptor
+ *
+ * @return 0 on success. On failure, a negative error code is
+ * returned.
+ *
+ * @see @c mmap() in POSIX.1-2001,
+ * http://pubs.opengroup.org/onlinepubs/7908799/xsh/mmap.html
+ *
+ * @note The address hint passed to the mmap() request is deliberately
+ * ignored by RTDM.
+ */
+int rtdm_mmap_handler(struct rtdm_fd *fd, struct vm_area_struct *vma);
+
+/**
+ * Allocate mapping region in address space
+ *
+ * When present, this optional handler should return the start address
+ * of a free region in the process's address space, large enough to
+ * cover the ongoing mmap() operation. If unspecified, the default
+ * architecture-defined handler is invoked.
+ *
+ * Most drivers can omit this handler, except on MMU-less platforms
+ * (see second note).
+ *
+ * @param[in] fd File descriptor
+ * @param[in] len Length of the requested region
+ * @param[in] pgoff Page frame number to map to (see second note).
+ * @param[in] flags Requested mapping flags
+ *
+ * @return The start address of the mapping region on success. On
+ * failure, a negative error code should be returned, with -ENOSYS
+ * meaning that the driver does not want to provide such information,
+ * in which case the ongoing mmap() operation will fail.
+ *
+ * @note The address hint passed to the mmap() request is deliberately
+ * ignored by RTDM, and therefore not passed to this handler.
+ *
+ * @note On MMU-less platforms, this handler is required because RTDM
+ * issues mapping requests over a shareable character device
+ * internally. In such a context, the RTDM core may pass a null @a pgoff
+ * argument to the handler to probe for the logical start address
+ * of the memory region to map to. Otherwise, when @a pgoff is
+ * non-zero, pgoff << PAGE_SHIFT is usually returned.
+ */
+unsigned long
+rtdm_get_unmapped_area_handler(struct rtdm_fd *fd,
+			       unsigned long len, unsigned long pgoff,
+			       unsigned long flags);
+/**
+ * @anchor rtdm_fd_ops
+ * @brief RTDM file operation descriptor.
+ *
+ * This structure describes the operations available with an RTDM
+ * device, defining handlers for submitting I/O requests. Those
+ * handlers are implemented by RTDM device drivers.
+ */
+struct rtdm_fd_ops {
+	/** See rtdm_open_handler(). */
+	int (*open)(struct rtdm_fd *fd, int oflags);
+	/** See rtdm_socket_handler(). */
+	int (*socket)(struct rtdm_fd *fd, int protocol);
+	/** See rtdm_close_handler(). */
+	void (*close)(struct rtdm_fd *fd);
+	/** See rtdm_ioctl_handler(). */
+	int (*ioctl_rt)(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg);
+	/** See rtdm_ioctl_handler(). */
+	int (*ioctl_nrt)(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg);
+	/** See rtdm_read_handler(). */
+	ssize_t (*read_rt)(struct rtdm_fd *fd,
+			   void __user *buf, size_t size);
+	/** See rtdm_read_handler(). */
+	ssize_t (*read_nrt)(struct rtdm_fd *fd,
+			    void __user *buf, size_t size);
+	/** See rtdm_write_handler(). */
+	ssize_t (*write_rt)(struct rtdm_fd *fd,
+			    const void __user *buf, size_t size);
+	/** See rtdm_write_handler(). */
+	ssize_t (*write_nrt)(struct rtdm_fd *fd,
+			     const void __user *buf, size_t size);
+	/** See rtdm_recvmsg_handler(). */
+	ssize_t (*recvmsg_rt)(struct rtdm_fd *fd,
+			      struct user_msghdr *msg, int flags);
+	/** See rtdm_recvmsg_handler(). */
+	ssize_t (*recvmsg_nrt)(struct rtdm_fd *fd,
+			       struct user_msghdr *msg, int flags);
+	/** See rtdm_sendmsg_handler(). */
+	ssize_t (*sendmsg_rt)(struct rtdm_fd *fd,
+			      const struct user_msghdr *msg, int flags);
+	/** See rtdm_sendmsg_handler(). */
+	ssize_t (*sendmsg_nrt)(struct rtdm_fd *fd,
+			       const struct user_msghdr *msg, int flags);
+	/** See rtdm_select_handler(). */
+	int (*select)(struct rtdm_fd *fd,
+		      struct xnselector *selector,
+		      unsigned int type, unsigned int index);
+	/** See rtdm_mmap_handler(). */
+	int (*mmap)(struct rtdm_fd *fd,
+		    struct vm_area_struct *vma);
+	/** See rtdm_get_unmapped_area_handler(). */
+	unsigned long (*get_unmapped_area)(struct rtdm_fd *fd,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags);
+};
+
+/** @} File operation handlers */
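+
+/*
+ * Illustrative sketch: wiring a few handlers into a file operation
+ * descriptor. The foo_* handlers are made-up stubs; handlers left out
+ * of the initializer simply remain NULL.
+ */
+static int foo_open(struct rtdm_fd *fd, int oflags)
+{
+	return 0;
+}
+
+static void foo_close(struct rtdm_fd *fd)
+{
+}
+
+static ssize_t foo_read_rt(struct rtdm_fd *fd, void __user *buf, size_t size)
+{
+	/*
+	 * Returning -ENOSYS here would request the call to be retried
+	 * from the non-real-time context, i.e. via read_nrt.
+	 */
+	return 0;
+}
+
+static struct rtdm_fd_ops foo_fd_ops = {
+	.open = foo_open,
+	.close = foo_close,
+	.read_rt = foo_read_rt,
+};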
+
+struct rtdm_fd {
+	unsigned int magic;
+	struct rtdm_fd_ops *ops;
+	struct cobalt_ppd *owner;
+	unsigned int refs;
+	int ufd;
+	int minor;
+	int oflags;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	int compat;
+#endif
+	bool stale;
+	struct list_head cleanup;
+	struct list_head next;	/* in dev->openfd_list */
+};
+
+#define RTDM_FD_MAGIC 0x52544446
+
+#define RTDM_FD_COMPAT	__COBALT_COMPAT_BIT
+#define RTDM_FD_COMPATX	__COBALT_COMPATX_BIT
+
+int __rtdm_anon_getfd(const char *name, int flags);
+
+void __rtdm_anon_putfd(int ufd);
+
+static inline struct cobalt_ppd *rtdm_fd_owner(const struct rtdm_fd *fd)
+{
+	return fd->owner;
+}
+
+static inline int rtdm_fd_ufd(const struct rtdm_fd *fd)
+{
+	return fd->ufd;
+}
+
+static inline int rtdm_fd_minor(const struct rtdm_fd *fd)
+{
+	return fd->minor;
+}
+
+static inline int rtdm_fd_flags(const struct rtdm_fd *fd)
+{
+	return fd->oflags;
+}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
+{
+	return fd->compat;
+}
+#else
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
+{
+	return 0;
+}
+#endif
+
+int rtdm_fd_enter(struct rtdm_fd *rtdm_fd, int ufd,
+		  unsigned int magic, struct rtdm_fd_ops *ops);
+
+int rtdm_fd_register(struct rtdm_fd *fd, int ufd);
+
+struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic);
+
+int rtdm_fd_lock(struct rtdm_fd *fd);
+
+void rtdm_fd_put(struct rtdm_fd *fd);
+
+void rtdm_fd_unlock(struct rtdm_fd *fd);
+
+int rtdm_fd_fcntl(int ufd, int cmd, ...);
+
+int rtdm_fd_ioctl(int ufd, unsigned int request, ...);
+
+ssize_t rtdm_fd_read(int ufd, void __user *buf, size_t size);
+
+ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size);
+
+int rtdm_fd_close(int ufd, unsigned int magic);
+
+ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags);
+
+int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags, void __user *u_timeout,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg),
+		       int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts));
+
+int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen,
+			 unsigned int flags, void __user *u_timeout,
+			 int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+			 int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg));
+
+ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg,
+			int flags);
+
+int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg));
+
+int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma,
+		 void **u_addrp);
+
+int rtdm_fd_valid_p(int ufd);
+
+int rtdm_fd_select(int ufd, struct xnselector *selector,
+		   unsigned int type);
+
+int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd,
+		struct rtdm_device *dev);
+
+void rtdm_device_flush_fds(struct rtdm_device *dev);
+
+void rtdm_fd_cleanup(struct cobalt_ppd *p);
+
+void rtdm_fd_init(void);
+
+#endif /* _COBALT_KERNEL_FD_H */
+++ linux-patched/include/xenomai/rtdm/cobalt.h	2022-03-21 12:58:31.849864973 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/rtdm_helpers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_COBALT_H
+#define _COBALT_RTDM_COBALT_H
+
+#include <xenomai/posix/process.h>
+#include <xenomai/posix/extension.h>
+#include <xenomai/posix/thread.h>
+#include <xenomai/posix/signal.h>
+#include <xenomai/posix/timer.h>
+#include <xenomai/posix/clock.h>
+#include <xenomai/posix/event.h>
+#include <xenomai/posix/monitor.h>
+#include <xenomai/posix/corectl.h>
+
+#endif /* !_COBALT_RTDM_COBALT_H */
+++ linux-patched/include/xenomai/rtdm/analogy/rtdm_helpers.h	2022-03-21 12:58:31.842865041 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/subdevice.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, Operation system facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H
+#define _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H
+
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <rtdm/driver.h>
+
+/* --- Trace section  --- */
+#define A4L_PROMPT "Analogy: "
+
+#define RTDM_SUBCLASS_ANALOGY 0
+
+#define __a4l_err(fmt, args...)  rtdm_printk(KERN_ERR A4L_PROMPT fmt, ##args)
+#define __a4l_warn(fmt, args...) rtdm_printk(KERN_WARNING A4L_PROMPT fmt, ##args)
+
+#ifdef  CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+#define __a4l_info(fmt, args...) trace_printk(fmt, ##args)
+#else
+#define __a4l_info(fmt, args...) 						\
+        rtdm_printk(KERN_INFO A4L_PROMPT "%s: " fmt, __FUNCTION__, ##args)
+#endif
+
+#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG
+#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+#define __a4l_dbg(level, debug, fmt, args...)				\
+	do {								\
+	if ((debug) >= (level))						\
+		trace_printk(fmt, ##args); 				\
+	} while (0)
+#else
+#define __a4l_dbg(level, debug, fmt, args...)						\
+	do {										\
+	if ((debug) >= (level))								\
+		rtdm_printk(KERN_DEBUG A4L_PROMPT "%s: " fmt, __FUNCTION__ , ##args);	\
+	} while (0)
+#endif
+
+#define core_dbg CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_LEVEL
+#define drv_dbg CONFIG_XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */
+
+#define __a4l_dbg(level, debug, fmt, args...)
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */
+
+#define __a4l_dev_name(dev) 						\
+	(dev->driver == NULL) ? "unattached dev" : dev->driver->board_name
+
+#define a4l_err(dev, fmt, args...) 					\
+	__a4l_err("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_warn(dev, fmt, args...) 					\
+	__a4l_warn("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_info(dev, fmt, args...) 					\
+	__a4l_info("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_dbg(level, debug, dev, fmt, args...)			\
+	__a4l_dbg(level, debug, "%s: " fmt, __a4l_dev_name(dev), ##args)
+
+
+/* --- Time section --- */
+static inline void a4l_udelay(unsigned int us)
+{
+	rtdm_task_busy_sleep(((nanosecs_rel_t) us) * 1000);
+}
+
+/* Function which gives absolute time */
+nanosecs_abs_t a4l_get_time(void);
+
+/* Function for setting up the absolute time recovery */
+void a4l_init_time(void);
+
+/* --- IRQ section --- */
+#define A4L_IRQ_DISABLED 0
+
+typedef int (*a4l_irq_hdlr_t) (unsigned int irq, void *d);
+
+struct a4l_irq_descriptor {
+	/* These fields are needed to launch the IRQ trampoline;
+	   that is why a dedicated structure is defined */
+	a4l_irq_hdlr_t handler;
+	unsigned int irq;
+	void *cookie;
+	rtdm_irq_t rtdm_desc;
+};
+
+int __a4l_request_irq(struct a4l_irq_descriptor * dsc,
+		      unsigned int irq,
+		      a4l_irq_hdlr_t handler,
+		      unsigned long flags, void *cookie);
+int __a4l_free_irq(struct a4l_irq_descriptor * dsc);
+
+/* --- Synchronization section --- */
+#define __NRT_WAITER 1
+#define __RT_WAITER 2
+#define __EVT_PDING 3
+
+struct a4l_sync {
+	unsigned long status;
+	rtdm_event_t rtdm_evt;
+	rtdm_nrtsig_t nrt_sig;
+	wait_queue_head_t wq;
+};
+
+#define a4l_select_sync(snc, slr, type, fd) \
+	rtdm_event_select(&((snc)->rtdm_evt), slr, type, fd)
+
+int a4l_init_sync(struct a4l_sync * snc);
+void a4l_cleanup_sync(struct a4l_sync * snc);
+void a4l_flush_sync(struct a4l_sync * snc);
+int a4l_wait_sync(struct a4l_sync * snc, int rt);
+int a4l_timedwait_sync(struct a4l_sync * snc,
+		       int rt, unsigned long long ns_timeout);
+void a4l_signal_sync(struct a4l_sync * snc);
+
+#endif /* !_COBALT_RTDM_ANALOGY_RTDM_HELPERS_H */
+++ linux-patched/include/xenomai/rtdm/analogy/subdevice.h	2022-03-21 12:58:31.835865109 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/context.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, subdevice related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_SUBDEVICE_H
+#define _COBALT_RTDM_ANALOGY_SUBDEVICE_H
+
+#include <linux/list.h>
+#include <rtdm/analogy/instruction.h>
+#include <rtdm/analogy/command.h>
+#include <rtdm/analogy/channel_range.h>
+
+/* --- Subdevice descriptor structure --- */
+
+struct a4l_device;
+struct a4l_buffer;
+
+/*!
+ * @brief Structure describing the subdevice
+ * @see a4l_add_subd()
+ */
+
+struct a4l_subdevice {
+
+	struct list_head list;
+			   /**< List stuff */
+
+	struct a4l_device *dev;
+			       /**< Containing device */
+
+	unsigned int idx;
+		      /**< Subdevice index */
+
+	struct a4l_buffer *buf;
+			       /**< Linked buffer */
+
+	/* Subdevice's status (busy, linked?) */
+	unsigned long status;
+			     /**< Subdevice's status */
+
+	/* Descriptors stuff */
+	unsigned long flags;
+			 /**< Type flags */
+	struct a4l_channels_desc *chan_desc;
+				/**< Tab of channels descriptors pointers */
+	struct a4l_rngdesc *rng_desc;
+				/**< Tab of ranges descriptors pointers */
+	struct a4l_cmd_desc *cmd_mask;
+			    /**< Command capabilities mask */
+
+	/* Functions stuff */
+	int (*insn_read) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							/**< Callback for the instruction "read" */
+	int (*insn_write) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							 /**< Callback for the instruction "write" */
+	int (*insn_bits) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							/**< Callback for the instruction "bits" */
+	int (*insn_config) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							  /**< Callback for the configuration instruction */
+	int (*do_cmd) (struct a4l_subdevice *, struct a4l_cmd_desc *);
+					/**< Callback for command handling */
+	int (*do_cmdtest) (struct a4l_subdevice *, struct a4l_cmd_desc *);
+						       /**< Callback for command checking */
+	void (*cancel) (struct a4l_subdevice *);
+					 /**< Callback for asynchronous transfer cancellation */
+	void (*munge) (struct a4l_subdevice *, void *, unsigned long);
+								/**< Callback for munge operation */
+	int (*trigger) (struct a4l_subdevice *, lsampl_t);
+					      /**< Callback for trigger operation */
+
+	char priv[0];
+		  /**< Private data */
+};
+
+/* --- Subdevice related functions and macros --- */
+
+struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice * sb, int idx);
+struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice * sb, int chidx, int rngidx);
+int a4l_check_chanlist(struct a4l_subdevice * subd,
+		       unsigned char nb_chan, unsigned int *chans);
+
+#define a4l_subd_is_input(x) ((A4L_SUBD_MASK_READ & (x)->flags) != 0)
+/* The following macro regards a DIO subdevice as primarily an
+   output subdevice */
+#define a4l_subd_is_output(x) \
+	((A4L_SUBD_MASK_WRITE & (x)->flags) != 0 || \
+	 (A4L_SUBD_DIO & (x)->flags) != 0)
+
+/* --- Upper layer functions --- */
+
+struct a4l_subdevice * a4l_get_subd(struct a4l_device *dev, int idx);
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+			    void (*setup)(struct a4l_subdevice *));
+int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice * subd);
+int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg);
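+
+/*
+ * Minimal usage sketch (hypothetical names, not part of this header):
+ * a driver's attach routine typically allocates a subdevice, fills the
+ * descriptors from a setup callback, then registers it. The A4L_SUBD_AI
+ * type flag is assumed to come from rtdm/uapi/analogy.h.
+ *
+ *	static void example_setup(struct a4l_subdevice *subd)
+ *	{
+ *		subd->flags = A4L_SUBD_AI;
+ *		subd->chan_desc = &example_chan;
+ *		subd->rng_desc = &example_rng;
+ *		subd->insn_read = example_insn_read;
+ *	}
+ *
+ *	subd = a4l_alloc_subd(sizeof(struct example_priv), example_setup);
+ *	if (subd == NULL)
+ *		return -ENOMEM;
+ *	err = a4l_add_subd(dev, subd);
+ */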
+
+#endif /* !_COBALT_RTDM_ANALOGY_SUBDEVICE_H */
+++ linux-patched/include/xenomai/rtdm/analogy/context.h	2022-03-21 12:58:31.827865187 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/instruction.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, context structure / macros declarations
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_CONTEXT_H
+#define _COBALT_RTDM_ANALOGY_CONTEXT_H
+
+#include <rtdm/driver.h>
+
+struct a4l_device;
+struct a4l_buffer;
+
+struct a4l_device_context {
+	/* The adequate device pointer
+	   (retrieved thanks to minor at open time) */
+	struct a4l_device *dev;
+
+	/* The buffer structure contains everything to transfer data
+	   from asynchronous acquisition operations on a specific
+	   subdevice */
+	struct a4l_buffer *buffer;
+};
+
+static inline int a4l_get_minor(struct a4l_device_context *cxt)
+{
+	/* Get a pointer on the container structure */
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	/* Get the minor index */
+	return rtdm_fd_minor(fd);
+}
+
+#endif /* !_COBALT_RTDM_ANALOGY_CONTEXT_H */
+++ linux-patched/include/xenomai/rtdm/analogy/instruction.h	2022-03-21 12:58:31.820865256 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/channel_range.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, instruction related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+#define _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+
+struct a4l_kernel_instruction {
+	unsigned int type;
+	unsigned int idx_subd;
+	unsigned int chan_desc;
+	unsigned int data_size;
+	void *data;
+	void *__udata;
+};
+
+struct a4l_kernel_instruction_list {
+	unsigned int count;
+	struct a4l_kernel_instruction *insns;
+	a4l_insn_t *__uinsns;
+};
+
+/* Instruction related functions */
+
+/* Upper layer functions */
+int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_INSTRUCTION_H */
+++ linux-patched/include/xenomai/rtdm/analogy/channel_range.h	2022-03-21 12:58:31.812865334 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/driver.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, channel, range related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+#define _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+
+#include <rtdm/uapi/analogy.h>
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_channel_range Channels and ranges
+ *
+ * Channels
+ *
+ * According to the Analogy nomenclature, the channel is the elementary
+ * acquisition entity. One channel is supposed to acquire one data
+ * sample at a time. A channel can be:
+ * - an analog input or an analog output;
+ * - a digital input or a digital output;
+ *
+ * Channels are defined by their type and by some other
+ * characteristics like:
+ * - their resolution for analog channels (which usually ranges from
+     8 to 32 bits);
+ * - their references;
+ *
+ * Such parameters must be declared for each channel composing a
+ * subdevice. The structure a4l_channel (struct a4l_channel) is used to
+ * define one channel.
+ *
+ * Another structure named a4l_channels_desc (struct a4l_channels_desc)
+ * gathers all channels for a specific subdevice. This latter
+ * structure also stores:
+ * - the channels count;
+ * - the channels declaration mode (A4L_CHAN_GLOBAL_CHANDESC or
+     A4L_CHAN_PERCHAN_CHANDESC): if all the channels composing a
+     subdevice are identical, there is no need to declare the
+     parameters for each channel; the global declaration mode eases
+     the structure composition.
+ *
+ * Usually the channels descriptor looks like this:
+ * <tt> @verbatim
+struct a4l_channels_desc example_chan = {
+	mode: A4L_CHAN_GLOBAL_CHANDESC, -> Global declaration
+					      mode is set
+	length: 8, -> 8 channels
+	chans: {
+		{A4L_CHAN_AREF_GROUND, 16}, -> Each channel is 16 bits
+						  wide with the ground as
+						  reference
+	},
+};
+@endverbatim </tt>
+ *
+ * Ranges
+ *
+ * So as to perform conversion from logical values acquired by the
+ * device to physical units, some range structure(s) must be declared
+ * on the driver side.
+ *
+ * Such structures contain:
+ * - the physical unit type (Volt, Ampere, none);
+ * - the minimal and maximal values;
+ *
+ * These range structures must be associated with the channels at
+ * subdevice registration time as a channel can work with many
+ * ranges. At configuration time (thanks to an Analogy command), one
+ * range will be selected for each enabled channel.
+ *
+ * Consequently, for each channel, the developer must declare all the
+ * possible ranges in a structure called struct a4l_rngtab. Here is an
+ * example:
+ * <tt> @verbatim
+struct a4l_rngtab example_tab = {
+    length: 2,
+    rngs: {
+	RANGE_V(-5,5),
+	RANGE_V(-10,10),
+    },
+};
+@endverbatim </tt>
+ *
+ * For each subdevice, a specific structure is designed to gather all
+ * the ranges tabs of all the channels. In this structure, called
+ * struct a4l_rngdesc, three fields must be filled:
+ * - the declaration mode (A4L_RNG_GLOBAL_RNGDESC or
+ *   A4L_RNG_PERCHAN_RNGDESC);
+ * - the number of range tabs;
+ * - the tab of ranges tabs pointers;
+ *
+ * Most of the time, the channels which belong to the same subdevice
+ * use the same set of ranges. So, there is no need to declare the
+ * same ranges for each channel. A macro is defined to prevent
+ * redundant declarations: RNG_GLOBAL().
+ *
+ * Here is an example:
+ * <tt> @verbatim
+struct a4l_rngdesc example_rng = RNG_GLOBAL(example_tab);
+@endverbatim </tt>
+ *
+ * @{
+ */
+
+
+/* --- Channel section --- */
+
+/*!
+ * @anchor A4L_CHAN_AREF_xxx @name Channel reference
+ * @brief Flags to define the channel's reference
+ * @{
+ */
+
+/**
+ * Ground reference
+ */
+#define A4L_CHAN_AREF_GROUND 0x1
+/**
+ * Common reference
+ */
+#define A4L_CHAN_AREF_COMMON 0x2
+/**
+ * Differential reference
+ */
+#define A4L_CHAN_AREF_DIFF 0x4
+/**
+ * Misc reference
+ */
+#define A4L_CHAN_AREF_OTHER 0x8
+
+	  /*! @} A4L_CHAN_AREF_xxx */
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_CHAN_GLOBAL 0x10
+
+/*!
+ * @brief Structure describing some channel's characteristics
+ */
+
+struct a4l_channel {
+	unsigned long flags; /*!< Channel flags to define the reference. */
+	unsigned long nb_bits; /*!< Channel resolution. */
+};
+
+/*!
+ * @anchor A4L_CHAN_xxx @name Channels declaration mode
+ * @brief Constant to define whether the channels in a descriptor are
+ * identical
+ * @{
+ */
+
+/**
+ * Global declaration, the set contains channels with similar
+ * characteristics
+ */
+#define A4L_CHAN_GLOBAL_CHANDESC 0
+/**
+ * Per channel declaration, the descriptor gathers different channels
+ */
+#define A4L_CHAN_PERCHAN_CHANDESC 1
+
+	  /*! @} A4L_CHAN_xxx */
+
+/*!
+ * @brief Structure describing a channels set
+ */
+
+struct a4l_channels_desc {
+	unsigned long mode; /*!< Declaration mode (global or per channel) */
+	unsigned long length; /*!< Channels count */
+	struct a4l_channel chans[]; /*!< Channels tab */
+};
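+
+/*
+ * Hedged example (hypothetical values): a per-channel descriptor for a
+ * subdevice mixing an 8-bit and a 16-bit channel could look like this:
+ *
+ *	struct a4l_channels_desc example_perchan = {
+ *		.mode = A4L_CHAN_PERCHAN_CHANDESC,
+ *		.length = 2,
+ *		.chans = {
+ *			{A4L_CHAN_AREF_GROUND, 8},
+ *			{A4L_CHAN_AREF_DIFF, 16},
+ *		},
+ *	};
+ */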
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_RNG_GLOBAL 0x8
+
+/*!
+ * @brief Structure describing a (unique) range
+ */
+
+struct a4l_range {
+	long min; /*!< Minimal value */
+	long max; /*!< Maximal value */
+	unsigned long flags; /*!< Range flags (unit, etc.) */
+};
+
+/**
+ * Macro to declare a (unique) range with no unit defined
+ */
+#define RANGE(x,y) {(x * A4L_RNG_FACTOR), (y * A4L_RNG_FACTOR),	\
+			A4L_RNG_NO_UNIT}
+/**
+ * Macro to declare a (unique) range in Volt
+ */
+#define RANGE_V(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_VOLT_UNIT}
+/**
+ * Macro to declare a (unique) range in milliAmpere
+ */
+#define RANGE_mA(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_MAMP_UNIT}
+/**
+ * Macro to declare a (unique) range in some external reference
+ */
+#define RANGE_ext(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_EXT_UNIT}
+
+
+/* Ranges tab descriptor */
+struct a4l_rngtab {
+	unsigned char length;
+	struct a4l_range rngs[];
+};
+
+/**
+ * Constant to define a ranges descriptor as global (inter-channel)
+ */
+#define A4L_RNG_GLOBAL_RNGDESC 0
+/**
+ * Constant to define a ranges descriptor as specific for a channel
+ */
+#define A4L_RNG_PERCHAN_RNGDESC 1
+
+/* Global ranges descriptor */
+struct a4l_rngdesc {
+	unsigned char mode;
+	unsigned char length;
+	struct a4l_rngtab *rngtabs[];
+};
+
+/**
+ * Macro to declare a ranges global descriptor in one line
+ */
+#define RNG_GLOBAL(x) {			\
+	.mode = A4L_RNG_GLOBAL_RNGDESC,	\
+	.length =  1,			\
+	.rngtabs = {&(x)},		\
+}
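+
+/*
+ * Hedged example (hypothetical tabs): when the channels of a subdevice
+ * need distinct range sets, a per-channel descriptor can be filled by
+ * hand instead of using RNG_GLOBAL():
+ *
+ *	struct a4l_rngdesc example_perchan_rng = {
+ *		.mode = A4L_RNG_PERCHAN_RNGDESC,
+ *		.length = 2,
+ *		.rngtabs = {&tab_chan0, &tab_chan1},
+ *	};
+ */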
+
+extern struct a4l_rngdesc a4l_range_bipolar10;
+extern struct a4l_rngdesc a4l_range_bipolar5;
+extern struct a4l_rngdesc a4l_range_unipolar10;
+extern struct a4l_rngdesc a4l_range_unipolar5;
+extern struct a4l_rngdesc a4l_range_unknown;
+extern struct a4l_rngdesc a4l_range_fake;
+
+#define range_digital a4l_range_unipolar5
+
+/*! @} analogy_channel_range */
+
+#endif /* !_COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H */
+++ linux-patched/include/xenomai/rtdm/analogy/driver.h	2022-03-21 12:58:31.805865402 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/device.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * @file
+ * Analogy for Linux, driver facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_DRIVER_H
+#define _COBALT_RTDM_ANALOGY_DRIVER_H
+
+#include <linux/list.h>
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/context.h>
+#include <rtdm/analogy/buffer.h>
+
+struct seq_file;
+struct a4l_link_desc;
+struct a4l_device;
+
+/** Structure containing driver declaration data.
+ *
+ *  @see a4l_register_drv()
+ */
+/* Analogy driver descriptor */
+struct a4l_driver {
+
+	/* List stuff */
+	struct list_head list;
+			   /**< List stuff */
+
+	/* Visible description stuff */
+	struct module *owner;
+	               /**< Pointer to module containing the code */
+	unsigned int flags;
+	               /**< Type / status driver's flags */
+	char *board_name;
+		       /**< Board name */
+	char *driver_name;
+	               /**< driver name */
+	int privdata_size;
+		       /**< Size of the driver's private data */
+
+	/* Init/destroy procedures */
+	int (*attach) (struct a4l_device *, struct a4l_link_desc *);
+								      /**< Attach procedure */
+	int (*detach) (struct a4l_device *);
+				   /**< Detach procedure */
+
+};
+
+/* Driver list related functions */
+
+int a4l_register_drv(struct a4l_driver * drv);
+int a4l_unregister_drv(struct a4l_driver * drv);
+int a4l_lct_drv(char *pin, struct a4l_driver ** pio);
+#ifdef CONFIG_PROC_FS
+int a4l_rdproc_drvs(struct seq_file *p, void *data);
+#endif /* CONFIG_PROC_FS */
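+
+/*
+ * Minimal registration sketch (hypothetical names, not part of this
+ * header): a driver fills the descriptor and registers it from its
+ * module init routine, e.g.
+ *
+ *	static struct a4l_driver example_drv = {
+ *		.owner = THIS_MODULE,
+ *		.board_name = "example_board",
+ *		.driver_name = "example_drv",
+ *		.attach = example_attach,
+ *		.detach = example_detach,
+ *		.privdata_size = sizeof(struct example_priv),
+ *	};
+ *
+ *	err = a4l_register_drv(&example_drv);
+ */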
+
+#endif /* !_COBALT_RTDM_ANALOGY_DRIVER_H */
+++ linux-patched/include/xenomai/rtdm/analogy/device.h	2022-03-21 12:58:31.797865480 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/buffer.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, device related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_DEVICE_H
+#define _COBALT_RTDM_ANALOGY_DEVICE_H
+
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/transfer.h>
+#include <rtdm/analogy/driver.h>
+
+#define A4L_NB_DEVICES 10
+
+#define A4L_DEV_ATTACHED_NR 0
+
+struct a4l_device {
+
+	/* Spinlock for global device use */
+	rtdm_lock_t lock;
+
+	/* Device specific flags */
+	unsigned long flags;
+
+	/* Driver assigned to this device thanks to attaching
+	   procedure */
+	struct a4l_driver *driver;
+
+	/* Hidden description stuff */
+	struct list_head subdvsq;
+
+	/* Context-dependent stuff */
+	struct a4l_transfer transfer;
+
+	/* Private data useful for drivers functioning */
+	void *priv;
+};
+
+/* --- Devices tab related functions --- */
+void a4l_init_devs(void);
+int a4l_check_cleanup_devs(void);
+int a4l_rdproc_devs(struct seq_file *p, void *data);
+
+/* --- Context related function / macro --- */
+void a4l_set_dev(struct a4l_device_context *cxt);
+#define a4l_get_dev(x) ((x)->dev)
+
+/* --- Upper layer functions --- */
+int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_DEVICE_H */
+++ linux-patched/include/xenomai/rtdm/analogy/buffer.h	2022-03-21 12:58:31.790865548 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/command.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, buffer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_BUFFER_H
+#define _COBALT_RTDM_ANALOGY_BUFFER_H
+
+#include <linux/version.h>
+#include <linux/mm.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/context.h>
+#include <rtdm/analogy/command.h>
+#include <rtdm/analogy/subdevice.h>
+
+/* --- Events bits / flags --- */
+
+#define A4L_BUF_EOBUF_NR 0
+#define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR)
+
+#define A4L_BUF_ERROR_NR 1
+#define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR)
+
+#define A4L_BUF_EOA_NR 2
+#define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR)
+
+/* --- Status bits / flags --- */
+
+#define A4L_BUF_BULK_NR 8
+#define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR)
+
+#define A4L_BUF_MAP_NR 9
+#define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR)
+
+
+/* Buffer descriptor structure */
+struct a4l_buffer {
+
+	/* Subdevice the buffer is attached to (added by a structure update) */
+	struct a4l_subdevice *subd;
+
+	/* Buffer's first virtual page pointer */
+	void *buf;
+
+	/* Buffer's global size */
+	unsigned long size;
+	/* Tab containing buffer's pages pointers */
+	unsigned long *pg_list;
+
+	/* RT/NRT synchronization element */
+	struct a4l_sync sync;
+
+	/* Counters needed for transfer */
+	unsigned long end_count;
+	unsigned long prd_count;
+	unsigned long cns_count;
+	unsigned long tmp_count;
+
+	/* Status + events occurring during transfer */
+	unsigned long flags;
+
+	/* Command in progress */
+	struct a4l_cmd_desc *cur_cmd;
+
+	/* Munge counter */
+	unsigned long mng_count;
+
+	/* Threshold below which the user process should not be
+	   awakened */
+	unsigned long wake_count;
+};
+
+static inline void __dump_buffer_counters(struct a4l_buffer *buf)
+{
+	__a4l_dbg(1, core_dbg, "a4l_buffer=0x%p, p=0x%p\n", buf, buf->buf);
+	__a4l_dbg(1, core_dbg, "end=%06lu, prd=%06lu, cns=%06lu, tmp=%06lu\n",
+		buf->end_count, buf->prd_count, buf->cns_count, buf->tmp_count);
+}
+
+/* --- Static inline functions related with
+   user<->kernel data transfers --- */
+
+/* The function __produce is an inline function which copies data into
+   the asynchronous buffer, handling the wrap-around at the end of the
+   ring. This function is used in read and write operations */
+static inline int __produce(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pin, unsigned long count)
+{
+	unsigned long start_ptr = (buf->prd_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(buf->buf + start_ptr, pin, blk_size);
+		else
+			ret = rtdm_safe_copy_from_user(fd,
+						       buf->buf + start_ptr,
+						       pin, blk_size);
+
+		/* Update pointers/counts */
+		pin += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The function __consume is an inline function which copies data from
+   the asynchronous buffer, handling the wrap-around at the end of the
+   ring. This function is used in read and write operations */
+static inline int __consume(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pout, unsigned long count)
+{
+	unsigned long start_ptr = (buf->cns_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(pout, buf->buf + start_ptr, blk_size);
+		else
+			ret = rtdm_safe_copy_to_user(fd,
+						     pout,
+						     buf->buf + start_ptr,
+						     blk_size);
+
+		/* Update pointers/counts */
+		pout += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The function __munge is an inline function which calls the
+   subdevice specific munge callback on contiguous windows within the
+   whole buffer. This function is used in read and write operations */
+static inline void __munge(struct a4l_subdevice * subd,
+			   void (*munge) (struct a4l_subdevice *,
+					  void *, unsigned long),
+			   struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long start_ptr = (buf->mng_count % buf->size);
+	unsigned long tmp_cnt = count;
+
+	while (tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the munge operation */
+		munge(subd, buf->buf + start_ptr, blk_size);
+
+		/* Update the start pointer and the count */
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+}
+
+/* The function __handle_event can only be called from process context
+   (not interrupt service routine). It allows the client process to
+   retrieve the buffer status which has been updated by the driver */
+static inline int __handle_event(struct a4l_buffer * buf)
+{
+	int ret = 0;
+
+	/* The event "End of acquisition" must not be cleared
+	   before the buffer has been completely flushed */
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
+		ret = -ENOENT;
+
+	if (test_bit(A4L_BUF_ERROR_NR, &buf->flags))
+		ret = -EPIPE;
+
+	return ret;
+}
+
+/* --- Counters management functions --- */
+
+/* Here, we may wonder why we need more than two counters / pointers.
+
+   Theoretically, we only need two counters (or two pointers):
+   - one which tells where the reader should be within the buffer
+   - one which tells where the writer should be within the buffer
+
+   With these two counters (or pointers), we just have to check that
+   the writer does not overtake the reader inside the ring buffer
+   BEFORE any read / write operations.
+
+   However, if one element is a DMA controller, we have to be more
+   careful. Generally a DMA transfer occurs like this:
+   DMA shot
+      |-> then DMA interrupt
+	 |-> then DMA soft handler which checks the counter
+
+   So, the checks occur AFTER the write operations.
+
+   Let's take an example: the reader is a software task and the writer
+   is a DMA controller. At the end of the DMA shot, the write counter
+   is higher than the read counter. Unfortunately, a read operation
+   occurs between the DMA shot and the DMA interrupt, so the handler
+   will not notice that an overflow occurred.
+
+   That is why tmp_count comes into play: tmp_count records the
+   read/consumer current counter before the next DMA shot and once the
+   next DMA shot is done, we check that the updated writer/producer
+   counter is not higher than tmp_count. Thus we are sure that the DMA
+   writer has not overtaken the reader because it was not able to
+   overtake the n-1 value. */
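+
+/*
+ * Hedged usage sketch (hypothetical variables): from the driver side,
+ * these counters are normally driven through the prepare/commit helpers
+ * declared later in this header rather than directly, e.g. for a device
+ * feeding the buffer from its interrupt handler:
+ *
+ *	err = a4l_buf_prepare_put(subd, nbytes);
+ *	if (err == 0) {
+ *		... trigger or perform the hardware transfer ...
+ *		err = a4l_buf_commit_put(subd, nbytes);
+ *	}
+ */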
+
+static inline int __pre_abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	if (count - buf->tmp_count > buf->size) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+	buf->tmp_count = buf->cns_count;
+
+	return 0;
+}
+
+static inline int __pre_put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_put(buf, buf->tmp_count + count);
+}
+
+static inline int __pre_abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	/* The first time, we expect the buffer to be properly filled
+	before the trigger occurrence; consequently, tmp_count must
+	have been initialized, and tmp_count is updated right here */
+	if (buf->tmp_count == 0 || buf->cns_count == 0)
+		goto out;
+
+	/* At the end of the acquisition, the user application has
+	written the defined amount of data into the buffer; so, on the
+	last shot, the DMA channel may legitimately overtake the tmp
+	frontier because no more data come from user space;
+	therefore no spurious alarm should be raised */
+	if (buf->end_count != 0 && (long)(count - buf->end_count) > 0)
+		goto out;
+
+	/* Once these exceptions have been handled, we check that the DMA
+	transfer has not overtaken the last record of the production
+	count (tmp_count was updated with prd_count the last time
+	__pre_abs_get was called). Note that we cannot
+	compare the current DMA count with the current production
+	count because even if, right now, the production count is
+	higher than the DMA count, it does not mean that the DMA count
+	was not greater a few cycles before; in such a case, the DMA
+	channel would have retrieved the wrong data */
+	if ((long)(count - buf->tmp_count) > 0) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+out:
+	buf->tmp_count = buf->prd_count;
+
+	return 0;
+}
+
+static inline int __pre_get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_get(buf, buf->tmp_count + count);
+}
+
+static inline int __abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->prd_count;
+
+	if ((long)(buf->prd_count - count) >= 0)
+		return -EINVAL;
+
+	buf->prd_count = count;
+
+	if ((old / buf->size) != (count / buf->size))
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_put(buf, buf->prd_count + count);
+}
+
+static inline int __abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->cns_count;
+
+	if ((long)(buf->cns_count - count) >= 0)
+		return -EINVAL;
+
+	buf->cns_count = count;
+
+	if ((old / buf->size) != count / buf->size)
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_get(buf, buf->cns_count + count);
+}
+
+static inline unsigned long __count_to_put(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	if ((long) (buf->size + buf->cns_count - buf->prd_count) > 0)
+		ret = buf->size + buf->cns_count - buf->prd_count;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static inline unsigned long __count_to_get(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	/* If the acquisition is unlimited (end_count == 0), we must
+	   not take into account end_count */
+	if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0)
+		ret = buf->prd_count;
+	else
+		ret = buf->end_count;
+
+	if ((long)(ret - buf->cns_count) > 0)
+		ret -= buf->cns_count;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static inline unsigned long __count_to_end(struct a4l_buffer * buf)
+{
+	unsigned long ret = buf->end_count - buf->cns_count;
+
+	if (buf->end_count == 0)
+		return ULONG_MAX;
+
+	return ((long)ret) < 0 ? 0 : ret;
+}
+
+/* --- Buffer internal functions --- */
+
+int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size);
+
+void a4l_free_buffer(struct a4l_buffer *buf_desc);
+
+void a4l_init_buffer(struct a4l_buffer * buf_desc);
+
+void a4l_cleanup_buffer(struct a4l_buffer * buf_desc);
+
+int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd);
+
+void a4l_cancel_buffer(struct a4l_device_context *cxt);
+
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd,
+			   unsigned long count);
+
+int a4l_buf_commit_absput(struct a4l_subdevice *subd,
+			  unsigned long count);
+
+int a4l_buf_prepare_put(struct a4l_subdevice *subd,
+			unsigned long count);
+
+int a4l_buf_commit_put(struct a4l_subdevice *subd,
+		       unsigned long count);
+
+int a4l_buf_put(struct a4l_subdevice *subd,
+		void *bufdata, unsigned long count);
+
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd,
+			   unsigned long count);
+
+int a4l_buf_commit_absget(struct a4l_subdevice *subd,
+			  unsigned long count);
+
+int a4l_buf_prepare_get(struct a4l_subdevice *subd,
+			unsigned long count);
+
+int a4l_buf_commit_get(struct a4l_subdevice *subd,
+		       unsigned long count);
+
+int a4l_buf_get(struct a4l_subdevice *subd,
+		void *bufdata, unsigned long count);
+
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
+
+unsigned long a4l_buf_count(struct a4l_subdevice *subd);
+
+/* --- Current Command management function --- */
+
+static inline struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice *subd)
+{
+	return (subd->buf) ? subd->buf->cur_cmd : NULL;
+}
+
+/* --- Munge related function --- */
+
+int a4l_get_chan(struct a4l_subdevice *subd);
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_mmap(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg);
+ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes);
+ssize_t a4l_write_buffer(struct a4l_device_context * cxt, const void *bufdata, size_t nbytes);
+int a4l_select(struct a4l_device_context *cxt,
+	       rtdm_selector_t *selector,
+	       enum rtdm_selecttype type, unsigned fd_index);
+
+#endif /* !_COBALT_RTDM_ANALOGY_BUFFER_H */
+++ linux-patched/include/xenomai/rtdm/analogy/command.h	2022-03-21 12:58:31.783865616 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/analogy/transfer.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_COMMAND_H
+#define _COBALT_RTDM_ANALOGY_COMMAND_H
+
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy/context.h>
+
+#define CR_CHAN(a) CHAN(a)
+#define CR_RNG(a) (((a)>>16)&0xff)
+#define CR_AREF(a) (((a)>>24)&0xf)
+
+/* --- Command related function --- */
+void a4l_free_cmddesc(struct a4l_cmd_desc * desc);
+
+/* --- Upper layer functions --- */
+int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc);
+int a4l_ioctl_cmd(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_COMMAND_H */
+++ linux-patched/include/xenomai/rtdm/analogy/transfer.h	2022-03-21 12:58:31.775865694 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/can.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Analogy for Linux, transfer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_TRANSFER_H
+#define _COBALT_RTDM_ANALOGY_TRANSFER_H
+
+#include <rtdm/analogy/buffer.h>
+
+/* IRQ types */
+#define A4L_IRQ_DISABLED 0
+
+/* Fields init values */
+#define A4L_IRQ_UNUSED (unsigned int)((unsigned short)(~0))
+#define A4L_IDX_UNUSED (unsigned int)(~0)
+
+/* TODO: IRQ handling must leave transfer for os_facilities */
+
+struct a4l_device;
+/* Analogy transfer descriptor */
+struct a4l_transfer {
+
+	/* Subdevices desc */
+	unsigned int nb_subd;
+	struct a4l_subdevice **subds;
+
+	/* Buffer stuff: the default size */
+	unsigned int default_bufsize;
+
+	/* IRQ in use */
+	/* TODO: irq_desc should vanish */
+	struct a4l_irq_descriptor irq_desc;
+};
+
+/* --- Proc function --- */
+
+int a4l_rdproc_transfer(struct seq_file *p, void *data);
+
+/* --- Upper layer functions --- */
+
+void a4l_presetup_transfer(struct a4l_device_context * cxt);
+int a4l_setup_transfer(struct a4l_device_context * cxt);
+int a4l_precleanup_transfer(struct a4l_device_context * cxt);
+int a4l_cleanup_transfer(struct a4l_device_context * cxt);
+int a4l_reserve_transfer(struct a4l_device_context * cxt, int idx_subd);
+int a4l_init_transfer(struct a4l_device_context * cxt, struct a4l_cmd_desc * cmd);
+int a4l_cancel_transfer(struct a4l_device_context * cxt, int idx_subd);
+int a4l_cancel_transfers(struct a4l_device_context * cxt);
+
+ssize_t a4l_put(struct a4l_device_context * cxt, void *buf, size_t nbytes);
+ssize_t a4l_get(struct a4l_device_context * cxt, void *buf, size_t nbytes);
+
+int a4l_request_irq(struct a4l_device *dev,
+		    unsigned int irq,
+		    a4l_irq_hdlr_t handler,
+		    unsigned long flags, void *cookie);
+int a4l_free_irq(struct a4l_device *dev, unsigned int irq);
+unsigned int a4l_get_irq(struct a4l_device *dev);
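+
+/*
+ * Hedged sketch (hypothetical names): a driver would typically hook its
+ * interrupt line at attach time and release it on detach, e.g.
+ *
+ *	err = a4l_request_irq(dev, irq, example_isr, example_flags, dev);
+ *	...
+ *	a4l_free_irq(dev, irq);
+ *
+ * where example_isr and example_flags are driver-specific.
+ */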
+
+int a4l_ioctl_cancel(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_TRANSFER_H */
+++ linux-patched/include/xenomai/rtdm/can.h	2022-03-21 12:58:31.768865763 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/net.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                    <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_CAN_H
+#define _COBALT_RTDM_CAN_H
+
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/can.h>
+
+#endif /* _COBALT_RTDM_CAN_H */
+++ linux-patched/include/xenomai/rtdm/net.h	2022-03-21 12:58:31.761865831 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/rtdm/autotune.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _COBALT_RTDM_NET_H
+#define _COBALT_RTDM_NET_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/net.h>
+#include <rtdm/driver.h>
+
+struct rtnet_callback {
+    void    (*func)(struct rtdm_fd *, void *);
+    void    *arg;
+};
+
+#define RTNET_RTIOC_CALLBACK    _IOW(RTIOC_TYPE_NETWORK, 0x12, \
+				     struct rtnet_callback)
+
+/* utility functions */
+
+/* provided by rt_ipv4 */
+unsigned long rt_inet_aton(const char *ip);
+
+/* provided by rt_packet */
+int rt_eth_aton(unsigned char *addr_buf, const char *mac);
+
+#define RTNET_RTDM_VER 914
+
+#endif  /* _COBALT_RTDM_NET_H */
+++ linux-patched/include/xenomai/rtdm/autotune.h	2022-03-21 12:58:31.753865909 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_AUTOTUNE_H
+#define _COBALT_RTDM_AUTOTUNE_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/autotune.h>
+
+#endif /* !_COBALT_RTDM_AUTOTUNE_H */
+++ linux-patched/include/xenomai/cobalt/uapi/sched.h	2022-03-21 12:58:32.206861492 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/mutex.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SCHED_H
+#define _COBALT_UAPI_SCHED_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define SCHED_COBALT		42
+#define SCHED_WEAK		43
+
+#ifndef SCHED_SPORADIC
+#define SCHED_SPORADIC		10
+#define sched_ss_low_priority	sched_u.ss.__sched_low_priority
+#define sched_ss_repl_period	sched_u.ss.__sched_repl_period
+#define sched_ss_init_budget	sched_u.ss.__sched_init_budget
+#define sched_ss_max_repl	sched_u.ss.__sched_max_repl
+#endif	/* !SCHED_SPORADIC */
+
+struct __sched_ss_param {
+	int __sched_low_priority;
+	struct __user_old_timespec __sched_repl_period;
+	struct __user_old_timespec __sched_init_budget;
+	int __sched_max_repl;
+};
+
+#define sched_rr_quantum	sched_u.rr.__sched_rr_quantum
+
+struct __sched_rr_param {
+	struct __user_old_timespec __sched_rr_quantum;
+};
+
+#ifndef SCHED_TP
+#define SCHED_TP		11
+#define sched_tp_partition	sched_u.tp.__sched_partition
+#endif	/* !SCHED_TP */
+
+struct __sched_tp_param {
+	int __sched_partition;
+};
+
+struct sched_tp_window {
+	struct __user_old_timespec offset;
+	struct __user_old_timespec duration;
+	int ptid;
+};
+
+enum {
+	sched_tp_install,
+	sched_tp_uninstall,
+	sched_tp_start,
+	sched_tp_stop,
+};
+
+struct __sched_config_tp {
+	int op;
+	int nr_windows;
+	struct sched_tp_window windows[0];
+};
+
+#define sched_tp_confsz(nr_win) \
+  (sizeof(struct __sched_config_tp) + nr_win * sizeof(struct sched_tp_window))
+
+#ifndef SCHED_QUOTA
+#define SCHED_QUOTA		12
+#define sched_quota_group	sched_u.quota.__sched_group
+#endif	/* !SCHED_QUOTA */
+
+struct __sched_quota_param {
+	int __sched_group;
+};
+
+enum {
+	sched_quota_add,
+	sched_quota_remove,
+	sched_quota_force_remove,
+	sched_quota_set,
+	sched_quota_get,
+};
+
+struct __sched_config_quota {
+	int op;
+	union {
+		struct {
+			int pshared;
+		} add;
+		struct {
+			int tgid;
+		} remove;
+		struct {
+			int tgid;
+			int quota;
+			int quota_peak;
+		} set;
+		struct {
+			int tgid;
+		} get;
+	};
+	struct __sched_quota_info {
+		int tgid;
+		int quota;
+		int quota_peak;
+		int quota_sum;
+	} info;
+};
+
+#define sched_quota_confsz()  sizeof(struct __sched_config_quota)
+
+struct sched_param_ex {
+	int sched_priority;
+	union {
+		struct __sched_ss_param ss;
+		struct __sched_rr_param rr;
+		struct __sched_tp_param tp;
+		struct __sched_quota_param quota;
+	} sched_u;
+};
+
+union sched_config {
+	struct __sched_config_tp tp;
+	struct __sched_config_quota quota;
+};
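+
+/*
+ * Hedged example (hypothetical values): installing a two-window
+ * time-partitioning schedule typically means filling a dynamically
+ * sized configuration buffer before handing it over to the Cobalt
+ * scheduler configuration service:
+ *
+ *	union sched_config *cf = malloc(sched_tp_confsz(2));
+ *
+ *	cf->tp.op = sched_tp_install;
+ *	cf->tp.nr_windows = 2;
+ *	cf->tp.windows[0].offset.tv_sec = 0;
+ *	cf->tp.windows[0].offset.tv_nsec = 0;
+ *	cf->tp.windows[0].duration.tv_sec = 0;
+ *	cf->tp.windows[0].duration.tv_nsec = 100000000;
+ *	cf->tp.windows[0].ptid = 0;
+ *	... fill the remaining window(s) likewise ...
+ */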
+
+#endif /* !_COBALT_UAPI_SCHED_H */
+++ linux-patched/include/xenomai/cobalt/uapi/mutex.h	2022-03-21 12:58:32.199861560 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/synch.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_MUTEX_H
+#define _COBALT_UAPI_MUTEX_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define COBALT_MUTEX_MAGIC  0x86860303
+
+struct cobalt_mutex_state {
+	atomic_t owner;
+	__u32 flags;
+#define COBALT_MUTEX_COND_SIGNAL 0x00000001
+#define COBALT_MUTEX_ERRORCHECK  0x00000002
+	__u32 ceiling;
+};
+
+union cobalt_mutex_union {
+	pthread_mutex_t native_mutex;
+	struct cobalt_mutex_shadow {
+		__u32 magic;
+		__u32 lockcnt;
+		__u32 state_offset;
+		xnhandle_t handle;
+		struct cobalt_mutexattr attr;
+	} shadow_mutex;
+};
+
+#endif /* !_COBALT_UAPI_MUTEX_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/synch.h	2022-03-21 12:58:32.192861628 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/limits.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2008, 2009 Jan Kiszka <jan.kiszka@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_SYNCH_H
+#define _COBALT_UAPI_KERNEL_SYNCH_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+/* Creation flags */
+#define XNSYNCH_FIFO    0x0
+#define XNSYNCH_PRIO    0x1
+#define XNSYNCH_PI      0x2
+#define XNSYNCH_DREORD  0x4
+#define XNSYNCH_OWNER   0x8
+#define XNSYNCH_PP      0x10
+
+/* Fast lock API */
+static inline int xnsynch_fast_is_claimed(xnhandle_t handle)
+{
+	return (handle & XNSYNCH_FLCLAIM) != 0;
+}
+
+static inline xnhandle_t xnsynch_fast_claimed(xnhandle_t handle)
+{
+	return handle | XNSYNCH_FLCLAIM;
+}
+
+static inline xnhandle_t xnsynch_fast_ceiling(xnhandle_t handle)
+{
+	return handle | XNSYNCH_FLCEIL;
+}
+
+static inline int
+xnsynch_fast_owner_check(atomic_t *fastlock, xnhandle_t ownerh)
+{
+	return (xnhandle_get_id(atomic_read(fastlock)) == ownerh) ?
+		0 : -EPERM;
+}
+
+static inline
+int xnsynch_fast_acquire(atomic_t *fastlock, xnhandle_t new_ownerh)
+{
+	xnhandle_t h;
+
+	h = atomic_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh);
+	if (h != XN_NO_HANDLE) {
+		if (xnhandle_get_id(h) == new_ownerh)
+			return -EBUSY;
+
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static inline
+int xnsynch_fast_release(atomic_t *fastlock, xnhandle_t cur_ownerh)
+{
+	return (xnhandle_t)atomic_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE)
+		== cur_ownerh;
+}
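+
+/*
+ * Hedged usage sketch (hypothetical slow paths): the uncontended fast
+ * path tries the atomic acquisition first and only falls back to the
+ * core when this fails; release works symmetrically:
+ *
+ *	if (xnsynch_fast_acquire(fastlock, ownerh))
+ *		... contended: escalate to the slow path ...
+ *
+ *	if (!xnsynch_fast_release(fastlock, ownerh))
+ *		... claim/ceiling bits set: escalate to the slow path ...
+ */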
+
+/* Local/shared property */
+static inline int xnsynch_is_shared(xnhandle_t handle)
+{
+	return (handle & XNSYNCH_PSHARED) != 0;
+}
+
+#endif /* !_COBALT_UAPI_KERNEL_SYNCH_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/limits.h	2022-03-21 12:58:32.184861706 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/types.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_LIMITS_H
+#define _COBALT_UAPI_KERNEL_LIMITS_H
+
+#define XNOBJECT_NAME_LEN 32
+
+#endif /* !_COBALT_UAPI_KERNEL_LIMITS_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/types.h	2022-03-21 12:58:32.177861774 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/urw.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_TYPES_H
+#define _COBALT_UAPI_KERNEL_TYPES_H
+
+#include <linux/types.h>
+#include <cobalt/uapi/kernel/limits.h>
+
+typedef __u64 xnticks_t;
+
+typedef __s64 xnsticks_t;
+
+typedef __u32 xnhandle_t;
+
+#define XN_NO_HANDLE		((xnhandle_t)0)
+#define XN_HANDLE_INDEX_MASK	((xnhandle_t)0xf0000000)
+
+/* Fixed bits (part of the identifier) */
+#define XNSYNCH_PSHARED		((xnhandle_t)0x40000000)
+
+/* Transient bits (expressing a status) */
+#define XNSYNCH_FLCLAIM		((xnhandle_t)0x80000000) /* Contended. */
+#define XNSYNCH_FLCEIL		((xnhandle_t)0x20000000) /* Ceiling active. */
+
+#define XN_HANDLE_TRANSIENT_MASK	(XNSYNCH_FLCLAIM|XNSYNCH_FLCEIL)
+
+/*
+ * Strip all special bits from the handle, only retaining the object
+ * index value in the registry.
+ */
+static inline xnhandle_t xnhandle_get_index(xnhandle_t handle)
+{
+	return handle & ~XN_HANDLE_INDEX_MASK;
+}
+
+/*
+ * Strip the transient bits from the handle, only retaining the fixed
+ * part making the identifier.
+ */
+static inline xnhandle_t xnhandle_get_id(xnhandle_t handle)
+{
+	return handle & ~XN_HANDLE_TRANSIENT_MASK;
+}
+
+/*
+ * Our representation of time specs at the kernel<->user interface
+ * boundary at the moment, until we have fully transitioned to a
+ * y2038-safe implementation in libcobalt. Once done, those legacy
+ * types will be removed.
+ */
+struct __user_old_timespec {
+	long  tv_sec;
+	long  tv_nsec;
+};
+
+struct __user_old_itimerspec {
+	struct __user_old_timespec it_interval;
+	struct __user_old_timespec it_value;
+};
+
+struct __user_old_timeval {
+	long  tv_sec;
+	long  tv_usec;
+};
+
+/* Lifted from include/uapi/linux/timex.h. */
+struct __user_old_timex {
+	unsigned int modes;	/* mode selector */
+	__kernel_long_t offset;	/* time offset (usec) */
+	__kernel_long_t freq;	/* frequency offset (scaled ppm) */
+	__kernel_long_t maxerror;/* maximum error (usec) */
+	__kernel_long_t esterror;/* estimated error (usec) */
+	int status;		/* clock command/status */
+	__kernel_long_t constant;/* pll time constant */
+	__kernel_long_t precision;/* clock precision (usec) (read only) */
+	__kernel_long_t tolerance;/* clock frequency tolerance (ppm)
+				   * (read only)
+				   */
+	struct __user_old_timeval time;	/* (read only, except for ADJ_SETOFFSET) */
+	__kernel_long_t tick;	/* (modified) usecs between clock ticks */
+
+	__kernel_long_t ppsfreq;/* pps frequency (scaled ppm) (ro) */
+	__kernel_long_t jitter; /* pps jitter (us) (ro) */
+	int shift;              /* interval duration (s) (shift) (ro) */
+	__kernel_long_t stabil;            /* pps stability (scaled ppm) (ro) */
+	__kernel_long_t jitcnt; /* jitter limit exceeded (ro) */
+	__kernel_long_t calcnt; /* calibration intervals (ro) */
+	__kernel_long_t errcnt; /* calibration errors (ro) */
+	__kernel_long_t stbcnt; /* stability limit exceeded (ro) */
+
+	int tai;		/* TAI offset (ro) */
+
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32;
+};
+
+#endif /* !_COBALT_UAPI_KERNEL_TYPES_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/urw.h	2022-03-21 12:58:32.170861843 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/vdso.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_URW_H
+#define _COBALT_UAPI_KERNEL_URW_H
+
+#include <linux/types.h>
+
+/*
+ * A restricted version of the kernel seqlocks with a slightly
+ * different interface, allowing for unsynced reads with concurrent
+ * write detection, without serializing writers.  Caller should
+ * provide for proper locking to deal with concurrent updates.
+ *
+ * urw_t lock = URW_INITIALIZER;
+ * urwstate_t tmp;
+ *
+ * unsynced_read_block(&tmp, &lock) {
+ *          (will redo until clean read)...
+ * }
+ *
+ * unsynced_write_block(&tmp, &lock) {
+ *          ...
+ * }
+ *
+ * This code was inspired by Wolfgang Mauerer's linux/seqlock.h
+ * adaptation for Xenomai 2.6 to support the VDSO feature.
+ */
+
+typedef struct {
+	__u32 sequence;
+} urw_t;
+
+typedef struct {
+	__u32 token;
+	__u32 dirty;
+} urwstate_t;
+
+#define URW_INITIALIZER     { 0 }
+#define DEFINE_URW(__name)  urw_t __name = URW_INITIALIZER
+
+#ifndef READ_ONCE
+#define READ_ONCE ACCESS_ONCE
+#endif
+
+static inline void __try_read_start(const urw_t *urw, urwstate_t *tmp)
+{
+	__u32 token;
+repeat:
+	token = READ_ONCE(urw->sequence);
+	smp_rmb();
+	if (token & 1) {
+		cpu_relax();
+		goto repeat;
+	}
+
+	tmp->token = token;
+	tmp->dirty = 1;
+}
+
+static inline void __try_read_end(const urw_t *urw, urwstate_t *tmp)
+{
+	smp_rmb();
+	if (urw->sequence != tmp->token) {
+		__try_read_start(urw, tmp);
+		return;
+	}
+
+	tmp->dirty = 0;
+}
+
+static inline void __do_write_start(urw_t *urw, urwstate_t *tmp)
+{
+	urw->sequence++;
+	tmp->dirty = 1;
+	smp_wmb();
+}
+
+static inline void __do_write_end(urw_t *urw, urwstate_t *tmp)
+{
+	smp_wmb();
+	tmp->dirty = 0;
+	urw->sequence++;
+}
+
+static inline void unsynced_rw_init(urw_t *urw)
+{
+	urw->sequence = 0;
+}
+
+#define unsynced_read_block(__tmp, __urw)		\
+	for (__try_read_start(__urw, __tmp);		\
+	     (__tmp)->dirty; __try_read_end(__urw, __tmp))
+
+#define unsynced_write_block(__tmp, __urw)		\
+	for (__do_write_start(__urw, __tmp);		\
+	     (__tmp)->dirty; __do_write_end(__urw, __tmp))
+
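+/*
+ * Hedged concrete sketch (hypothetical variables) of the read/write
+ * pairing described above:
+ *
+ *	static DEFINE_URW(state_lock);
+ *	static __u64 state_value;
+ *	urwstate_t tmp;
+ *
+ *	unsynced_write_block(&tmp, &state_lock)
+ *		state_value = new_value;
+ *
+ *	unsynced_read_block(&tmp, &state_lock)
+ *		snapshot = state_value;
+ */
+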
+#endif /* !_COBALT_UAPI_KERNEL_URW_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/vdso.h	2022-03-21 12:58:32.162861921 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/pipe.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_VDSO_H
+#define _COBALT_UAPI_KERNEL_VDSO_H
+
+#include <cobalt/uapi/kernel/urw.h>
+
+/*
+ * I-pipe only. Dovetail enables the common vDSO for getting
+ * CLOCK_REALTIME timestamps from the out-of-band stage
+ * (XNVDSO_FEAT_HOST_REALTIME is cleared in this case).
+ */
+struct xnvdso_hostrt_data {
+	__u64 wall_sec;
+	__u64 wtom_sec;
+	__u64 cycle_last;
+	__u64 mask;
+	__u32 wall_nsec;
+	__u32 wtom_nsec;
+	__u32 mult;
+	__u32 shift;
+	__u32 live;
+	urw_t lock;
+};
+
+/*
+ * Data shared between the Cobalt kernel and applications, which lives
+ * in the shared memory heap (COBALT_MEMDEV_SHARED).
+ * The xnvdso.features field tells which data is present. Note
+ * that struct xnvdso may only grow, but never shrink.
+ */
+struct xnvdso {
+	__u64 features;
+	/* XNVDSO_FEAT_HOST_REALTIME */
+	struct xnvdso_hostrt_data hostrt_data;
+	/* XNVDSO_FEAT_WALLCLOCK_OFFSET */
+	__u64 wallclock_offset;
+};
+
+/* For each shared feature, add a flag below. */
+
+#define XNVDSO_FEAT_HOST_REALTIME	0x0000000000000001ULL
+#define XNVDSO_FEAT_WALLCLOCK_OFFSET	0x0000000000000002ULL
+
+static inline int xnvdso_test_feature(struct xnvdso *vdso,
+				      __u64 feature)
+{
+	return (vdso->features & feature) != 0;
+}
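+
+/*
+ * Usage sketch, assuming a pointer to the shared area has already
+ * been obtained (e.g. by mapping COBALT_MEMDEV_SHARED and resolving
+ * the vdso_offset advertised in the feature info at bind time):
+ *
+ * int have_hostrt(struct xnvdso *vdso)
+ * {
+ *	// Only then is vdso->hostrt_data meaningful; readers should
+ *	// still synchronize on its urw lock.
+ *	return xnvdso_test_feature(vdso, XNVDSO_FEAT_HOST_REALTIME);
+ * }
+ */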
+
+#endif /* !_COBALT_UAPI_KERNEL_VDSO_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/pipe.h	2022-03-21 12:58:32.155861989 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_PIPE_H
+#define _COBALT_UAPI_KERNEL_PIPE_H
+
+#define	XNPIPE_IOCTL_BASE	'p'
+
+#define XNPIPEIOC_GET_NRDEV	_IOW(XNPIPE_IOCTL_BASE, 0, int)
+#define XNPIPEIOC_IFLUSH	_IO(XNPIPE_IOCTL_BASE, 1)
+#define XNPIPEIOC_OFLUSH	_IO(XNPIPE_IOCTL_BASE, 2)
+#define XNPIPEIOC_FLUSH		XNPIPEIOC_OFLUSH
+#define XNPIPEIOC_SETSIG	_IO(XNPIPE_IOCTL_BASE, 3)
+
+#define XNPIPE_NORMAL	0x0
+#define XNPIPE_URGENT	0x1
+
+#define XNPIPE_IFLUSH	0x1
+#define XNPIPE_OFLUSH	0x2
+
+#define XNPIPE_MINOR_AUTO  (-1)
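+
+/*
+ * Usage sketch from the regular Linux side of a message pipe. The
+ * device node name is an assumption; it depends on the minor the
+ * pipe was created with (e.g. /dev/rtp0 for minor 0):
+ *
+ * int fd = open("/dev/rtp0", O_RDWR);
+ *
+ * ioctl(fd, XNPIPEIOC_IFLUSH);	// discard pending input
+ * ioctl(fd, XNPIPEIOC_OFLUSH);	// discard pending output
+ */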
+
+#endif /* !_COBALT_UAPI_KERNEL_PIPE_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/thread.h	2022-03-21 12:58:32.147862067 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/heap.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_THREAD_H
+#define _COBALT_UAPI_KERNEL_THREAD_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_thread_states Thread state flags
+ * @brief Bits reporting permanent or transient states of threads
+ * @{
+ */
+
+/* State flags (shared) */
+
+#define XNSUSP    0x00000001 /**< Suspended. */
+#define XNPEND    0x00000002 /**< Sleep-wait for a resource. */
+#define XNDELAY   0x00000004 /**< Delayed. */
+#define XNREADY   0x00000008 /**< Linked to the ready queue. */
+#define XNDORMANT 0x00000010 /**< Not started yet. */
+#define XNZOMBIE  0x00000020 /**< Zombie thread in deletion process. */
+#define XNMAPPED  0x00000040 /**< Thread is mapped to a Linux task. */
+#define XNRELAX   0x00000080 /**< Relaxed shadow thread (blocking bit). */
+#define XNHELD    0x00000200 /**< Thread is held to process emergency. */
+#define XNBOOST   0x00000400 /**< Undergoing a PI/PP boost. */
+#define XNSSTEP   0x00000800 /**< Single-stepped by debugger. */
+#define XNLOCK    0x00001000 /**< Scheduler lock control (pseudo-bit, not in ->state). */
+#define XNRRB     0x00002000 /**< Undergoing round-robin scheduling. */
+#define XNWARN    0x00004000 /**< Issue SIGDEBUG on error detection. */
+#define XNFPU     0x00008000 /**< Thread uses FPU. */
+#define XNROOT    0x00010000 /**< Root thread (that is, Linux/IDLE). */
+#define XNWEAK    0x00020000 /**< Non real-time shadow (from the WEAK class). */
+#define XNUSER    0x00040000 /**< Shadow thread running in userland. */
+#define XNJOINED  0x00080000 /**< Another thread waits to join this thread. */
+#define XNTRAPLB  0x00100000 /**< Trap lock break (i.e. may not sleep with sched lock). */
+#define XNDEBUG   0x00200000 /**< User-level debugging enabled. */
+#define XNDBGSTOP 0x00400000 /**< Stopped for synchronous debugging. */
+
+/** @} */
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_thread_info Thread information flags
+ * @brief Bits reporting events notified to threads
+ * @{
+ */
+
+/* Information flags (shared) */
+
+#define XNTIMEO   0x00000001 /**< Woken up due to a timeout condition. */
+#define XNRMID    0x00000002 /**< Pending on a removed resource. */
+#define XNBREAK   0x00000004 /**< Forcibly awakened from a wait state. */
+#define XNKICKED  0x00000008 /**< Forced out of primary mode. */
+#define XNWAKEN   0x00000010 /**< Thread woken up upon resource availability. */
+#define XNROBBED  0x00000020 /**< Robbed from resource ownership. */
+#define XNCANCELD 0x00000040 /**< Cancellation request is pending. */
+#define XNPIALERT 0x00000080 /**< Priority inversion alert (SIGDEBUG sent). */
+#define XNSCHEDP  0x00000100 /**< Schedparam propagation is pending. */
+#define XNCONTHI  0x00000200 /**< Continue in primary mode after debugging. */
+
+/* Local information flags (private to current thread) */
+
+#define XNMOVED   0x00000001 /**< CPU migration in primary mode occurred. */
+#define XNLBALERT 0x00000002 /**< Scheduler lock break alert (SIGDEBUG sent). */
+#define XNDESCENT 0x00000004 /**< Adaptively transitioning to secondary mode. */
+#define XNSYSRST  0x00000008 /**< Thread awaiting syscall restart after signal. */
+#define XNHICCUP  0x00000010 /**< Just left ptracing. */
+
+/** @} */
+
+/*
+ * Must strictly follow the declaration order of the state flags
+ * defined above. Status symbols are defined as follows:
+ *
+ * 'S' -> Forcibly suspended.
+ * 'w'/'W' -> Waiting for a resource, with or without timeout.
+ * 'D' -> Delayed (without any other wait condition).
+ * 'R' -> Runnable.
+ * 'U' -> Unstarted or dormant.
+ * 'X' -> Relaxed shadow.
+ * 'H' -> Held in emergency.
+ * 'b' -> Undergoing a priority boost.
+ * 'T' -> Ptraced and stopped.
+ * 'l' -> Locks scheduler.
+ * 'r' -> Undergoes round-robin.
+ * 't' -> Runtime mode errors notified.
+ * 'L' -> Lock breaks trapped.
+ * 's' -> Ptraced, stopped synchronously.
+ */
+#define XNTHREAD_STATE_LABELS  "SWDRU..X.HbTlrt.....L.s"
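+
+/*
+ * A sketch of how the label string maps to a state word: each set
+ * bit selects the character at its bit position, '.' marking bits
+ * with no printable label. (The core's own formatter additionally
+ * distinguishes 'w'/'W' depending on XNDELAY; this sketch ignores
+ * that refinement.)
+ *
+ * void state_to_labels(__u32 state, char *buf, size_t len)
+ * {
+ *	const char *labels = XNTHREAD_STATE_LABELS;
+ *	size_t pos = 0;
+ *	int bit;
+ *
+ *	for (bit = 0; labels[bit] && pos < len - 1; bit++)
+ *		if ((state & (1 << bit)) && labels[bit] != '.')
+ *			buf[pos++] = labels[bit];
+ *
+ *	buf[pos] = '\0';
+ * }
+ *
+ * e.g. a thread with XNSUSP|XNRELAX set would be reported as "SX".
+ */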
+
+struct xnthread_user_window {
+	__u32 state;
+	__u32 info;
+	__u32 grant_value;
+	__u32 pp_pending;
+};
+
+#endif /* !_COBALT_UAPI_KERNEL_THREAD_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/heap.h	2022-03-21 12:58:32.140862135 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/kernel/trace.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_HEAP_H
+#define _COBALT_UAPI_KERNEL_HEAP_H
+
+#include <linux/types.h>
+
+#define COBALT_MEMDEV_PRIVATE  "memdev-private"
+#define COBALT_MEMDEV_SHARED   "memdev-shared"
+#define COBALT_MEMDEV_SYS      "memdev-sys"
+
+struct cobalt_memdev_stat {
+	__u32 size;
+	__u32 free;
+};
+
+#define MEMDEV_RTIOC_STAT	_IOR(RTDM_CLASS_MEMORY, 0, struct cobalt_memdev_stat)
+
+#endif /* !_COBALT_UAPI_KERNEL_HEAP_H */
+++ linux-patched/include/xenomai/cobalt/uapi/kernel/trace.h	2022-03-21 12:58:32.133862204 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/signal.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_TRACE_H
+#define _COBALT_UAPI_KERNEL_TRACE_H
+
+#define __xntrace_op_max_begin		0
+#define __xntrace_op_max_end		1
+#define __xntrace_op_max_reset		2
+#define __xntrace_op_user_start		3
+#define __xntrace_op_user_stop		4
+#define __xntrace_op_user_freeze	5
+#define __xntrace_op_special		6
+#define __xntrace_op_special_u64	7
+#define __xntrace_op_latpeak_freeze	8
+
+#endif /* !_COBALT_UAPI_KERNEL_TRACE_H */
+++ linux-patched/include/xenomai/cobalt/uapi/signal.h	2022-03-21 12:58:32.125862281 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/sem.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SIGNAL_H
+#define _COBALT_UAPI_SIGNAL_H
+
+/*
+ * Those are pseudo-signals only available with pthread_kill() to
+ * suspend/resume/unblock threads synchronously, force them out of
+ * primary mode or even demote them to the SCHED_OTHER class via the
+ * low-level nucleus interface. Can't block those signals, queue them,
+ * or even set them in a sigset. Those are nasty, strictly anti-POSIX
+ * things; we do provide them nevertheless only because we are mean
+ * people doing harmful code for no valid reason. Can't go against
+ * your nature, right?  Nah... (this said, don't blame us for POSIX,
+ * we are not _that_ mean).
+ */
+#define SIGSUSP (SIGRTMAX + 1)
+#define SIGRESM (SIGRTMAX + 2)
+#define SIGRELS (SIGRTMAX + 3)
+#define SIGKICK (SIGRTMAX + 4)
+#define SIGDEMT (SIGRTMAX + 5)
+
+/*
+ * Regular POSIX signals with specific handling by Xenomai.
+ */
+#define SIGSHADOW			SIGWINCH
+#define sigshadow_action(code)		((code) & 0xff)
+#define sigshadow_arg(code)		(((code) >> 8) & 0xff)
+#define sigshadow_int(action, arg)	((action) | ((arg) << 8))
+
+/* SIGSHADOW action codes. */
+#define SIGSHADOW_ACTION_HARDEN		1
+#define SIGSHADOW_ACTION_BACKTRACE	2
+#define SIGSHADOW_ACTION_HOME		3
+#define SIGSHADOW_BACKTRACE_DEPTH	16
+
+#define SIGDEBUG			SIGXCPU
+#define sigdebug_code(si)		((si)->si_value.sival_int)
+#define sigdebug_reason(si)		(sigdebug_code(si) & 0xff)
+#define sigdebug_marker			0xfccf0000
+#define sigdebug_marked(si)		\
+	((sigdebug_code(si) & 0xffff0000) == sigdebug_marker)
+
+/* Possible values of sigdebug_reason() */
+#define SIGDEBUG_UNDEFINED		0
+#define SIGDEBUG_MIGRATE_SIGNAL		1
+#define SIGDEBUG_MIGRATE_SYSCALL	2
+#define SIGDEBUG_MIGRATE_FAULT		3
+#define SIGDEBUG_MIGRATE_PRIOINV	4
+#define SIGDEBUG_NOMLOCK		5
+#define SIGDEBUG_WATCHDOG		6
+#define SIGDEBUG_RESCNT_IMBALANCE	7
+#define SIGDEBUG_LOCK_BREAK		8
+#define SIGDEBUG_MUTEX_SLEEP		9
+
+#define COBALT_DELAYMAX			2147483647U
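+
+/*
+ * Typical usage sketch: a SIGDEBUG handler installed with
+ * sigaction(SA_SIGINFO), decoding the reason the core reported
+ * (what is done with each reason is up to the application):
+ *
+ * void sigdebug_handler(int sig, siginfo_t *si, void *context)
+ * {
+ *	if (!sigdebug_marked(si))
+ *		return;		// not a genuine core notification
+ *
+ *	switch (sigdebug_reason(si)) {
+ *	case SIGDEBUG_MIGRATE_SYSCALL:
+ *	case SIGDEBUG_MIGRATE_SIGNAL:
+ *	case SIGDEBUG_MIGRATE_FAULT:
+ *		// the thread unexpectedly left primary mode
+ *		break;
+ *	case SIGDEBUG_WATCHDOG:
+ *	case SIGDEBUG_LOCK_BREAK:
+ *	default:
+ *		break;
+ *	}
+ * }
+ */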
+
+/*
+ * Internal accessors to extra siginfo/sigevent fields, extending some
+ * existing base field. The extra data should be grouped in a
+ * dedicated struct type. The extra space is taken from the padding
+ * area available from the original structure definitions.
+ *
+ * e.g. getting the address of the following extension to
+ * _sifields._rt from siginfo_t,
+ *
+ * struct bar {
+ *    int foo;
+ * };
+ *
+ * would be written as:
+ *
+ * siginfo_t si;
+ * struct bar *p = __cobalt_si_extra(&si, _rt, struct bar);
+ *
+ * This code is shared between kernel and user space. Proper
+ * definitions of siginfo_t and sigevent_t should have been read prior
+ * to including this file.
+ *
+ * CAUTION: this macro does not handle alignment issues for the extra
+ * data. The extra type definition should take care of this.
+ */
+#ifdef __OPTIMIZE__
+extern void *__siginfo_overflow(void);
+static inline
+const void *__check_si_overflow(size_t fldsz, size_t extrasz, const void *p)
+{
+	siginfo_t *si __attribute__((unused));
+
+	if (fldsz + extrasz <= sizeof(si->_sifields))
+		return p;
+
+	return __siginfo_overflow();
+}
+#define __cobalt_si_extra(__si, __basefield, __type)				\
+	((__type *)__check_si_overflow(sizeof(__si->_sifields.__basefield),	\
+	       sizeof(__type), &(__si->_sifields.__basefield) + 1))
+#else
+#define __cobalt_si_extra(__si, __basefield, __type)				\
+	((__type *)((&__si->_sifields.__basefield) + 1))
+#endif
+
+/* Same approach, this time for extending sigevent_t. */
+
+#ifdef __OPTIMIZE__
+extern void *__sigevent_overflow(void);
+static inline
+const void *__check_sev_overflow(size_t fldsz, size_t extrasz, const void *p)
+{
+	sigevent_t *sev __attribute__((unused));
+
+	if (fldsz + extrasz <= sizeof(sev->_sigev_un))
+		return p;
+
+	return __sigevent_overflow();
+}
+#define __cobalt_sev_extra(__sev, __basefield, __type)				\
+	((__type *)__check_sev_overflow(sizeof(__sev->_sigev_un.__basefield),	\
+	       sizeof(__type), &(__sev->_sigev_un.__basefield) + 1))
+#else
+#define __cobalt_sev_extra(__sev, __basefield, __type)				\
+	((__type *)((&__sev->_sigev_un.__basefield) + 1))
+#endif
+
+#endif /* !_COBALT_UAPI_SIGNAL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/sem.h	2022-03-21 12:58:32.118862350 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/corectl.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SEM_H
+#define _COBALT_UAPI_SEM_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define COBALT_SEM_MAGIC (0x86860707)
+#define COBALT_NAMED_SEM_MAGIC (0x86860D0D)
+
+struct cobalt_sem;
+
+struct cobalt_sem_state {
+	atomic_t value;
+	__u32 flags;
+};
+
+union cobalt_sem_union {
+	sem_t native_sem;
+	struct cobalt_sem_shadow {
+		__u32 magic;
+		__s32 state_offset;
+		xnhandle_t handle;
+	} shadow_sem;
+};
+
+struct cobalt_sem_info {
+	unsigned int value;
+	int flags;
+	int nrwait;
+};
+
+#define SEM_FIFO       0x1
+#define SEM_PULSE      0x2
+#define SEM_PSHARED    0x4
+#define SEM_REPORT     0x8
+#define SEM_WARNDEL    0x10
+#define SEM_RAWCLOCK   0x20
+#define SEM_NOBUSYDEL  0x40
+
+#endif /* !_COBALT_UAPI_SEM_H */
+++ linux-patched/include/xenomai/cobalt/uapi/corectl.h	2022-03-21 12:58:32.110862428 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_CORECTL_H
+#define _COBALT_UAPI_CORECTL_H
+
+#define _CC_COBALT_GET_VERSION		0
+#define _CC_COBALT_GET_NR_PIPES		1
+#define _CC_COBALT_GET_NR_TIMERS	2
+
+#define _CC_COBALT_GET_DEBUG			3
+#   define _CC_COBALT_DEBUG_ASSERT		1
+#   define _CC_COBALT_DEBUG_CONTEXT		2
+#   define _CC_COBALT_DEBUG_LOCKING		4
+#   define _CC_COBALT_DEBUG_USER		8
+#   define _CC_COBALT_DEBUG_MUTEX_RELAXED	16
+#   define _CC_COBALT_DEBUG_MUTEX_SLEEP		32
+/* bit 6 (64) formerly used for DEBUG_POSIX_SYNCHRO */
+#   define _CC_COBALT_DEBUG_LEGACY		128
+#   define _CC_COBALT_DEBUG_TRACE_RELAX		256
+#   define _CC_COBALT_DEBUG_NET			512
+
+#define _CC_COBALT_GET_POLICIES		4
+#   define _CC_COBALT_SCHED_FIFO	1
+#   define _CC_COBALT_SCHED_RR		2
+#   define _CC_COBALT_SCHED_WEAK	4
+#   define _CC_COBALT_SCHED_SPORADIC	8
+#   define _CC_COBALT_SCHED_QUOTA	16
+#   define _CC_COBALT_SCHED_TP		32
+
+#define _CC_COBALT_GET_WATCHDOG		5
+#define _CC_COBALT_GET_CORE_STATUS	6
+#define _CC_COBALT_START_CORE		7
+#define _CC_COBALT_STOP_CORE		8
+
+#define _CC_COBALT_GET_NET_CONFIG	9
+#   define _CC_COBALT_NET		0x00000001
+#   define _CC_COBALT_NET_ETH_P_ALL	0x00000002
+#   define _CC_COBALT_NET_IPV4		0x00000004
+#   define _CC_COBALT_NET_ICMP		0x00000008
+#   define _CC_COBALT_NET_NETROUTING	0x00000010
+#   define _CC_COBALT_NET_ROUTER	0x00000020
+#   define _CC_COBALT_NET_UDP		0x00000040
+#   define _CC_COBALT_NET_AF_PACKET	0x00000080
+#   define _CC_COBALT_NET_TDMA		0x00000100
+#   define _CC_COBALT_NET_NOMAC		0x00000200
+#   define _CC_COBALT_NET_CFG		0x00000400
+#   define _CC_COBALT_NET_CAP		0x00000800
+#   define _CC_COBALT_NET_PROXY		0x00001000
+
+enum cobalt_run_states {
+	COBALT_STATE_DISABLED,
+	COBALT_STATE_RUNNING,
+	COBALT_STATE_STOPPED,
+	COBALT_STATE_TEARDOWN,
+	COBALT_STATE_WARMUP,
+};
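+
+/*
+ * Usage sketch. The cobalt_corectl() wrapper shown here is an
+ * assumption about the userland support library; the same request
+ * can otherwise be issued via the sc_cobalt_corectl syscall:
+ *
+ * int status = COBALT_STATE_DISABLED;
+ *
+ * if (cobalt_corectl(_CC_COBALT_GET_CORE_STATUS,
+ *		      &status, sizeof(status)) == 0 &&
+ *     status == COBALT_STATE_RUNNING) {
+ *	// the real-time core is up and running
+ * }
+ */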
+
+#endif /* !_COBALT_UAPI_CORECTL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/syscall.h	2022-03-21 12:58:32.103862496 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/time.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SYSCALL_H
+#define _COBALT_UAPI_SYSCALL_H
+
+#include <cobalt/uapi/asm-generic/syscall.h>
+
+#define sc_cobalt_bind				0
+#define sc_cobalt_thread_create			1
+#define sc_cobalt_thread_getpid			2
+#define sc_cobalt_thread_setmode		3
+#define sc_cobalt_thread_setname		4
+#define sc_cobalt_thread_join			5
+#define sc_cobalt_thread_kill			6
+#define sc_cobalt_thread_setschedparam_ex	7
+#define sc_cobalt_thread_getschedparam_ex	8
+#define sc_cobalt_thread_getstat		9
+#define sc_cobalt_sem_init			10
+#define sc_cobalt_sem_destroy			11
+#define sc_cobalt_sem_post			12
+#define sc_cobalt_sem_wait			13
+#define sc_cobalt_sem_trywait			14
+#define sc_cobalt_sem_getvalue			15
+#define sc_cobalt_sem_open			16
+#define sc_cobalt_sem_close			17
+#define sc_cobalt_sem_unlink			18
+#define sc_cobalt_sem_timedwait			19
+#define sc_cobalt_sem_inquire			20
+#define sc_cobalt_sem_broadcast_np		21
+#define sc_cobalt_clock_getres			22
+#define sc_cobalt_clock_gettime			23
+#define sc_cobalt_clock_settime			24
+#define sc_cobalt_clock_nanosleep		25
+#define sc_cobalt_mutex_init			26
+#define sc_cobalt_mutex_check_init		27
+#define sc_cobalt_mutex_destroy			28
+#define sc_cobalt_mutex_lock			29
+#define sc_cobalt_mutex_timedlock		30
+#define sc_cobalt_mutex_trylock			31
+#define sc_cobalt_mutex_unlock			32
+#define sc_cobalt_cond_init			33
+#define sc_cobalt_cond_destroy			34
+#define sc_cobalt_cond_wait_prologue		35
+#define sc_cobalt_cond_wait_epilogue		36
+#define sc_cobalt_mq_open			37
+#define sc_cobalt_mq_close			38
+#define sc_cobalt_mq_unlink			39
+#define sc_cobalt_mq_getattr			40
+#define sc_cobalt_mq_timedsend			41
+#define sc_cobalt_mq_timedreceive		42
+#define sc_cobalt_mq_notify			43
+#define sc_cobalt_sched_minprio			44
+#define sc_cobalt_sched_maxprio			45
+#define sc_cobalt_sched_weightprio		46
+#define sc_cobalt_sched_yield			47
+#define sc_cobalt_sched_setscheduler_ex		48
+#define sc_cobalt_sched_getscheduler_ex		49
+#define sc_cobalt_sched_setconfig_np		50
+#define sc_cobalt_sched_getconfig_np		51
+#define sc_cobalt_timer_create			52
+#define sc_cobalt_timer_delete			53
+#define sc_cobalt_timer_settime			54
+#define sc_cobalt_timer_gettime			55
+#define sc_cobalt_timer_getoverrun		56
+#define sc_cobalt_timerfd_create		57
+#define sc_cobalt_timerfd_settime		58
+#define sc_cobalt_timerfd_gettime		59
+#define sc_cobalt_sigwait			60
+#define sc_cobalt_sigwaitinfo			61
+#define sc_cobalt_sigtimedwait			62
+#define sc_cobalt_sigpending			63
+#define sc_cobalt_kill				64
+#define sc_cobalt_sigqueue			65
+#define sc_cobalt_monitor_init			66
+#define sc_cobalt_monitor_destroy		67
+#define sc_cobalt_monitor_enter			68
+#define sc_cobalt_monitor_wait			69
+#define sc_cobalt_monitor_sync			70
+#define sc_cobalt_monitor_exit			71
+#define sc_cobalt_event_init			72
+#define sc_cobalt_event_wait			73
+#define sc_cobalt_event_sync			74
+#define sc_cobalt_event_destroy			75
+#define sc_cobalt_event_inquire			76
+#define sc_cobalt_open				77
+#define sc_cobalt_socket			78
+#define sc_cobalt_close				79
+#define sc_cobalt_ioctl				80
+#define sc_cobalt_read				81
+#define sc_cobalt_write				82
+#define sc_cobalt_recvmsg			83
+#define sc_cobalt_sendmsg			84
+#define sc_cobalt_mmap				85
+#define sc_cobalt_select			86
+#define sc_cobalt_fcntl				87
+#define sc_cobalt_migrate			88
+#define sc_cobalt_archcall			89
+#define sc_cobalt_trace				90
+#define sc_cobalt_corectl			91
+#define sc_cobalt_get_current			92
+/* 93: formerly mayday */
+#define sc_cobalt_backtrace			94
+#define sc_cobalt_serialdbg			95
+#define sc_cobalt_extend			96
+#define sc_cobalt_ftrace_puts			97
+#define sc_cobalt_recvmmsg			98
+#define sc_cobalt_sendmmsg			99
+#define sc_cobalt_clock_adjtime			100
+#define sc_cobalt_thread_setschedprio		101
+#define sc_cobalt_sem_timedwait64		102
+#define sc_cobalt_clock_gettime64		103
+#define sc_cobalt_clock_settime64		104
+#define sc_cobalt_clock_nanosleep64		105
+#define sc_cobalt_clock_getres64		106
+#define sc_cobalt_clock_adjtime64		107
+#define sc_cobalt_mutex_timedlock64		108
+#define sc_cobalt_mq_timedsend64		109
+#define sc_cobalt_mq_timedreceive64		110
+#define sc_cobalt_sigtimedwait64		111
+#define sc_cobalt_monitor_wait64		112
+#define sc_cobalt_event_wait64			113
+#define sc_cobalt_recvmmsg64			114
+
+#define __NR_COBALT_SYSCALLS			128 /* Power of 2 */
+
+#endif /* !_COBALT_UAPI_SYSCALL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/time.h	2022-03-21 12:58:32.096862564 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/event.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_TIME_H
+#define _COBALT_UAPI_TIME_H
+
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW  4
+#endif
+
+/*
+ * The additional clock ids we manage must not collide with any of
+ * the POSIX and Linux kernel definitions, so that no ambiguities
+ * arise when porting applications in either direction.
+ *
+ * 0  .. 31   regular POSIX/Linux clock ids.
+ * 32 .. 63   statically reserved Cobalt clocks
+ * 64 .. 127  dynamically registered Cobalt clocks (external)
+ *
+ * CAUTION: clock ids must fit within a 7-bit value, see
+ * include/cobalt/uapi/thread.h (e.g. cobalt_condattr).
+ */
+#define __COBALT_CLOCK_STATIC(nr)	((clockid_t)(nr + 32))
+
+#define CLOCK_HOST_REALTIME  __COBALT_CLOCK_STATIC(0)
+
+#define COBALT_MAX_EXTCLOCKS  64
+
+#define __COBALT_CLOCK_EXT(nr)		((clockid_t)(nr) | (1 << 6))
+#define __COBALT_CLOCK_EXT_P(id)	((int)(id) >= 64 && (int)(id) < 128)
+#define __COBALT_CLOCK_EXT_INDEX(id)	((int)(id) & ~(1 << 6))
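+
+/*
+ * For instance, CLOCK_HOST_REALTIME resolves to clock id 32, while
+ * __COBALT_CLOCK_EXT(3) yields id 67 (bit 6 set, external index 3),
+ * which __COBALT_CLOCK_EXT_P() recognizes as an external clock and
+ * __COBALT_CLOCK_EXT_INDEX() maps back to 3.
+ */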
+
+/*
+ * Additional timerfd defines.
+ *
+ * When TFD_WAKEUP is passed to timer_settime, any timer expiration
+ * unblocks the thread that issued timer_settime.
+ */
+#define TFD_WAKEUP	(1 << 2)
+
+#endif /* !_COBALT_UAPI_TIME_H */
+++ linux-patched/include/xenomai/cobalt/uapi/event.h	2022-03-21 12:58:32.088862642 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/monitor.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_EVENT_H
+#define _COBALT_UAPI_EVENT_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+struct cobalt_event_state {
+	__u32 value;
+	__u32 flags;
+#define COBALT_EVENT_PENDED  0x1
+	__u32 nwaiters;
+};
+
+struct cobalt_event;
+
+/* Creation flags. */
+#define COBALT_EVENT_FIFO    0x0
+#define COBALT_EVENT_PRIO    0x1
+#define COBALT_EVENT_SHARED  0x2
+
+/* Wait mode. */
+#define COBALT_EVENT_ALL  0x0
+#define COBALT_EVENT_ANY  0x1
+
+struct cobalt_event_shadow {
+	__u32 state_offset;
+	__u32 flags;
+	xnhandle_t handle;
+};
+
+struct cobalt_event_info {
+	unsigned int value;
+	int flags;
+	int nrwait;
+};
+
+typedef struct cobalt_event_shadow cobalt_event_t;
+
+#endif /* !_COBALT_UAPI_EVENT_H */
+++ linux-patched/include/xenomai/cobalt/uapi/monitor.h	2022-03-21 12:58:32.081862711 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/asm-generic/arith.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_MONITOR_H
+#define _COBALT_UAPI_MONITOR_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+struct cobalt_monitor_state {
+	atomic_t owner;
+	__u32 flags;
+#define COBALT_MONITOR_GRANTED    0x01
+#define COBALT_MONITOR_DRAINED    0x02
+#define COBALT_MONITOR_SIGNALED   0x03 /* i.e. GRANTED or DRAINED */
+#define COBALT_MONITOR_BROADCAST  0x04
+#define COBALT_MONITOR_PENDED     0x08
+};
+
+struct cobalt_monitor;
+
+struct cobalt_monitor_shadow {
+	__u32 state_offset;
+	__u32 flags;
+	xnhandle_t handle;
+#define COBALT_MONITOR_SHARED     0x1
+#define COBALT_MONITOR_WAITGRANT  0x0
+#define COBALT_MONITOR_WAITDRAIN  0x1
+};
+
+typedef struct cobalt_monitor_shadow cobalt_monitor_t;
+
+#endif /* !_COBALT_UAPI_MONITOR_H */
+++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/arith.h	2022-03-21 12:58:32.074862779 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/asm-generic/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/**
+ *   Generic arithmetic/conversion routines.
+ *   Copyright &copy; 2005 Stelian Pop.
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_ARITH_H
+#define _COBALT_UAPI_ASM_GENERIC_ARITH_H
+
+#ifndef xnarch_u64tou32
+#define xnarch_u64tou32(ull, h, l) ({		\
+      union {					\
+	      unsigned long long _ull;		\
+	      struct endianstruct _s;		\
+      } _u;					\
+      _u._ull = (ull);				\
+      (h) = _u._s._h;				\
+      (l) = _u._s._l;				\
+})
+#endif /* !xnarch_u64tou32 */
+
+#ifndef xnarch_u64fromu32
+#define xnarch_u64fromu32(h, l) ({		\
+	union {					\
+		unsigned long long _ull;	\
+		struct endianstruct _s;		\
+	} _u;					\
+	_u._s._h = (h);				\
+	_u._s._l = (l);				\
+	_u._ull;				\
+})
+#endif /* !xnarch_u64fromu32 */
+
+#ifndef xnarch_ullmul
+static inline __attribute__((__const__)) unsigned long long
+xnarch_generic_ullmul(const unsigned m0, const unsigned m1)
+{
+	return (unsigned long long) m0 * m1;
+}
+#define xnarch_ullmul(m0,m1) xnarch_generic_ullmul((m0),(m1))
+#endif /* !xnarch_ullmul */
+
+#ifndef xnarch_ulldiv
+static inline unsigned long long xnarch_generic_ulldiv (unsigned long long ull,
+							const unsigned uld,
+							unsigned long *const rp)
+{
+	const unsigned r = do_div(ull, uld);
+
+	if (rp)
+		*rp = r;
+
+	return ull;
+}
+#define xnarch_ulldiv(ull,uld,rp) xnarch_generic_ulldiv((ull),(uld),(rp))
+#endif /* !xnarch_ulldiv */
+
+#ifndef xnarch_uldivrem
+#define xnarch_uldivrem(ull,ul,rp) ((unsigned) xnarch_ulldiv((ull),(ul),(rp)))
+#endif /* !xnarch_uldivrem */
+
+#ifndef xnarch_divmod64
+static inline unsigned long long
+xnarch_generic_divmod64(unsigned long long a,
+			unsigned long long b,
+			unsigned long long *rem)
+{
+	unsigned long long q;
+#if defined(__KERNEL__) && BITS_PER_LONG < 64
+	unsigned long long
+		xnarch_generic_full_divmod64(unsigned long long a,
+					     unsigned long long b,
+					     unsigned long long *rem);
+	if (b <= 0xffffffffULL) {
+		unsigned long r;
+		q = xnarch_ulldiv(a, b, &r);
+		if (rem)
+			*rem = r;
+	} else {
+		if (a < b) {
+			if (rem)
+				*rem = a;
+			return 0;
+		}
+
+		return xnarch_generic_full_divmod64(a, b, rem);
+	}
+#else /* !(__KERNEL__ && BITS_PER_LONG < 64) */
+	q = a / b;
+	if (rem)
+		*rem = a % b;
+#endif  /* !(__KERNEL__ && BITS_PER_LONG < 64) */
+	return q;
+}
+#define xnarch_divmod64(a,b,rp) xnarch_generic_divmod64((a),(b),(rp))
+#endif /* !xnarch_divmod64 */
+
+#ifndef xnarch_imuldiv
+static inline __attribute__((__const__)) int xnarch_generic_imuldiv(int i,
+								    int mult,
+								    int div)
+{
+	/* (int)i = (unsigned long long)i*(unsigned)(mult)/(unsigned)div. */
+	const unsigned long long ull = xnarch_ullmul(i, mult);
+	return xnarch_uldivrem(ull, div, NULL);
+}
+#define xnarch_imuldiv(i,m,d) xnarch_generic_imuldiv((i),(m),(d))
+#endif /* !xnarch_imuldiv */
+
+#ifndef xnarch_imuldiv_ceil
+static inline __attribute__((__const__)) int xnarch_generic_imuldiv_ceil(int i,
+									 int mult,
+									 int div)
+{
+	/* Same as xnarch_generic_imuldiv, rounding up. */
+	const unsigned long long ull = xnarch_ullmul(i, mult);
+	return xnarch_uldivrem(ull + (unsigned)div - 1, div, NULL);
+}
+#define xnarch_imuldiv_ceil(i,m,d) xnarch_generic_imuldiv_ceil((i),(m),(d))
+#endif /* !xnarch_imuldiv_ceil */
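+
+/*
+ * For example, xnarch_imuldiv(1000, 3, 7) computes 1000 * 3 / 7 over
+ * a 64-bit intermediate product and yields 428, while
+ * xnarch_imuldiv_ceil(1000, 3, 7) rounds the inexact quotient up to
+ * 429. Both agree whenever the division is exact.
+ */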
+
+/* Division of an unsigned 96-bit value ((h << 32) + l) by an unsigned
+   32-bit value. Building block for llimd. Without the const qualifiers,
+   gcc reloads registers after each call to uldivrem. */
+static inline unsigned long long
+xnarch_generic_div96by32(const unsigned long long h,
+			 const unsigned l,
+			 const unsigned d,
+			 unsigned long *const rp)
+{
+	unsigned long rh;
+	const unsigned qh = xnarch_uldivrem(h, d, &rh);
+	const unsigned long long t = xnarch_u64fromu32(rh, l);
+	const unsigned ql = xnarch_uldivrem(t, d, rp);
+
+	return xnarch_u64fromu32(qh, ql);
+}
+
+#ifndef xnarch_llimd
+static inline __attribute__((__const__))
+unsigned long long xnarch_generic_ullimd(const unsigned long long op,
+					 const unsigned m,
+					 const unsigned d)
+{
+	unsigned int oph, opl, tlh, tll;
+	unsigned long long th, tl;
+
+	xnarch_u64tou32(op, oph, opl);
+	tl = xnarch_ullmul(opl, m);
+	xnarch_u64tou32(tl, tlh, tll);
+	th = xnarch_ullmul(oph, m);
+	th += tlh;
+
+	return xnarch_generic_div96by32(th, tll, d, NULL);
+}
+
+static inline __attribute__((__const__)) long long
+xnarch_generic_llimd (long long op, unsigned m, unsigned d)
+{
+	long long ret;
+	int sign = 0;
+
+	if (op < 0LL) {
+		sign = 1;
+		op = -op;
+	}
+	ret = xnarch_generic_ullimd(op, m, d);
+
+	return sign ? -ret : ret;
+}
+#define xnarch_llimd(ll,m,d) xnarch_generic_llimd((ll),(m),(d))
+#endif /* !xnarch_llimd */
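+
+/*
+ * A typical use of xnarch_llimd() is scaling a 64-bit count by a
+ * rational factor without overflowing the intermediate product, e.g.
+ * converting nanoseconds to ticks of a 19.2 MHz clock (the frequency
+ * is illustrative):
+ *
+ * long long ns_to_ticks(long long ns)
+ * {
+ *	// ticks = ns * 19200000 / 1000000000, via a 96-bit intermediate
+ *	return xnarch_llimd(ns, 19200000, 1000000000);
+ * }
+ */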
+
+#ifndef xnarch_u96shift
+#define xnarch_u96shift(h, m, l, s) ({		\
+	unsigned int _l = (l);			\
+	unsigned int _m = (m);			\
+	unsigned int _s = (s);			\
+	_l >>= _s;				\
+	_l |= (_m << (32 - _s));		\
+	_m >>= _s;				\
+	_m |= ((h) << (32 - _s));		\
+	xnarch_u64fromu32(_m, _l);		\
+})
+#endif /* !xnarch_u96shift */
+
+static inline long long xnarch_llmi(int i, int j)
+{
+	/* Fast 32x32->64 signed multiplication */
+	return (long long) i * j;
+}
+
+#ifndef xnarch_llmulshft
+/* Fast scaled-math-based replacement for long long multiply-divide */
+static inline long long
+xnarch_generic_llmulshft(const long long op,
+			  const unsigned m,
+			  const unsigned s)
+{
+	unsigned int oph, opl, tlh, tll, thh, thl;
+	unsigned long long th, tl;
+
+	xnarch_u64tou32(op, oph, opl);
+	tl = xnarch_ullmul(opl, m);
+	xnarch_u64tou32(tl, tlh, tll);
+	th = xnarch_llmi(oph, m);
+	th += tlh;
+	xnarch_u64tou32(th, thh, thl);
+
+	return xnarch_u96shift(thh, thl, tll, s);
+}
+#define xnarch_llmulshft(ll, m, s) xnarch_generic_llmulshft((ll), (m), (s))
+#endif /* !xnarch_llmulshft */
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+
+/* Representation of a 32-bit fraction. */
+struct xnarch_u32frac {
+	unsigned long long frac;
+	unsigned integ;
+};
+
+static inline void xnarch_init_u32frac(struct xnarch_u32frac *const f,
+				       const unsigned m,
+				       const unsigned d)
+{
+	/*
+	 * Prevent clever compiler optimizations from kicking in when
+	 * d is known at compile time. The performance of this function
+	 * is not critical since it is only called at init time.
+	 */
+	volatile unsigned vol_d = d;
+	f->integ = m / d;
+	f->frac = xnarch_generic_div96by32
+		(xnarch_u64fromu32(m % d, 0), 0, vol_d, NULL);
+}
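+
+/*
+ * Usage sketch: precompute the fraction once, then scale values on
+ * the hot path without any run-time division (the frequency is
+ * illustrative):
+ *
+ * static struct xnarch_u32frac ns2tick;
+ *
+ * void init_scaling(void)
+ * {
+ *	xnarch_init_u32frac(&ns2tick, 19200000, 1000000000);
+ * }
+ *
+ * unsigned ns_to_tick(unsigned ns)
+ * {
+ *	return xnarch_nodiv_imuldiv(ns, ns2tick);
+ * }
+ */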
+
+#ifndef xnarch_nodiv_imuldiv
+static inline __attribute__((__const__)) unsigned
+xnarch_generic_nodiv_imuldiv(unsigned op, const struct xnarch_u32frac f)
+{
+	return (xnarch_ullmul(op, f.frac >> 32) >> 32) + f.integ * op;
+}
+#define xnarch_nodiv_imuldiv(op, f) xnarch_generic_nodiv_imuldiv((op),(f))
+#endif /* xnarch_nodiv_imuldiv */
+
+#ifndef xnarch_nodiv_imuldiv_ceil
+static inline __attribute__((__const__)) unsigned
+xnarch_generic_nodiv_imuldiv_ceil(unsigned op, const struct xnarch_u32frac f)
+{
+	unsigned long long full = xnarch_ullmul(op, f.frac >> 32) + ~0U;
+	return (full >> 32) + f.integ * op;
+}
+#define xnarch_nodiv_imuldiv_ceil(op, f) \
+	xnarch_generic_nodiv_imuldiv_ceil((op),(f))
+#endif /* xnarch_nodiv_imuldiv_ceil */
+
+#ifndef xnarch_nodiv_ullimd
+
+#ifndef xnarch_add96and64
+#error "xnarch_add96and64 must be implemented."
+#endif
+
+static inline __attribute__((__const__)) unsigned long long
+xnarch_mul64by64_high(const unsigned long long op, const unsigned long long m)
+{
+	/* Compute the high 64 bits of a 64-bit x 64-bit multiplication. */
+	register unsigned long long t0, t1, t2, t3;
+	register unsigned int oph, opl, mh, ml, t0h, t0l, t1h, t1l, t2h, t2l, t3h, t3l;
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(m, mh, ml);
+	t0 = xnarch_ullmul(opl, ml);
+	xnarch_u64tou32(t0, t0h, t0l);
+	t3 = xnarch_ullmul(oph, mh);
+	xnarch_u64tou32(t3, t3h, t3l);
+	xnarch_add96and64(t3h, t3l, t0h, 0, t0l >> 31);
+	t1 = xnarch_ullmul(oph, ml);
+	xnarch_u64tou32(t1, t1h, t1l);
+	xnarch_add96and64(t3h, t3l, t0h, t1h, t1l);
+	t2 = xnarch_ullmul(opl, mh);
+	xnarch_u64tou32(t2, t2h, t2l);
+	xnarch_add96and64(t3h, t3l, t0h, t2h, t2l);
+
+	return xnarch_u64fromu32(t3h, t3l);
+}
+
+static inline unsigned long long
+xnarch_generic_nodiv_ullimd(const unsigned long long op,
+			    const unsigned long long frac,
+			    unsigned int integ)
+{
+	return xnarch_mul64by64_high(op, frac) + integ * op;
+}
+#define xnarch_nodiv_ullimd(op, f, i)  xnarch_generic_nodiv_ullimd((op),(f), (i))
+#endif /* !xnarch_nodiv_ullimd */
+
+#ifndef xnarch_nodiv_llimd
+static inline __attribute__((__const__)) long long
+xnarch_generic_nodiv_llimd(long long op, unsigned long long frac,
+			   unsigned int integ)
+{
+	long long ret;
+	int sign = 0;
+
+	if (op < 0LL) {
+		sign = 1;
+		op = -op;
+	}
+	ret = xnarch_nodiv_ullimd(op, frac, integ);
+
+	return sign ? -ret : ret;
+}
+#define xnarch_nodiv_llimd(ll,frac,integ) xnarch_generic_nodiv_llimd((ll),(frac),(integ))
+#endif /* !xnarch_nodiv_llimd */
+
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+
+static inline void xnarch_init_llmulshft(const unsigned m_in,
+					 const unsigned d_in,
+					 unsigned *m_out,
+					 unsigned *s_out)
+{
+	/*
+	 * Prevent clever compiler optimizations from kicking in when
+	 * d is known at compile time. The performance of this function
+	 * is not critical since it is only called at init time.
+	 */
+	volatile unsigned int vol_d = d_in;
+	unsigned long long mult;
+
+	*s_out = 31;
+	while (1) {
+		mult = ((unsigned long long)m_in) << *s_out;
+		do_div(mult, vol_d);
+		if (mult <= 0x7FFFFFFF)
+			break;
+		(*s_out)--;
+	}
+	*m_out = (unsigned int)mult;
+}
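+
+/*
+ * Usage sketch: derive a (multiplier, shift) pair once, then replace
+ * each multiply-divide on the hot path with xnarch_llmulshft() (the
+ * frequency is illustrative):
+ *
+ * unsigned mul, shft;
+ *
+ * xnarch_init_llmulshft(19200000, 1000000000, &mul, &shft);
+ * // then xnarch_llmulshft(ns, mul, shft) ~= ns * 19200000 / 1000000000
+ */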
+
+#define xnarch_ullmod(ull,uld,rem)   ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
+#define xnarch_uldiv(ull, d)         xnarch_uldivrem(ull, d, NULL)
+#define xnarch_ulmod(ull, d)         ({ unsigned long _rem;	\
+					xnarch_uldivrem(ull,d,&_rem); _rem; })
+
+#define xnarch_div64(a,b)            xnarch_divmod64((a),(b),NULL)
+#define xnarch_mod64(a,b)            ({ unsigned long long _rem; \
+					xnarch_divmod64((a),(b),&_rem); _rem; })
+
+#endif /* _COBALT_UAPI_ASM_GENERIC_ARITH_H */
+++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/syscall.h	2022-03-21 12:58:32.066862857 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/asm-generic/features.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_SYSCALL_H
+#define _COBALT_UAPI_ASM_GENERIC_SYSCALL_H
+
+#include <linux/types.h>
+#include <asm/xenomai/uapi/features.h>
+#include <asm/xenomai/uapi/syscall.h>
+
+#define __COBALT_SYSCALL_BIT	0x10000000
+
+struct cobalt_bindreq {
+	/** Features userland requires. */
+	__u32 feat_req;
+	/** ABI revision userland uses. */
+	__u32 abi_rev;
+	/** Features the Cobalt core provides. */
+	struct cobalt_featinfo feat_ret;
+};
+
+#define COBALT_SECONDARY  0
+#define COBALT_PRIMARY    1
+
+#endif /* !_COBALT_UAPI_ASM_GENERIC_SYSCALL_H */
+++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/features.h	2022-03-21 12:58:32.059862925 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/cond.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_FEATURES_H
+#define _COBALT_UAPI_ASM_GENERIC_FEATURES_H
+
+#include <linux/types.h>
+
+#define XNFEAT_STRING_LEN 64
+
+struct cobalt_featinfo {
+	/** Real-time clock frequency */
+	__u64 clock_freq;
+	/** Offset of nkvdso in the sem heap. */
+	__u32 vdso_offset;
+	/** ABI revision level. */
+	__u32 feat_abirev;
+	/** Available feature set. */
+	__u32 feat_all;
+	/** Mandatory features (when requested). */
+	__u32 feat_man;
+	/** Requested feature set. */
+	__u32 feat_req;
+	/** Missing features. */
+	__u32 feat_mis;
+	char feat_all_s[XNFEAT_STRING_LEN];
+	char feat_man_s[XNFEAT_STRING_LEN];
+	char feat_req_s[XNFEAT_STRING_LEN];
+	char feat_mis_s[XNFEAT_STRING_LEN];
+	/* Architecture-specific features. */
+	struct cobalt_featinfo_archdep feat_arch;
+};
+
+#define __xn_feat_smp         0x80000000
+#define __xn_feat_nosmp       0x40000000
+#define __xn_feat_fastsynch   0x20000000
+#define __xn_feat_nofastsynch 0x10000000
+#define __xn_feat_control     0x08000000
+#define __xn_feat_prioceiling 0x04000000
+
+#ifdef CONFIG_SMP
+#define __xn_feat_smp_mask __xn_feat_smp
+#else
+#define __xn_feat_smp_mask __xn_feat_nosmp
+#endif
+
+/*
+ * Revisit: all archs currently support fast locking, and there is no
+ * reason for any future port not to provide this. This will be
+ * written in stone at the next ABI update, when fastsynch support is
+ * dropped from the optional feature set.
+ */
+#define __xn_feat_fastsynch_mask __xn_feat_fastsynch
+
+/* List of generic features kernel or userland may support */
+#define __xn_feat_generic_mask			\
+	(__xn_feat_smp_mask		|	\
+	 __xn_feat_fastsynch_mask 	|	\
+	 __xn_feat_prioceiling)
+
+/*
+ * List of features both sides have to agree on: if userland supports
+ * a feature, the kernel has to provide it, too. This means backward
+ * compatibility between older userland and a newer kernel may be
+ * supported for those features, but forward compatibility between
+ * newer userland and an older kernel cannot be.
+ */
+#define __xn_feat_generic_man_mask		\
+	(__xn_feat_fastsynch		|	\
+	 __xn_feat_nofastsynch		|	\
+	 __xn_feat_nosmp		|	\
+	 __xn_feat_prioceiling)
+
+static inline
+const char *get_generic_feature_label(unsigned int feature)
+{
+	switch (feature) {
+	case __xn_feat_smp:
+		return "smp";
+	case __xn_feat_nosmp:
+		return "nosmp";
+	case __xn_feat_fastsynch:
+		return "fastsynch";
+	case __xn_feat_nofastsynch:
+		return "nofastsynch";
+	case __xn_feat_control:
+		return "control";
+	case __xn_feat_prioceiling:
+		return "prioceiling";
+	default:
+		return 0;
+	}
+}
+
+static inline int check_abi_revision(unsigned long abirev)
+{
+	return abirev == XENOMAI_ABI_REV;
+}
+
+#endif /* !_COBALT_UAPI_ASM_GENERIC_FEATURES_H */
+++ linux-patched/include/xenomai/cobalt/uapi/cond.h	2022-03-21 12:58:32.052862993 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/uapi/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_COND_H
+#define _COBALT_UAPI_COND_H
+
+#include <cobalt/uapi/mutex.h>
+
+#define COBALT_COND_MAGIC 0x86860505
+
+struct cobalt_cond_state {
+	__u32 pending_signals;
+	__u32 mutex_state_offset;
+};
+
+union cobalt_cond_union {
+	pthread_cond_t native_cond;
+	struct cobalt_cond_shadow {
+		__u32 magic;
+		__u32 state_offset;
+		xnhandle_t handle;
+	} shadow_cond;
+};
+
+#endif /* !_COBALT_UAPI_COND_H */
+++ linux-patched/include/xenomai/cobalt/uapi/thread.h	2022-03-21 12:58:32.044863071 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/schedparam.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_THREAD_H
+#define _COBALT_UAPI_THREAD_H
+
+#include <cobalt/uapi/kernel/thread.h>
+
+#define PTHREAD_WARNSW             XNWARN
+#define PTHREAD_LOCK_SCHED         XNLOCK
+#define PTHREAD_DISABLE_LOCKBREAK  XNTRAPLB
+#define PTHREAD_CONFORMING     0
+
+struct cobalt_mutexattr {
+	int type : 3;
+	int protocol : 3;
+	int pshared : 1;
+	int __pad : 1;
+	int ceiling : 8;  /* prio-1, (XN)SCHED_FIFO range. */
+};
+
+struct cobalt_condattr {
+	int clock : 7;
+	int pshared : 1;
+};
+
+struct cobalt_threadstat {
+	__u64 xtime;
+	__u64 timeout;
+	__u64 msw;
+	__u64 csw;
+	__u64 xsc;
+	__u32 status;
+	__u32 pf;
+	int cpu;
+	int cprio;
+	char name[XNOBJECT_NAME_LEN];
+	char personality[XNOBJECT_NAME_LEN];
+};
+
+#endif /* !_COBALT_UAPI_THREAD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/schedparam.h	2022-03-21 12:58:31.742866016 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/vfile.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHEDPARAM_H
+#define _COBALT_KERNEL_SCHEDPARAM_H
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+struct xnsched_idle_param {
+	int prio;
+};
+
+struct xnsched_weak_param {
+	int prio;
+};
+
+struct xnsched_rt_param {
+	int prio;
+};
+
+struct xnsched_tp_param {
+	int prio;
+	int ptid;	/* partition id. */
+};
+
+struct xnsched_sporadic_param {
+	xnticks_t init_budget;
+	xnticks_t repl_period;
+	int max_repl;
+	int low_prio;
+	int normal_prio;
+	int current_prio;
+};
+
+struct xnsched_quota_param {
+	int prio;
+	int tgid;	/* thread group id. */
+};
+
+union xnsched_policy_param {
+	struct xnsched_idle_param idle;
+	struct xnsched_rt_param rt;
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	struct xnsched_weak_param weak;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	struct xnsched_tp_param tp;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	struct xnsched_sporadic_param pss;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_param quota;
+#endif
+};
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHEDPARAM_H */
+++ linux-patched/include/xenomai/cobalt/kernel/vfile.h	2022-03-21 12:58:31.735866084 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/synch.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_KERNEL_VFILE_H
+#define _COBALT_KERNEL_VFILE_H
+
+#if defined(CONFIG_XENO_OPT_VFILE) || defined(DOXYGEN_CPP)
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <cobalt/kernel/lock.h>
+
+/**
+ * @addtogroup cobalt_core_vfile
+ * @{
+ */
+
+struct xnvfile_directory;
+struct xnvfile_regular_iterator;
+struct xnvfile_snapshot_iterator;
+struct xnvfile_lock_ops;
+
+struct xnvfile {
+	struct proc_dir_entry *pde;
+	struct file *file;
+	struct xnvfile_lock_ops *lockops;
+	int refcnt;
+	void *private;
+};
+
+/**
+ * @brief Vfile locking operations
+ * @anchor vfile_lockops
+ *
+ * This structure describes the operations to be provided for
+ * implementing locking support on vfiles. They apply to both
+ * snapshot-driven and regular vfiles.
+ */
+struct xnvfile_lock_ops {
+	/**
+	 * @anchor lockops_get
+	 * This handler should grab the desired lock.
+	 *
+	 * @param vfile A pointer to the virtual file which needs
+	 * locking.
+	 *
+	 * @return zero should be returned if the call
+	 * succeeds. Otherwise, a negative error code can be returned;
+	 * upon error, the current vfile operation is aborted, and the
+	 * user-space caller is passed back the error value.
+	 */
+	int (*get)(struct xnvfile *vfile);
+	/**
+	 * @anchor lockops_put This handler should release the lock
+	 * previously grabbed by the @ref lockops_get "get() handler".
+	 *
+	 * @param vfile A pointer to the virtual file which currently
+	 * holds the lock to release.
+	 */
+	void (*put)(struct xnvfile *vfile);
+};
+
+struct xnvfile_hostlock_class {
+	struct xnvfile_lock_ops ops;
+	struct mutex mutex;
+};
+
+struct xnvfile_nklock_class {
+	struct xnvfile_lock_ops ops;
+	spl_t s;
+};
+
+struct xnvfile_input {
+	const char __user *u_buf;
+	size_t size;
+	struct xnvfile *vfile;
+};
+
+/**
+ * @brief Regular vfile operation descriptor
+ * @anchor regular_ops
+ *
+ * This structure describes the operations available with a regular
+ * vfile. It defines handlers for sending back formatted kernel data
+ * upon a user-space read request, and for obtaining user data upon a
+ * user-space write request.
+ */
+struct xnvfile_regular_ops {
+	/**
+	 * @anchor regular_rewind This handler is called only once,
+	 * when the virtual file is opened, before the @ref
+	 * regular_begin "begin() handler" is invoked.
+	 *
+	 * @param it A pointer to the vfile iterator which will be
+	 * used to read the file contents.
+	 *
+	 * @return Zero should be returned upon success. Otherwise, a
+	 * negative error code aborts the operation, and is passed
+	 * back to the reader.
+	 *
+	 * @note This handler is optional. It should not be used to
+	 * allocate resources but rather to perform consistency
+	 * checks, since no closure call is issued in case the open
+	 * sequence eventually fails.
+	 */
+	int (*rewind)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_begin
+	 * This handler should prepare for iterating over the records
+	 * upon a read request, starting from the specified position.
+	 *
+	 * @param it A pointer to the current vfile iterator. On
+	 * entry, it->pos is set to the (0-based) position of the
+	 * first record to output. This handler may be called multiple
+	 * times with different position requests.
+	 *
+	 * @return A pointer to the first record to format and output,
+	 * to be passed to the @ref regular_show "show() handler" as
+	 * its @a data parameter, if the call succeeds. Otherwise:
+	 *
+	 * - NULL in case no record is available, in which case the
+	 * read operation will terminate immediately with no output.
+	 *
+	 * - VFILE_SEQ_START, a special value indicating that @ref
+	 * regular_show "the show() handler" should receive a NULL
+	 * data pointer first, in order to output a header.
+	 *
+	 * - ERR_PTR(errno), where errno is a negative error code;
+	 * upon error, the current operation will be aborted
+	 * immediately.
+	 *
+	 * @note This handler is optional; if none is given in the
+	 * operation descriptor (i.e. NULL value), the @ref
+	 * regular_show "show() handler" will be called only once
+	 * for a read operation, with a NULL @a data parameter. This
+	 * particular setting is convenient for simple regular vfiles
+	 * having a single, fixed record to output.
+	 */
+	void *(*begin)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_next
+	 * This handler should return the address of the next record
+	 * to format and output by the @ref regular_show "show()
+	 * handler".
+	 *
+	 * @param it A pointer to the current vfile iterator. On
+	 * entry, it->pos is set to the (0-based) position of the
+	 * next record to output.
+	 *
+	 * @return A pointer to the next record to format and output,
+	 * to be passed to the @ref regular_show "show() handler" as
+	 * its @a data parameter, if the call succeeds. Otherwise:
+	 *
+	 * - NULL in case no record is available, in which case the
+	 * read operation will terminate immediately with no output.
+	 *
+	 * - ERR_PTR(errno), where errno is a negative error code;
+	 * upon error, the current operation will be aborted
+	 * immediately.
+	 *
+	 * @note This handler is optional; if none is given in the
+	 * operation descriptor (i.e. NULL value), the read operation
+	 * will stop after the first invocation of the @ref regular_show
+	 * "show() handler".
+	 */
+	void *(*next)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_end
+	 * This handler is called after all records have been output.
+	 *
+	 * @param it A pointer to the current vfile iterator.
+	 *
+	 * @note This handler is optional and the pointer may be NULL.
+	 */
+	void (*end)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_show
+	 * This handler should format and output a record.
+	 *
+	 * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
+	 * xnvfile_putc() are available to format and/or emit the
+	 * output. All routines take the iterator argument @a it as
+	 * their first parameter.
+	 *
+	 * @param it A pointer to the current vfile iterator.
+	 *
+	 * @param data A pointer to the record to format then
+	 * output. The first call to the handler may receive a NULL @a
+	 * data pointer, depending on the presence and/or return value of
+	 * the @ref regular_begin "begin() handler"; the show handler should test
+	 * this special value to output any header that fits, prior to
+	 * receiving more calls with actual records.
+	 *
+	 * @return zero if the call succeeds, also indicating that the
+	 * handler should be called for the next record if
+	 * any. Otherwise:
+	 *
+	 * - A negative error code. This will abort the output phase,
+	 * and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped and will not be output.
+	 */
+	int (*show)(struct xnvfile_regular_iterator *it, void *data);
+	/**
+	 * @anchor regular_store
+	 * This handler receives data written to the vfile, likely for
+	 * updating some kernel setting, or triggering any other
+	 * action which fits. This is the only handler which deals
+	 * with the write-side of a vfile.  It is called when writing
+	 * to the /proc entry of the vfile from a user-space process.
+	 *
+	 * The input data is described by a descriptor passed to the
+	 * handler, which may be subsequently passed to parsing helper
+	 * routines.  For instance, xnvfile_get_string() will accept
+	 * the input descriptor for returning the written data as a
+	 * null-terminated character string. On the other hand,
+	 * xnvfile_get_integer() will attempt to return a long integer
+	 * from the input data.
+	 *
+	 * @param input A pointer to an input descriptor. It refers to
+	 * opaque data from the handler's standpoint.
+	 *
+	 * @return the number of bytes read from the input descriptor
+	 * if the call succeeds. Otherwise, a negative error code.
+	 * Return values from parsing helper routines are commonly
+	 * passed back to the caller by the @ref regular_store
+	 * "store() handler".
+	 *
+	 * @note This handler is optional, and may be omitted for
+	 * read-only vfiles.
+	 */
+	ssize_t (*store)(struct xnvfile_input *input);
+};
+
+struct xnvfile_regular {
+	struct xnvfile entry;
+	size_t privsz;
+	struct xnvfile_regular_ops *ops;
+};
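+
+/*
+ * Usage sketch (illustrative only): a minimal read-only regular
+ * vfile exporting one fixed record. Since neither begin() nor next()
+ * is provided, the show() handler runs exactly once per read, with a
+ * NULL @a data pointer. The demo_* names below are hypothetical, as
+ * is the exported value.
+ *
+ *	static int demo_show(struct xnvfile_regular_iterator *it,
+ *			     void *data)
+ *	{
+ *		xnvfile_printf(it, "answer=%d\n", 42);
+ *		return 0;
+ *	}
+ *
+ *	static struct xnvfile_regular_ops demo_ops = {
+ *		.show = demo_show,
+ *	};
+ *
+ *	static struct xnvfile_regular demo_vfile = {
+ *		.ops = &demo_ops,
+ *	};
+ *
+ * The entry would then be exported from some init code with
+ * xnvfile_init_regular("demo", &demo_vfile, &cobalt_vfroot).
+ */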
+
+struct xnvfile_regular_template {
+	size_t privsz;
+	struct xnvfile_regular_ops *ops;
+	struct xnvfile_lock_ops *lockops;
+};
+
+/**
+ * @brief Regular vfile iterator
+ * @anchor regular_iterator
+ *
+ * This structure defines an iterator over a regular vfile.
+ */
+struct xnvfile_regular_iterator {
+	/** Current record position while iterating. */
+	loff_t pos;
+	/** Backlink to the host sequential file supporting the vfile. */
+	struct seq_file *seq;
+	/** Backlink to the vfile being read. */
+	struct xnvfile_regular *vfile;
+	/**
+	 * Start of private area. Use xnvfile_iterator_priv() to
+	 * address it.
+	 */
+	char private[0];
+};
+
+/**
+ * @brief Snapshot vfile operation descriptor
+ * @anchor snapshot_ops
+ *
+ * This structure describes the operations available with a
+ * snapshot-driven vfile. It defines handlers for returning a
+ * printable snapshot of some Xenomai object contents upon a
+ * user-space read request, and for updating this object upon a
+ * user-space write request.
+ */
+struct xnvfile_snapshot_ops {
+	/**
+	 * @anchor snapshot_rewind
+	 * This handler (re-)initializes the data collection, moving
+	 * the seek pointer at the first record. When the file
+	 * revision tag is touched while collecting data, the current
+	 * reading is aborted, all collected data dropped, and the
+	 * vfile is eventually rewound.
+	 *
+	 * @param it A pointer to the current snapshot iterator. Two
+	 * useful pieces of information can be retrieved from this
+	 * iterator in this context:
+	 *
+	 * - it->vfile is a pointer to the descriptor of the virtual
+	 * file being rewound.
+	 *
+	 * - xnvfile_iterator_priv(it) returns a pointer to the
+	 * private data area available from the descriptor, whose size
+	 * is vfile->privsz. If the latter is zero, the returned
+	 * pointer is meaningless and should not be used.
+	 *
+	 * @return A negative error code aborts the data collection,
+	 * and is passed back to the reader. Otherwise:
+	 *
+	 * - a strictly positive value is interpreted as the total
+	 * number of records which will be returned by the @ref
+	 * snapshot_next "next() handler" during the data collection
+	 * phase. If no @ref snapshot_begin "begin() handler" is
+	 * provided in the @ref snapshot_ops "operation descriptor",
+	 * this value is used to allocate the snapshot buffer
+	 * internally. The size of this buffer would then be
+	 * vfile->datasz * value.
+	 *
+	 * - zero leaves the allocation to the @ref snapshot_begin
+	 * "begin() handler" if present, or indicates that no record
+	 * is to be output in case such handler is not given.
+	 *
+	 * @note This handler is optional; a NULL value indicates that
+	 * nothing needs to be done for rewinding the vfile.  It is
+	 * called with the vfile lock held.
+	 */
+	int (*rewind)(struct xnvfile_snapshot_iterator *it);
+	/**
+	 * @anchor snapshot_begin
+	 * This handler should allocate the snapshot buffer to hold
+	 * records during the data collection phase.  When specified,
+	 * all records collected via the @ref snapshot_next "next()
+	 * handler" will be written to a cell from the memory area
+	 * returned by begin().
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @return A pointer to the record buffer, if the call
+	 * succeeds. Otherwise:
+	 *
+	 * - NULL in case of allocation error. This will abort the data
+	 * collection, and return -ENOMEM to the reader.
+	 *
+	 * - VFILE_SEQ_EMPTY, a special value indicating that no
+	 * record will be output. In such a case, the @ref
+	 * snapshot_next "next() handler" will not be called, and the
+	 * data collection will stop immediately. However, the @ref
+	 * snapshot_show "show() handler" will still be called once,
+	 * with a NULL data pointer (i.e. header display request).
+	 *
+	 * @note This handler is optional; if none is given, the
+	 * snapshot buffer is allocated internally, sized according to
+	 * the value returned by the @ref snapshot_rewind "rewind()
+	 * handler".
+	 */
+	void *(*begin)(struct xnvfile_snapshot_iterator *it);
+	/**
+	 * @anchor snapshot_end
+	 * This handler releases the memory buffer previously obtained
+	 * from begin(). It is usually called after the snapshot data
+	 * has been output by show(), but it may also be called before
+	 * rewinding the vfile after a revision change, to release the
+	 * dropped buffer.
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param buf A pointer to the buffer to release.
+	 *
+	 * @note This routine is optional and the pointer may be
+	 * NULL. It is not needed upon internal buffer allocation;
+	 * see the description of the @ref snapshot_rewind "rewind()
+	 * handler".
+	 */
+	void (*end)(struct xnvfile_snapshot_iterator *it, void *buf);
+	/**
+	 * @anchor snapshot_next
+	 * This handler fetches the next record, as part of the
+	 * snapshot data to be sent back to the reader via the @ref
+	 * snapshot_show "show() handler".
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param data A pointer to the record to fill in.
+	 *
+	 * @return a strictly positive value, if the call succeeds and
+	 * leaves a valid record in @a data, which should be passed
+	 * to the @ref snapshot_show "show() handler" during the
+	 * formatting and output phase. Otherwise:
+	 *
+	 * - A negative error code. This will abort the data
+	 * collection, and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped. In such a case, the @a
+	 * data pointer is not advanced to the next position before
+	 * the @ref snapshot_next "next() handler" is called anew.
+	 *
+	 * @note This handler is called with the vfile lock
+	 * held. Before each invocation of this handler, the vfile
+	 * core checks whether the revision tag has been touched, in
+	 * which case the data collection is restarted from scratch. A
+	 * data collection phase succeeds whenever all records can be
+	 * fetched via the @ref snapshot_next "next() handler", while
+	 * the revision tag remains unchanged, which indicates that a
+	 * consistent snapshot of the object state was taken.
+	 */
+	int (*next)(struct xnvfile_snapshot_iterator *it, void *data);
+	/**
+	 * @anchor snapshot_show
+	 * This handler should format and output a record from the
+	 * collected data.
+	 *
+	 * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
+	 * xnvfile_putc() are available to format and/or emit the
+	 * output. All routines take the iterator argument @a it as
+	 * their first parameter.
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param data A pointer to the record to format then
+	 * output. The first call to the handler is always passed a
+	 * NULL @a data pointer; the show handler should test this
+	 * special value to output any header that fits, prior to
+	 * receiving more calls with actual records.
+	 *
+	 * @return zero if the call succeeds, also indicating that the
+	 * handler should be called for the next record if
+	 * any. Otherwise:
+	 *
+	 * - A negative error code. This will abort the output phase,
+	 * and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped and will not be output.
+	 */
+	int (*show)(struct xnvfile_snapshot_iterator *it, void *data);
+	/**
+	 * @anchor snapshot_store
+	 * This handler receives data written to the vfile, likely for
+	 * updating the associated Xenomai object's state, or
+	 * triggering any other action which fits. This is the only
+	 * handler which deals with the write-side of a vfile.  It is
+	 * called when writing to the /proc entry of the vfile
+	 * from a user-space process.
+	 *
+	 * The input data is described by a descriptor passed to the
+	 * handler, which may be subsequently passed to parsing helper
+	 * routines.  For instance, xnvfile_get_string() will accept
+	 * the input descriptor for returning the written data as a
+	 * null-terminated character string. On the other hand,
+	 * xnvfile_get_integer() will attempt to return a long integer
+	 * from the input data.
+	 *
+	 * @param input A pointer to an input descriptor. It refers to
+	 * opaque data from the handler's standpoint.
+	 *
+	 * @return the number of bytes read from the input descriptor
+	 * if the call succeeds. Otherwise, a negative error code.
+	 * Return values from parsing helper routines are commonly
+	 * passed back to the caller by the @ref snapshot_store
+	 * "store() handler".
+	 *
+	 * @note This handler is optional, and may be omitted for
+	 * read-only vfiles.
+	 */
+	ssize_t (*store)(struct xnvfile_input *input);
+};
+
+/**
+ * @brief Snapshot revision tag
+ * @anchor revision_tag
+ *
+ * This structure defines a revision tag to be used with @ref
+ * snapshot_vfile "snapshot-driven vfiles".
+ */
+struct xnvfile_rev_tag {
+	/** Current revision number. */
+	int rev;
+};
+
+struct xnvfile_snapshot_template {
+	size_t privsz;
+	size_t datasz;
+	struct xnvfile_rev_tag *tag;
+	struct xnvfile_snapshot_ops *ops;
+	struct xnvfile_lock_ops *lockops;
+};
+
+/**
+ * @brief Snapshot vfile descriptor
+ * @anchor snapshot_vfile
+ *
+ * This structure describes a snapshot-driven vfile.  Reading from
+ * such a vfile involves a preliminary data collection phase under
+ * lock protection, and a subsequent formatting and output phase of
+ * the collected data records. Locking is done in a way that does not
+ * increase worst-case latency, regardless of the number of records to
+ * be collected for output.
+ */
+struct xnvfile_snapshot {
+	struct xnvfile entry;
+	size_t privsz;
+	size_t datasz;
+	struct xnvfile_rev_tag *tag;
+	struct xnvfile_snapshot_ops *ops;
+};
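+
+/*
+ * Usage sketch (illustrative only): a snapshot vfile dumping a small
+ * array of integers. rewind() resets a cursor kept in the iterator
+ * private area and returns the record count so the core can size the
+ * snapshot buffer (datasz * count); next() copies one record per
+ * call during the collection phase; show() formats each copy during
+ * the output phase, emitting a header when passed a NULL record. All
+ * demo_* names are hypothetical.
+ *
+ *	static int demo_values[16];
+ *	static int demo_count;
+ *
+ *	struct demo_priv {
+ *		int cursor;
+ *	};
+ *
+ *	struct demo_rec {
+ *		int value;
+ *	};
+ *
+ *	static int demo_rewind(struct xnvfile_snapshot_iterator *it)
+ *	{
+ *		struct demo_priv *priv = xnvfile_iterator_priv(it);
+ *
+ *		priv->cursor = 0;
+ *		return demo_count;
+ *	}
+ *
+ *	static int demo_next(struct xnvfile_snapshot_iterator *it,
+ *			     void *data)
+ *	{
+ *		struct demo_priv *priv = xnvfile_iterator_priv(it);
+ *		struct demo_rec *p = data;
+ *
+ *		if (priv->cursor >= demo_count)
+ *			return 0;	// no more records (defensive)
+ *
+ *		p->value = demo_values[priv->cursor++];
+ *		return 1;		// one valid record stored
+ *	}
+ *
+ *	static int demo_show(struct xnvfile_snapshot_iterator *it,
+ *			     void *data)
+ *	{
+ *		struct demo_rec *p = data;
+ *
+ *		if (p == NULL)		// header display request
+ *			xnvfile_puts(it, "VALUE\n");
+ *		else
+ *			xnvfile_printf(it, "%d\n", p->value);
+ *
+ *		return 0;
+ *	}
+ *
+ *	static struct xnvfile_snapshot_ops demo_ops = {
+ *		.rewind = demo_rewind,
+ *		.next = demo_next,
+ *		.show = demo_show,
+ *	};
+ *
+ *	static struct xnvfile_rev_tag demo_tag;
+ *
+ *	static struct xnvfile_snapshot demo_vfile = {
+ *		.privsz = sizeof(struct demo_priv),
+ *		.datasz = sizeof(struct demo_rec),
+ *		.tag = &demo_tag,
+ *		.ops = &demo_ops,
+ *	};
+ *
+ * Registration would go through
+ * xnvfile_init_snapshot("demo", &demo_vfile, &cobalt_vfroot).
+ */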
+
+/**
+ * @brief Snapshot-driven vfile iterator
+ * @anchor snapshot_iterator
+ *
+ * This structure defines an iterator over a snapshot-driven vfile.
+ */
+struct xnvfile_snapshot_iterator {
+	/** Number of collected records. */
+	int nrdata;
+	/** Address of record buffer. */
+	caddr_t databuf;
+	/** Backlink to the host sequential file supporting the vfile. */
+	struct seq_file *seq;
+	/** Backlink to the vfile being read. */
+	struct xnvfile_snapshot *vfile;
+	/** Buffer release handler. */
+	void (*endfn)(struct xnvfile_snapshot_iterator *it, void *buf);
+	/**
+	 * Start of private area. Use xnvfile_iterator_priv() to
+	 * address it.
+	 */
+	char private[0];
+};
+
+struct xnvfile_directory {
+	struct xnvfile entry;
+};
+
+struct xnvfile_link {
+	struct xnvfile entry;
+};
+
+/* vfile.begin()=> */
+#define VFILE_SEQ_EMPTY			((void *)-1)
+/* =>vfile.show() */
+#define VFILE_SEQ_START			SEQ_START_TOKEN
+/* vfile.next/show()=> */
+#define VFILE_SEQ_SKIP			2
+
+#define xnvfile_printf(it, args...)	seq_printf((it)->seq, ##args)
+#define xnvfile_write(it, data, len)	seq_write((it)->seq, (data),(len))
+#define xnvfile_puts(it, s)		seq_puts((it)->seq, (s))
+#define xnvfile_putc(it, c)		seq_putc((it)->seq, (c))
+
+static inline void xnvfile_touch_tag(struct xnvfile_rev_tag *tag)
+{
+	tag->rev++;
+}
+
+static inline void xnvfile_touch(struct xnvfile_snapshot *vfile)
+{
+	xnvfile_touch_tag(vfile->tag);
+}
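+
+/*
+ * Usage sketch (illustrative only): code which updates the data set
+ * exported by a snapshot vfile should touch the revision tag under
+ * the same lock the collection handlers run with, so that a reader
+ * caught in the middle of a collection drops its partial snapshot
+ * and rewinds. demo_values, demo_count and demo_vfile refer to the
+ * hypothetical objects from the sketch above.
+ *
+ *	spl_t s;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	demo_values[demo_count++] = 7;	// update the exported data
+ *	xnvfile_touch(&demo_vfile);	// invalidate in-flight snapshots
+ *	xnlock_put_irqrestore(&nklock, s);
+ */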
+
+#define xnvfile_noentry			\
+	{				\
+		.pde = NULL,		\
+		.private = NULL,	\
+		.file = NULL,		\
+		.refcnt = 0,		\
+	}
+
+#define xnvfile_nodir	{ .entry = xnvfile_noentry }
+#define xnvfile_nolink	{ .entry = xnvfile_noentry }
+#define xnvfile_nofile	{ .entry = xnvfile_noentry }
+
+#define xnvfile_priv(e)			((e)->entry.private)
+#define xnvfile_nref(e)			((e)->entry.refcnt)
+#define xnvfile_file(e)			((e)->entry.file)
+#define xnvfile_iterator_priv(it)	((void *)(&(it)->private))
+
+extern struct xnvfile_nklock_class xnvfile_nucleus_lock;
+
+extern struct xnvfile_directory cobalt_vfroot;
+
+int xnvfile_init_root(void);
+
+void xnvfile_destroy_root(void);
+
+int xnvfile_init_snapshot(const char *name,
+			  struct xnvfile_snapshot *vfile,
+			  struct xnvfile_directory *parent);
+
+int xnvfile_init_regular(const char *name,
+			 struct xnvfile_regular *vfile,
+			 struct xnvfile_directory *parent);
+
+int xnvfile_init_dir(const char *name,
+		     struct xnvfile_directory *vdir,
+		     struct xnvfile_directory *parent);
+
+int xnvfile_init_link(const char *from,
+		      const char *to,
+		      struct xnvfile_link *vlink,
+		      struct xnvfile_directory *parent);
+
+void xnvfile_destroy(struct xnvfile *vfile);
+
+ssize_t xnvfile_get_blob(struct xnvfile_input *input,
+			 void *data, size_t size);
+
+ssize_t xnvfile_get_string(struct xnvfile_input *input,
+			   char *s, size_t maxlen);
+
+ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp);
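+
+/*
+ * Usage sketch (illustrative only): a store() handler accepting a
+ * single integer written to the vfile (e.g. via echo from a shell).
+ * demo_threshold is a hypothetical kernel setting.
+ *
+ *	static ssize_t demo_store(struct xnvfile_input *input)
+ *	{
+ *		long val;
+ *		ssize_t nbytes;
+ *
+ *		nbytes = xnvfile_get_integer(input, &val);
+ *		if (nbytes < 0)
+ *			return nbytes;	// parse error, pass it back
+ *
+ *		demo_threshold = val;	// apply the new setting
+ *
+ *		return nbytes;		// bytes consumed from the input
+ *	}
+ */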
+
+int __vfile_hostlock_get(struct xnvfile *vfile);
+
+void __vfile_hostlock_put(struct xnvfile *vfile);
+
+static inline
+void xnvfile_destroy_snapshot(struct xnvfile_snapshot *vfile)
+{
+	xnvfile_destroy(&vfile->entry);
+}
+
+static inline
+void xnvfile_destroy_regular(struct xnvfile_regular *vfile)
+{
+	xnvfile_destroy(&vfile->entry);
+}
+
+static inline
+void xnvfile_destroy_dir(struct xnvfile_directory *vdir)
+{
+	xnvfile_destroy(&vdir->entry);
+}
+
+static inline
+void xnvfile_destroy_link(struct xnvfile_link *vlink)
+{
+	xnvfile_destroy(&vlink->entry);
+}
+
+#define DEFINE_VFILE_HOSTLOCK(name)					\
+	struct xnvfile_hostlock_class name = {				\
+		.ops = {						\
+			.get = __vfile_hostlock_get,			\
+			.put = __vfile_hostlock_put,			\
+		},							\
+		.mutex = __MUTEX_INITIALIZER(name.mutex),		\
+	}
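+
+/*
+ * Usage sketch (illustrative only): serializing accesses to a vfile
+ * with a host mutex-based lock class, by pointing the entry's
+ * lockops at the class operations. demo_lock, demo_ops and
+ * demo_vfile are hypothetical.
+ *
+ *	static DEFINE_VFILE_HOSTLOCK(demo_lock);
+ *
+ *	static struct xnvfile_regular demo_vfile = {
+ *		.entry = { .lockops = &demo_lock.ops },
+ *		.ops = &demo_ops,
+ *	};
+ */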
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+#define xnvfile_touch_tag(tag)	do { } while (0)
+
+#define xnvfile_touch(vfile)	do { } while (0)
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_VFILE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/synch.h	2022-03-21 12:58:31.727866163 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SYNCH_H
+#define _COBALT_KERNEL_SYNCH_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/uapi/kernel/synch.h>
+#include <cobalt/uapi/kernel/thread.h>
+
+/**
+ * @addtogroup cobalt_core_synch
+ * @{
+ */
+#define XNSYNCH_CLAIMED  0x100	/* Claimed by other thread(s) (PI) */
+#define XNSYNCH_CEILING  0x200	/* Actively boosting (PP) */
+
+/* Spare flags usable by upper interfaces */
+#define XNSYNCH_SPARE0  0x01000000
+#define XNSYNCH_SPARE1  0x02000000
+#define XNSYNCH_SPARE2  0x04000000
+#define XNSYNCH_SPARE3  0x08000000
+#define XNSYNCH_SPARE4  0x10000000
+#define XNSYNCH_SPARE5  0x20000000
+#define XNSYNCH_SPARE6  0x40000000
+#define XNSYNCH_SPARE7  0x80000000
+
+/* Statuses */
+#define XNSYNCH_DONE    0	/* Resource available / operation complete */
+#define XNSYNCH_WAIT    1	/* Calling thread blocked -- start rescheduling */
+#define XNSYNCH_RESCHED 2	/* Force rescheduling */
+
+struct xnthread;
+struct xnsynch;
+
+struct xnsynch {
+	/** wait (weighted) prio in thread->boosters */
+	int wprio;
+	/** thread->boosters */
+	struct list_head next;
+	/**
+	 *  Address of the variable holding the current priority
+	 *  ceiling value (xnsched_class_rt-based, [1..255], XNSYNCH_PP).
+	 */
+	u32 *ceiling_ref;
+	/** Status word */
+	unsigned long status;
+	/** Pending threads */
+	struct list_head pendq;
+	/** Thread which owns the resource */
+	struct xnthread *owner;
+	/** Pointer to fast lock word */
+	atomic_t *fastlock;
+	/** Cleanup handler */
+	void (*cleanup)(struct xnsynch *synch);
+};
+
+#define XNSYNCH_WAITQUEUE_INITIALIZER(__name) {		\
+		.status = XNSYNCH_PRIO,			\
+		.wprio = -1,				\
+		.pendq = LIST_HEAD_INIT((__name).pendq),	\
+		.owner = NULL,				\
+		.cleanup = NULL,			\
+		.fastlock = NULL,			\
+	}
+
+#define DEFINE_XNWAITQ(__name)	\
+	struct xnsynch __name = XNSYNCH_WAITQUEUE_INITIALIZER(__name)
+
+static inline void xnsynch_set_status(struct xnsynch *synch, int bits)
+{
+	synch->status |= bits;
+}
+
+static inline void xnsynch_clear_status(struct xnsynch *synch, int bits)
+{
+	synch->status &= ~bits;
+}
+
+#define xnsynch_for_each_sleeper(__pos, __synch)		\
+	list_for_each_entry(__pos, &(__synch)->pendq, plink)
+
+#define xnsynch_for_each_sleeper_safe(__pos, __tmp, __synch)	\
+	list_for_each_entry_safe(__pos, __tmp, &(__synch)->pendq, plink)
+
+static inline int xnsynch_pended_p(struct xnsynch *synch)
+{
+	return !list_empty(&synch->pendq);
+}
+
+static inline struct xnthread *xnsynch_owner(struct xnsynch *synch)
+{
+	return synch->owner;
+}
+
+#define xnsynch_fastlock(synch)		((synch)->fastlock)
+#define xnsynch_fastlock_p(synch)	((synch)->fastlock != NULL)
+#define xnsynch_owner_check(synch, thread) \
+	xnsynch_fast_owner_check((synch)->fastlock, thread->handle)
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED
+
+void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper);
+
+void xnsynch_detect_boosted_relax(struct xnthread *owner);
+
+#else /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+static inline void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper) { }
+
+static inline void xnsynch_detect_boosted_relax(struct xnthread *owner) { }
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+void xnsynch_init(struct xnsynch *synch, int flags,
+		  atomic_t *fastlock);
+
+void xnsynch_init_protect(struct xnsynch *synch, int flags,
+			  atomic_t *fastlock, u32 *ceiling_ref);
+
+int xnsynch_destroy(struct xnsynch *synch);
+
+void xnsynch_commit_ceiling(struct xnthread *curr);
+
+static inline void xnsynch_register_cleanup(struct xnsynch *synch,
+					    void (*handler)(struct xnsynch *))
+{
+	synch->cleanup = handler;
+}
+
+int __must_check xnsynch_sleep_on(struct xnsynch *synch,
+				  xnticks_t timeout,
+				  xntmode_t timeout_mode);
+
+struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch);
+
+int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr);
+
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch,
+				 struct xnthread *sleeper);
+
+int __must_check xnsynch_acquire(struct xnsynch *synch,
+				 xnticks_t timeout,
+				 xntmode_t timeout_mode);
+
+int __must_check xnsynch_try_acquire(struct xnsynch *synch);
+
+bool xnsynch_release(struct xnsynch *synch, struct xnthread *thread);
+
+struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
+
+int xnsynch_flush(struct xnsynch *synch, int reason);
+
+void xnsynch_requeue_sleeper(struct xnthread *thread);
+
+void xnsynch_forget_sleeper(struct xnthread *thread);
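+
+/*
+ * Usage sketch (illustrative only): using an xnsynch object as a
+ * plain priority-ordered wait queue, with no resource ownership and
+ * therefore no fast lock word. Both sides are assumed to run from a
+ * Cobalt (primary mode) context; exact locking requirements are not
+ * shown here. xnsched_run() comes from cobalt/kernel/sched.h, and
+ * the demo_* names are hypothetical.
+ *
+ *	static DEFINE_XNWAITQ(demo_waitq);
+ *
+ *	static int demo_wait(void)
+ *	{
+ *		int info;
+ *
+ *		info = xnsynch_sleep_on(&demo_waitq, XN_INFINITE, XN_RELATIVE);
+ *		if (info & XNBREAK)
+ *			return -EINTR;	// forcibly unblocked
+ *
+ *		return 0;
+ *	}
+ *
+ *	static void demo_post(void)
+ *	{
+ *		if (xnsynch_wakeup_one_sleeper(&demo_waitq) != NULL)
+ *			xnsched_run();	// commit the pending rescheduling
+ *	}
+ */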
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SYNCH_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched.h	2022-03-21 12:58:31.720866231 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/map.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_H
+#define _COBALT_KERNEL_SCHED_H
+
+#include <linux/percpu.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/schedqueue.h>
+#include <cobalt/kernel/sched-tp.h>
+#include <cobalt/kernel/sched-weak.h>
+#include <cobalt/kernel/sched-sporadic.h>
+#include <cobalt/kernel/sched-quota.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/machine.h>
+#include <pipeline/sched.h>
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/* Sched status flags */
+#define XNRESCHED	0x10000000	/* Needs rescheduling */
+#define XNINSW		0x20000000	/* In context switch */
+#define XNINTCK		0x40000000	/* In master tick handler context */
+
+/* Sched local flags */
+#define XNIDLE		0x00010000	/* Idle (no outstanding timer) */
+#define XNHTICK		0x00008000	/* Host tick pending  */
+#define XNINIRQ		0x00004000	/* In IRQ handling context */
+#define XNHDEFER	0x00002000	/* Host tick deferred */
+
+/*
+ * Hardware timer is stopped.
+ */
+#define XNTSTOP		0x00000800
+
+struct xnsched_rt {
+	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
+};
+
+/*!
+ * \brief Scheduling information structure.
+ */
+
+struct xnsched {
+	/*!< Scheduler specific status bitmask. */
+	unsigned long status;
+	/*!< Scheduler specific local flags bitmask. */
+	unsigned long lflags;
+	/*!< Current thread. */
+	struct xnthread *curr;
+#ifdef CONFIG_SMP
+	/*!< Owner CPU id. */
+	int cpu;
+	/*!< Mask of CPUs needing rescheduling. */
+	cpumask_t resched;
+#endif
+	/*!< Context of built-in real-time class. */
+	struct xnsched_rt rt;
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	/*!< Context of weak scheduling class. */
+	struct xnsched_weak weak;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	/*!< Context of TP class. */
+	struct xnsched_tp tp;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	/*!< Context of sporadic scheduling class. */
+	struct xnsched_sporadic pss;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	/*!< Context of runtime quota scheduling. */
+	struct xnsched_quota quota;
+#endif
+	/*!< Interrupt nesting level. */
+	volatile unsigned inesting;
+	/*!< Host timer. */
+	struct xntimer htimer;
+	/*!< Round-robin timer. */
+	struct xntimer rrbtimer;
+	/*!< Root thread control block. */
+	struct xnthread rootcb;
+#ifdef CONFIG_XENO_ARCH_FPU
+	/*!< Thread owning the current FPU context. */
+	struct xnthread *fpuholder;
+#endif
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	/*!< Watchdog timer object. */
+	struct xntimer wdtimer;
+#endif
+#ifdef CONFIG_XENO_OPT_STATS
+	/*!< Last account switch date (ticks). */
+	xnticks_t last_account_switch;
+	/*!< Currently active account */
+	xnstat_exectime_t *current_account;
+#endif
+};
+
+DECLARE_PER_CPU(struct xnsched, nksched);
+
+extern cpumask_t cobalt_cpu_affinity;
+
+extern struct list_head nkthreadq;
+
+extern int cobalt_nrthreads;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+extern struct xnvfile_rev_tag nkthreadlist_tag;
+#endif
+
+union xnsched_policy_param;
+
+struct xnsched_class {
+	void (*sched_init)(struct xnsched *sched);
+	void (*sched_enqueue)(struct xnthread *thread);
+	void (*sched_dequeue)(struct xnthread *thread);
+	void (*sched_requeue)(struct xnthread *thread);
+	struct xnthread *(*sched_pick)(struct xnsched *sched);
+	void (*sched_tick)(struct xnsched *sched);
+	void (*sched_rotate)(struct xnsched *sched,
+			     const union xnsched_policy_param *p);
+	void (*sched_migrate)(struct xnthread *thread,
+			      struct xnsched *sched);
+	int (*sched_chkparam)(struct xnthread *thread,
+			      const union xnsched_policy_param *p);
+	/**
+	 * Set base scheduling parameters. This routine is indirectly
+	 * called upon a change of base scheduling settings through
+	 * __xnthread_set_schedparam() -> xnsched_set_policy(),
+	 * exclusively.
+	 *
+	 * The scheduling class implementation should do the necessary
+	 * housekeeping to comply with the new settings.
+	 * thread->base_class is up to date before the call is made,
+	 * and should be considered for the new weighted priority
+	 * calculation. On the contrary, thread->sched_class should
+	 * NOT be referred to by this handler.
+	 *
+	 * sched_setparam() is NEVER involved in PI or PP
+	 * management. However, it must deny a priority update if it
+	 * contradicts an ongoing boost for @a thread. This is
+	 * typically what the xnsched_set_effective_priority() helper
+	 * does for such a handler.
+	 *
+	 * @param thread Affected thread.
+	 * @param p New base policy settings.
+	 *
+	 * @return True if the effective priority was updated
+	 * (thread->cprio).
+	 */
+	bool (*sched_setparam)(struct xnthread *thread,
+			       const union xnsched_policy_param *p);
+	void (*sched_getparam)(struct xnthread *thread,
+			       union xnsched_policy_param *p);
+	void (*sched_trackprio)(struct xnthread *thread,
+				const union xnsched_policy_param *p);
+	void (*sched_protectprio)(struct xnthread *thread, int prio);
+	int (*sched_declare)(struct xnthread *thread,
+			     const union xnsched_policy_param *p);
+	void (*sched_forget)(struct xnthread *thread);
+	void (*sched_kick)(struct xnthread *thread);
+#ifdef CONFIG_XENO_OPT_VFILE
+	int (*sched_init_vfile)(struct xnsched_class *schedclass,
+				struct xnvfile_directory *vfroot);
+	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
+#endif
+	int nthreads;
+	struct xnsched_class *next;
+	int weight;
+	int policy;
+	const char *name;
+};
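+
+/*
+ * Illustrative sketch of the sched_setparam() contract documented
+ * above, for a hypothetical class reusing the RT priority scale: the
+ * priority update is delegated to xnsched_set_effective_priority(),
+ * which refuses to override an ongoing PI/PP boost. demo_setparam is
+ * hypothetical.
+ *
+ *	static bool demo_setparam(struct xnthread *thread,
+ *				  const union xnsched_policy_param *p)
+ *	{
+ *		return xnsched_set_effective_priority(thread, p->rt.prio);
+ *	}
+ */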
+
+#define XNSCHED_CLASS_WEIGHT(n)		((n) * XNSCHED_CLASS_WEIGHT_FACTOR)
+
+/* Placeholder for current thread priority */
+#define XNSCHED_RUNPRIO   0x80000000
+
+#define xnsched_for_each_thread(__thread)	\
+	list_for_each_entry(__thread, &nkthreadq, glink)
+
+#ifdef CONFIG_SMP
+static inline int xnsched_cpu(struct xnsched *sched)
+{
+	return sched->cpu;
+}
+#else /* !CONFIG_SMP */
+static inline int xnsched_cpu(struct xnsched *sched)
+{
+	return 0;
+}
+#endif /* CONFIG_SMP */
+
+static inline struct xnsched *xnsched_struct(int cpu)
+{
+	return &per_cpu(nksched, cpu);
+}
+
+static inline struct xnsched *xnsched_current(void)
+{
+	/* IRQs off */
+	return raw_cpu_ptr(&nksched);
+}
+
+static inline struct xnthread *xnsched_current_thread(void)
+{
+	return xnsched_current()->curr;
+}
+
+/* Test resched flag of given sched. */
+static inline int xnsched_resched_p(struct xnsched *sched)
+{
+	return sched->status & XNRESCHED;
+}
+
+/* Set self resched flag for the current scheduler. */
+static inline void xnsched_set_self_resched(struct xnsched *sched)
+{
+	sched->status |= XNRESCHED;
+}
+
+/* Set resched flag for the given scheduler. */
+#ifdef CONFIG_SMP
+
+static inline void xnsched_set_resched(struct xnsched *sched)
+{
+	struct xnsched *current_sched = xnsched_current();
+
+	if (current_sched == sched)
+		current_sched->status |= XNRESCHED;
+	else if (!xnsched_resched_p(sched)) {
+		cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
+		sched->status |= XNRESCHED;
+		current_sched->status |= XNRESCHED;
+	}
+}
+
+#define xnsched_realtime_cpus    cobalt_pipeline.supported_cpus
+
+static inline int xnsched_supported_cpu(int cpu)
+{
+	return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
+}
+
+static inline int xnsched_threading_cpu(int cpu)
+{
+	return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
+}
+
+#else /* !CONFIG_SMP */
+
+static inline void xnsched_set_resched(struct xnsched *sched)
+{
+	xnsched_set_self_resched(sched);
+}
+
+#define xnsched_realtime_cpus CPU_MASK_ALL
+
+static inline int xnsched_supported_cpu(int cpu)
+{
+	return 1;
+}
+
+static inline int xnsched_threading_cpu(int cpu)
+{
+	return 1;
+}
+
+#endif /* !CONFIG_SMP */
+
+#define for_each_realtime_cpu(cpu)		\
+	for_each_online_cpu(cpu)		\
+		if (xnsched_supported_cpu(cpu))	\
+
+int ___xnsched_run(struct xnsched *sched);
+
+void __xnsched_run_handler(void);
+
+static inline int __xnsched_run(struct xnsched *sched)
+{
+	/*
+	 * Reschedule if XNRESCHED is pending, but never over an IRQ
+	 * handler or in the middle of an unlocked context switch.
+	 */
+	if (((sched->status|sched->lflags) &
+	     (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
+		return 0;
+
+	return pipeline_schedule(sched);
+}
+
+static inline int xnsched_run(void)
+{
+	struct xnsched *sched = xnsched_current();
+	/*
+	 * sched->curr is shared locklessly with ___xnsched_run().
+	 * READ_ONCE() makes sure the compiler never uses load tearing
+	 * for reading this pointer piecemeal, so that multiple stores
+	 * occurring concurrently on remote CPUs never yield a
+	 * spurious merged value on the local one.
+	 */
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	/*
+	 * If running over the root thread, hard irqs must be off
+	 * (asserted out of line in ___xnsched_run()).
+	 */
+	return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
+}
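+
+/*
+ * Usage sketch (illustrative only): the usual pattern is to change
+ * thread or scheduler state while holding nklock, then call
+ * xnsched_run() once to commit any rescheduling raised by those
+ * changes. The thread pointer is assumed to be a valid struct
+ * xnthread currently sleeping on a delay.
+ *
+ *	spl_t s;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	xnthread_resume(thread, XNDELAY);	// may raise XNRESCHED
+ *	xnsched_run();				// pick the next thread, if needed
+ *	xnlock_put_irqrestore(&nklock, s);
+ */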
+
+void xnsched_lock(void);
+
+void xnsched_unlock(void);
+
+static inline int xnsched_interrupt_p(void)
+{
+	return xnsched_current()->lflags & XNINIRQ;
+}
+
+static inline int xnsched_root_p(void)
+{
+	return xnthread_test_state(xnsched_current_thread(), XNROOT);
+}
+
+static inline int xnsched_unblockable_p(void)
+{
+	return xnsched_interrupt_p() || xnsched_root_p();
+}
+
+static inline int xnsched_primary_p(void)
+{
+	return !xnsched_unblockable_p();
+}
+
+bool xnsched_set_effective_priority(struct xnthread *thread,
+				    int prio);
+
+#include <cobalt/kernel/sched-idle.h>
+#include <cobalt/kernel/sched-rt.h>
+
+int xnsched_init_proc(void);
+
+void xnsched_cleanup_proc(void);
+
+void xnsched_register_classes(void);
+
+void xnsched_init_all(void);
+
+void xnsched_destroy_all(void);
+
+struct xnthread *xnsched_pick_next(struct xnsched *sched);
+
+void xnsched_putback(struct xnthread *thread);
+
+int xnsched_set_policy(struct xnthread *thread,
+		       struct xnsched_class *sched_class,
+		       const union xnsched_policy_param *p);
+
+void xnsched_track_policy(struct xnthread *thread,
+			  struct xnthread *target);
+
+void xnsched_protect_priority(struct xnthread *thread,
+			      int prio);
+
+void xnsched_migrate(struct xnthread *thread,
+		     struct xnsched *sched);
+
+void xnsched_migrate_passive(struct xnthread *thread,
+			     struct xnsched *sched);
+
+/**
+ * @fn void xnsched_rotate(struct xnsched *sched, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
+ * @brief Rotate a scheduler runqueue.
+ *
+ * The specified scheduling class is requested to rotate its runqueue
+ * for the given scheduler. Rotation is performed according to the
+ * scheduling parameter specified by @a sched_param.
+ *
+ * @note The nucleus supports round-robin scheduling for the members
+ * of the RT class.
+ *
+ * @param sched The per-CPU scheduler hosting the target scheduling
+ * class.
+ *
+ * @param sched_class The scheduling class which should rotate its
+ * runqueue.
+ *
+ * @param sched_param The scheduling parameter providing rotation
+ * information to the specified scheduling class.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+static inline void xnsched_rotate(struct xnsched *sched,
+				  struct xnsched_class *sched_class,
+				  const union xnsched_policy_param *sched_param)
+{
+	sched_class->sched_rotate(sched, sched_param);
+}
+
+static inline int xnsched_init_thread(struct xnthread *thread)
+{
+	int ret = 0;
+
+	xnsched_idle_init_thread(thread);
+	xnsched_rt_init_thread(thread);
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	ret = xnsched_tp_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_TP */
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	ret = xnsched_sporadic_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	ret = xnsched_quota_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
+
+	return ret;
+}
+
+static inline int xnsched_root_priority(struct xnsched *sched)
+{
+	return sched->rootcb.cprio;
+}
+
+static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
+{
+	return sched->rootcb.sched_class;
+}
+
+static inline void xnsched_tick(struct xnsched *sched)
+{
+	struct xnthread *curr = sched->curr;
+	struct xnsched_class *sched_class = curr->sched_class;
+	/*
+	 * A thread that undergoes round-robin scheduling only
+	 * consumes its time slice when it runs within its own
+	 * scheduling class, which excludes temporary PI boosts, and
+	 * does not hold the scheduler lock.
+	 */
+	if (sched_class == curr->base_class &&
+	    sched_class->sched_tick &&
+	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
+		curr->lock_count == 0)
+		sched_class->sched_tick(sched);
+}
+
+static inline int xnsched_chkparam(struct xnsched_class *sched_class,
+				   struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	if (sched_class->sched_chkparam)
+		return sched_class->sched_chkparam(thread, p);
+
+	return 0;
+}
+
+static inline int xnsched_declare(struct xnsched_class *sched_class,
+				  struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	int ret;
+
+	if (sched_class->sched_declare) {
+		ret = sched_class->sched_declare(thread, p);
+		if (ret)
+			return ret;
+	}
+	if (sched_class != thread->base_class)
+		sched_class->nthreads++;
+
+	return 0;
+}
+
+static inline int xnsched_calc_wprio(struct xnsched_class *sched_class,
+				     int prio)
+{
+	return prio + sched_class->weight;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+static inline void xnsched_enqueue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_enqueue(thread);
+}
+
+static inline void xnsched_dequeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_dequeue(thread);
+}
+
+static inline void xnsched_requeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_requeue(thread);
+}
+
+static inline
+bool xnsched_setparam(struct xnthread *thread,
+		      const union xnsched_policy_param *p)
+{
+	return thread->base_class->sched_setparam(thread, p);
+}
+
+static inline void xnsched_getparam(struct xnthread *thread,
+				    union xnsched_policy_param *p)
+{
+	thread->sched_class->sched_getparam(thread, p);
+}
+
+static inline void xnsched_trackprio(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	thread->sched_class->sched_trackprio(thread, p);
+	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
+}
+
+static inline void xnsched_protectprio(struct xnthread *thread, int prio)
+{
+	thread->sched_class->sched_protectprio(thread, prio);
+	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
+}
+
+static inline void xnsched_forget(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	--sched_class->nthreads;
+
+	if (sched_class->sched_forget)
+		sched_class->sched_forget(thread);
+}
+
+static inline void xnsched_kick(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	xnthread_set_info(thread, XNKICKED);
+
+	if (sched_class->sched_kick)
+		sched_class->sched_kick(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+
+/*
+ * If only the RT and IDLE scheduling classes are compiled in, we can
+ * fully inline common helpers for dealing with those.
+ */
+
+static inline void xnsched_enqueue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_enqueue(thread);
+}
+
+static inline void xnsched_dequeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_dequeue(thread);
+}
+
+static inline void xnsched_requeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_requeue(thread);
+}
+
+static inline bool xnsched_setparam(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	if (sched_class == &xnsched_class_idle)
+		return __xnsched_idle_setparam(thread, p);
+
+	return __xnsched_rt_setparam(thread, p);
+}
+
+static inline void xnsched_getparam(struct xnthread *thread,
+				    union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_getparam(thread, p);
+	else
+		__xnsched_rt_getparam(thread, p);
+}
+
+static inline void xnsched_trackprio(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_trackprio(thread, p);
+	else
+		__xnsched_rt_trackprio(thread, p);
+
+	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+}
+
+static inline void xnsched_protectprio(struct xnthread *thread, int prio)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_protectprio(thread, prio);
+	else
+		__xnsched_rt_protectprio(thread, prio);
+
+	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+}
+
+static inline void xnsched_forget(struct xnthread *thread)
+{
+	--thread->base_class->nthreads;
+	__xnsched_rt_forget(thread);
+}
+
+static inline void xnsched_kick(struct xnthread *thread)
+{
+	xnthread_set_info(thread, XNKICKED);
+	xnsched_set_resched(thread->sched);
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_H */
+++ linux-patched/include/xenomai/cobalt/kernel/map.h	2022-03-21 12:58:31.713866299 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-idle.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_MAP_H
+#define _COBALT_KERNEL_MAP_H
+
+#include <asm/bitsperlong.h>
+
+/**
+ * @addtogroup cobalt_core_map
+ * @{
+ */
+
+#define XNMAP_MAX_KEYS	(BITS_PER_LONG * BITS_PER_LONG)
+
+struct xnmap {
+	int nkeys;
+	int ukeys;
+	int offset;
+	unsigned long himask;
+	unsigned long himap;
+#define __IDMAP_LONGS	((XNMAP_MAX_KEYS+BITS_PER_LONG-1)/BITS_PER_LONG)
+	unsigned long lomap[__IDMAP_LONGS];
+#undef __IDMAP_LONGS
+	void *objarray[1];
+};
+
+struct xnmap *xnmap_create(int nkeys,
+			   int reserve,
+			   int offset);
+
+void xnmap_delete(struct xnmap *map);
+
+int xnmap_enter(struct xnmap *map,
+		int key,
+		void *objaddr);
+
+int xnmap_remove(struct xnmap *map,
+		 int key);
+
+static inline void *xnmap_fetch_nocheck(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset;
+	return map->objarray[ofkey];
+}
+
+static inline void *xnmap_fetch(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset;
+
+	if (ofkey < 0 || ofkey >= map->nkeys)
+		return NULL;
+
+	return map->objarray[ofkey];
+}
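+
+/*
+ * Usage sketch (illustrative only): indexing kernel objects by small
+ * integer keys for O(1) lookup. This sketch assumes that passing an
+ * explicit, unused key within the valid range to xnmap_enter()
+ * succeeds and that a negative value is returned on error; demo_map
+ * and demo_object are hypothetical.
+ *
+ *	struct xnmap *demo_map;
+ *	void *obj;
+ *	int ret;
+ *
+ *	demo_map = xnmap_create(256, 0, 1);	// keys 1..256, none reserved
+ *	if (demo_map == NULL)
+ *		return -ENOMEM;
+ *
+ *	ret = xnmap_enter(demo_map, 1, demo_object);
+ *	if (ret < 0)
+ *		return ret;
+ *
+ *	obj = xnmap_fetch(demo_map, 1);		// range-checked lookup
+ *	xnmap_remove(demo_map, 1);
+ *	xnmap_delete(demo_map);
+ */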
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_MAP_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-idle.h	2022-03-21 12:58:31.705866377 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-weak.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_IDLE_H
+#define _COBALT_KERNEL_SCHED_IDLE_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-idle.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/* Idle priority level - actually never used for indexing. */
+#define XNSCHED_IDLE_PRIO	-1
+
+extern struct xnsched_class xnsched_class_idle;
+
+static inline bool __xnsched_idle_setparam(struct xnthread *thread,
+					   const union xnsched_policy_param *p)
+{
+	xnthread_clear_state(thread, XNWEAK);
+	return xnsched_set_effective_priority(thread, p->idle.prio);
+}
+
+static inline void __xnsched_idle_getparam(struct xnthread *thread,
+					   union xnsched_policy_param *p)
+{
+	p->idle.prio = thread->cprio;
+}
+
+static inline void __xnsched_idle_trackprio(struct xnthread *thread,
+					    const union xnsched_policy_param *p)
+{
+	if (p)
+		/* Inheriting a priority-less class makes no sense. */
+		XENO_WARN_ON_ONCE(COBALT, 1);
+	else
+		thread->cprio = XNSCHED_IDLE_PRIO;
+}
+
+static inline void __xnsched_idle_protectprio(struct xnthread *thread, int prio)
+{
+	XENO_WARN_ON_ONCE(COBALT, 1);
+}
+
+static inline int xnsched_idle_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_IDLE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-weak.h	2022-03-21 12:58:31.698866445 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/arith.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_WEAK_H
+#define _COBALT_KERNEL_SCHED_WEAK_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-weak.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+
+#define XNSCHED_WEAK_MIN_PRIO	0
+#define XNSCHED_WEAK_MAX_PRIO	99
+#define XNSCHED_WEAK_NR_PRIO	\
+	(XNSCHED_WEAK_MAX_PRIO - XNSCHED_WEAK_MIN_PRIO + 1)
+
+#if XNSCHED_WEAK_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||	\
+	(defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&		\
+	 XNSCHED_WEAK_NR_PRIO > XNSCHED_MLQ_LEVELS)
+#error "WEAK class has too many priority levels"
+#endif
+
+extern struct xnsched_class xnsched_class_weak;
+
+struct xnsched_weak {
+	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
+};
+
+static inline int xnsched_weak_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_WEAK */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_WEAK_H */
+++ linux-patched/include/xenomai/cobalt/kernel/arith.h	2022-03-21 12:58:31.691866513 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-tp.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ *   Generic arithmetic/conversion routines.
+ *   Copyright &copy; 2005 Stelian Pop.
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ *   Xenomai is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ARITH_H
+#define _COBALT_KERNEL_ARITH_H
+
+#include <asm/byteorder.h>
+#include <asm/div64.h>
+
+#ifdef __BIG_ENDIAN
+#define endianstruct { unsigned int _h; unsigned int _l; }
+#else /* __LITTLE_ENDIAN */
+#define endianstruct { unsigned int _l; unsigned int _h; }
+#endif
+
+#include <asm/xenomai/uapi/arith.h>
+
+#endif /* _COBALT_KERNEL_ARITH_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-tp.h	2022-03-21 12:58:31.683866592 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/ppd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_TP_H
+#define _COBALT_KERNEL_SCHED_TP_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-tp.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+
+#define XNSCHED_TP_MIN_PRIO	1
+#define XNSCHED_TP_MAX_PRIO	255
+#define XNSCHED_TP_NR_PRIO	\
+	(XNSCHED_TP_MAX_PRIO - XNSCHED_TP_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_tp;
+
+struct xnsched_tp_window {
+	xnticks_t w_offset;
+	int w_part;
+};
+
+struct xnsched_tp_schedule {
+	int pwin_nr;
+	xnticks_t tf_duration;
+	atomic_t refcount;
+	struct xnsched_tp_window pwins[0];
+};
+
+struct xnsched_tp {
+	struct xnsched_tpslot {
+		/** Per-partition runqueue. */
+		xnsched_queue_t runnable;
+	} partitions[CONFIG_XENO_OPT_SCHED_TP_NRPART];
+	/** Idle slot for passive windows. */
+	struct xnsched_tpslot idle;
+	/** Active partition slot */
+	struct xnsched_tpslot *tps;
+	/** Time frame timer */
+	struct xntimer tf_timer;
+	/** Global partition schedule */
+	struct xnsched_tp_schedule *gps;
+	/** Window index of next partition */
+	int wnext;
+	/** Start of next time frame */
+	xnticks_t tf_start;
+	/** Assigned thread queue */
+	struct list_head threads;
+};
+
+static inline int xnsched_tp_init_thread(struct xnthread *thread)
+{
+	thread->tps = NULL;
+
+	return 0;
+}
+
+struct xnsched_tp_schedule *
+xnsched_tp_set_schedule(struct xnsched *sched,
+			struct xnsched_tp_schedule *gps);
+
+void xnsched_tp_start_schedule(struct xnsched *sched);
+
+void xnsched_tp_stop_schedule(struct xnsched *sched);
+
+int xnsched_tp_get_partition(struct xnsched *sched);
+
+struct xnsched_tp_schedule *
+xnsched_tp_get_schedule(struct xnsched *sched);
+
+void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps);
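+
+/*
+ * Usage sketch (illustrative only): installing a two-window, 1 ms
+ * time frame on a per-CPU scheduler. The schedule descriptor is
+ * assumed to be heap-allocated by the caller with its trailing
+ * pwins[] window array and its refcount initialized; window offsets
+ * are assumed relative to the start of the time frame, with
+ * durations in nanoseconds, and xnsched_tp_set_schedule() is assumed
+ * to hand back the previous schedule, which is then released.
+ * Locking requirements are not shown.
+ *
+ *	struct xnsched_tp_schedule *gps, *old;
+ *
+ *	gps = xnmalloc(sizeof(*gps) + 2 * sizeof(gps->pwins[0]));
+ *	if (gps == NULL)
+ *		return -ENOMEM;
+ *
+ *	gps->pwin_nr = 2;
+ *	gps->tf_duration = 1000000;		// 1 ms frame
+ *	gps->pwins[0].w_offset = 0;		// partition #0 runs first
+ *	gps->pwins[0].w_part = 0;
+ *	gps->pwins[1].w_offset = 500000;	// partition #1 after 500 us
+ *	gps->pwins[1].w_part = 1;
+ *	atomic_set(&gps->refcount, 1);
+ *
+ *	old = xnsched_tp_set_schedule(sched, gps);
+ *	if (old)
+ *		xnsched_tp_put_schedule(old);
+ *	xnsched_tp_start_schedule(sched);
+ */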
+
+#endif /* CONFIG_XENO_OPT_SCHED_TP */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_TP_H */
+++ linux-patched/include/xenomai/cobalt/kernel/ppd.h	2022-03-21 12:58:31.676866660 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/compat.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright &copy; 2006 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_PPD_H
+#define _COBALT_KERNEL_PPD_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/heap.h>
+
+struct cobalt_umm {
+	struct xnheap heap;
+	atomic_t refcount;
+	void (*release)(struct cobalt_umm *umm);
+};
+
+struct cobalt_ppd {
+	struct cobalt_umm umm;
+	atomic_t refcnt;
+	char *exe_path;
+	struct rb_root fds;
+};
+
+extern struct cobalt_ppd cobalt_kernel_ppd;
+
+#endif /* _COBALT_KERNEL_PPD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/compat.h	2022-03-21 12:58:31.668866738 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/assert.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_COMPAT_H
+#define _COBALT_KERNEL_COMPAT_H
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <linux/compat.h>
+#include <net/compat.h>
+#include <asm/xenomai/wrappers.h>
+#include <cobalt/uapi/sched.h>
+
+struct mq_attr;
+
+struct __compat_sched_ss_param {
+	int __sched_low_priority;
+	struct old_timespec32 __sched_repl_period;
+	struct old_timespec32 __sched_init_budget;
+	int __sched_max_repl;
+};
+
+struct __compat_sched_rr_param {
+	struct old_timespec32 __sched_rr_quantum;
+};
+
+struct compat_sched_param_ex {
+	int sched_priority;
+	union {
+		struct __compat_sched_ss_param ss;
+		struct __compat_sched_rr_param rr;
+		struct __sched_tp_param tp;
+		struct __sched_quota_param quota;
+	} sched_u;
+};
+
+struct compat_mq_attr {
+	compat_long_t mq_flags;
+	compat_long_t mq_maxmsg;
+	compat_long_t mq_msgsize;
+	compat_long_t mq_curmsgs;
+};
+
+struct compat_sched_tp_window {
+	struct old_timespec32 offset;
+	struct old_timespec32 duration;
+	int ptid;
+};
+
+struct __compat_sched_config_tp {
+	int op;
+	int nr_windows;
+	struct compat_sched_tp_window windows[0];
+};
+
+union compat_sched_config {
+	struct __compat_sched_config_tp tp;
+	struct __sched_config_quota quota;
+};
+
+#define compat_sched_tp_confsz(nr_win) \
+  (sizeof(struct __compat_sched_config_tp) + (nr_win) * sizeof(struct compat_sched_tp_window))
+
+typedef struct {
+	compat_ulong_t fds_bits[__FD_SETSIZE / (8 * sizeof(compat_long_t))];
+} compat_fd_set;
+
+struct compat_rtdm_mmap_request {
+	u64 offset;
+	compat_size_t length;
+	int prot;
+	int flags;
+};
+
+int sys32_get_timespec(struct timespec64 *ts,
+		       const struct old_timespec32 __user *cts);
+
+int sys32_put_timespec(struct old_timespec32 __user *cts,
+		       const struct timespec64 *ts);
+
+int sys32_get_itimerspec(struct itimerspec64 *its,
+			 const struct old_itimerspec32 __user *cits);
+
+int sys32_put_itimerspec(struct old_itimerspec32 __user *cits,
+			 const struct itimerspec64 *its);
+
+int sys32_get_timeval(struct __kernel_old_timeval *tv,
+		      const struct old_timeval32 __user *ctv);
+
+int sys32_put_timeval(struct old_timeval32 __user *ctv,
+		      const struct __kernel_old_timeval *tv);
+
+int sys32_get_timex(struct __kernel_timex *tx,
+		    const struct old_timex32 __user *ctx);
+
+int sys32_put_timex(struct old_timex32 __user *ctx,
+		    const struct __kernel_timex *tx);
+
+int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds,
+		    size_t cfdsize);
+
+int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds,
+		    size_t fdsize);
+
+int sys32_get_param_ex(int policy,
+		       struct sched_param_ex *p,
+		       const struct compat_sched_param_ex __user *u_cp);
+
+int sys32_put_param_ex(int policy,
+		       struct compat_sched_param_ex __user *u_cp,
+		       const struct sched_param_ex *p);
+
+int sys32_get_mqattr(struct mq_attr *ap,
+		     const struct compat_mq_attr __user *u_cap);
+
+int sys32_put_mqattr(struct compat_mq_attr __user *u_cap,
+		     const struct mq_attr *ap);
+
+int sys32_get_sigevent(struct sigevent *ev,
+		       const struct compat_sigevent __user *u_cev);
+
+int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset);
+
+int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set);
+
+int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval);
+
+int sys32_put_siginfo(void __user *u_si, const struct siginfo *si,
+		      int overrun);
+
+int sys32_get_msghdr(struct user_msghdr *msg,
+		     const struct compat_msghdr __user *u_cmsg);
+
+int sys32_get_mmsghdr(struct mmsghdr *mmsg,
+		      const struct compat_mmsghdr __user *u_cmmsg);
+
+int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg,
+		     const struct user_msghdr *msg);
+
+int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg,
+		     const struct mmsghdr *mmsg);
+
+int sys32_get_iovec(struct iovec *iov,
+		    const struct compat_iovec __user *ciov,
+		    int ciovlen);
+
+int sys32_put_iovec(struct compat_iovec __user *u_ciov,
+		    const struct iovec *iov,
+		    int iovlen);
+
+#endif /* CONFIG_XENO_ARCH_SYS3264 */
+
+#endif /* !_COBALT_KERNEL_COMPAT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/assert.h	2022-03-21 12:58:31.661866806 +0100
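The compat_sched_tp_confsz() helper above exists because the 32-bit TP configuration ends with a variable-length window array. A minimal sketch of how such a configuration could be pulled from a compat caller follows; it is illustrative only (the function name and error policy are made up), and assumes CONFIG_XENO_ARCH_SYS3264.

/* Illustrative only -- not part of the patch. */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <cobalt/kernel/compat.h>

static union compat_sched_config *
fetch_compat_tp_config(const void __user *u_config, int nr_windows)
{
	size_t len = compat_sched_tp_confsz(nr_windows);
	union compat_sched_config *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(buf, u_config, len)) {
		kfree(buf);
		return ERR_PTR(-EFAULT);
	}

	/* buf->tp.windows[0..nr_windows-1] is now valid kernel data. */
	return buf;
}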
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/timer.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ASSERT_H
+#define _COBALT_KERNEL_ASSERT_H
+
+#include <linux/kconfig.h>
+
+#define XENO_INFO	KERN_INFO    "[Xenomai] "
+#define XENO_WARNING	KERN_WARNING "[Xenomai] "
+#define XENO_ERR	KERN_ERR     "[Xenomai] "
+
+#define XENO_DEBUG(__subsys)				\
+	IS_ENABLED(CONFIG_XENO_OPT_DEBUG_##__subsys)
+#define XENO_ASSERT(__subsys, __cond)			\
+	(!WARN_ON(XENO_DEBUG(__subsys) && !(__cond)))
+#define XENO_BUG(__subsys)				\
+	BUG_ON(XENO_DEBUG(__subsys))
+#define XENO_BUG_ON(__subsys, __cond)			\
+	BUG_ON(XENO_DEBUG(__subsys) && (__cond))
+#define XENO_WARN(__subsys, __cond, __fmt...)		\
+	WARN(XENO_DEBUG(__subsys) && (__cond), __fmt)
+#define XENO_WARN_ON(__subsys, __cond)			\
+	WARN_ON(XENO_DEBUG(__subsys) && (__cond))
+#define XENO_WARN_ON_ONCE(__subsys, __cond)		\
+	WARN_ON_ONCE(XENO_DEBUG(__subsys) && (__cond))
+#ifdef CONFIG_SMP
+#define XENO_BUG_ON_SMP(__subsys, __cond)		\
+	XENO_BUG_ON(__subsys, __cond)
+#define XENO_WARN_ON_SMP(__subsys, __cond)		\
+	XENO_WARN_ON(__subsys, __cond)
+#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond)		\
+	XENO_WARN_ON_ONCE(__subsys, __cond)
+#else
+#define XENO_BUG_ON_SMP(__subsys, __cond)		\
+	do { } while (0)
+#define XENO_WARN_ON_SMP(__subsys, __cond)		\
+	do { } while (0)
+#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond)		\
+	do { } while (0)
+#endif
+
+#define TODO()    BUILD_BUG_ON(IS_ENABLED(CONFIG_XENO_TODO))
+
+#define primary_mode_only()	XENO_BUG_ON(CONTEXT, is_secondary_domain())
+#define secondary_mode_only()	XENO_BUG_ON(CONTEXT, !is_secondary_domain())
+#define interrupt_only()	XENO_BUG_ON(CONTEXT, !xnsched_interrupt_p())
+#define realtime_cpu_only()	XENO_BUG_ON(CONTEXT, !xnsched_supported_cpu(raw_smp_processor_id()))
+#define thread_only()		XENO_BUG_ON(CONTEXT, xnsched_interrupt_p())
+#define irqoff_only()		XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0)
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+#define atomic_only()		XENO_BUG_ON(CONTEXT, (xnlock_is_owner(&nklock) && hard_irqs_disabled()) == 0)
+#define preemptible_only()	XENO_BUG_ON(CONTEXT, xnlock_is_owner(&nklock) || hard_irqs_disabled())
+#else
+#define atomic_only()		XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0)
+#define preemptible_only()	XENO_BUG_ON(CONTEXT, hard_irqs_disabled() != 0)
+#endif
+
+#endif /* !_COBALT_KERNEL_ASSERT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/timer.h	2022-03-21 12:58:31.654866874 +0100
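The assertion macros above are keyed on per-subsystem debug switches (CONFIG_XENO_OPT_DEBUG_<subsys>), so they compile away entirely when the corresponding option is off. A small illustrative sketch, not part of the patch, with a made-up function:

/* Illustrative only -- not part of the patch. */
#include <cobalt/kernel/assert.h>

struct xnthread;

static void enter_timed_wait(struct xnthread *thread)
{
	primary_mode_only();	/* XENO_BUG_ON(CONTEXT, ...) under the hood */

	/* With CONFIG_XENO_OPT_DEBUG_COBALT=n, this test is constant and dropped. */
	if (!XENO_ASSERT(COBALT, thread != NULL))
		return;		/* warning already emitted when debugging is on */

	/* ... */
}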
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/init.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _COBALT_KERNEL_TIMER_H
+#define _COBALT_KERNEL_TIMER_H
+
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @addtogroup cobalt_core_timer
+ * @{
+ */
+#define XN_INFINITE   ((xnticks_t)0)
+#define XN_NONBLOCK   ((xnticks_t)-1)
+
+/* Timer modes */
+typedef enum xntmode {
+	XN_RELATIVE,
+	XN_ABSOLUTE,
+	XN_REALTIME
+} xntmode_t;
+
+/* Timer status */
+#define XNTIMER_DEQUEUED  0x00000001
+#define XNTIMER_KILLED    0x00000002
+#define XNTIMER_PERIODIC  0x00000004
+#define XNTIMER_REALTIME  0x00000008
+#define XNTIMER_FIRED     0x00000010
+#define XNTIMER_RUNNING   0x00000020
+#define XNTIMER_KGRAVITY  0x00000040
+#define XNTIMER_UGRAVITY  0x00000080
+#define XNTIMER_IGRAVITY  0	     /* most conservative */
+
+#define XNTIMER_GRAVITY_MASK	(XNTIMER_KGRAVITY|XNTIMER_UGRAVITY)
+#define XNTIMER_INIT_MASK	XNTIMER_GRAVITY_MASK
+
+/* These flags are available to the real-time interfaces */
+#define XNTIMER_SPARE0  0x01000000
+#define XNTIMER_SPARE1  0x02000000
+#define XNTIMER_SPARE2  0x04000000
+#define XNTIMER_SPARE3  0x08000000
+#define XNTIMER_SPARE4  0x10000000
+#define XNTIMER_SPARE5  0x20000000
+#define XNTIMER_SPARE6  0x40000000
+#define XNTIMER_SPARE7  0x80000000
+
+/* Timer priorities */
+#define XNTIMER_LOPRIO  (-999999999)
+#define XNTIMER_STDPRIO 0
+#define XNTIMER_HIPRIO  999999999
+
+struct xntlholder {
+	struct list_head link;
+	xnticks_t key;
+	int prio;
+};
+
+#define xntlholder_date(h)	((h)->key)
+#define xntlholder_prio(h)	((h)->prio)
+#define xntlist_init(q)		INIT_LIST_HEAD(q)
+#define xntlist_empty(q)	list_empty(q)
+
+static inline struct xntlholder *xntlist_head(struct list_head *q)
+{
+	if (list_empty(q))
+		return NULL;
+
+	return list_first_entry(q, struct xntlholder, link);
+}
+
+static inline struct xntlholder *xntlist_next(struct list_head *q,
+					      struct xntlholder *h)
+{
+	if (list_is_last(&h->link, q))
+		return NULL;
+
+	return list_entry(h->link.next, struct xntlholder, link);
+}
+
+static inline struct xntlholder *xntlist_second(struct list_head *q,
+	struct xntlholder *h)
+{
+	return xntlist_next(q, h);
+}
+
+static inline void xntlist_insert(struct list_head *q, struct xntlholder *holder)
+{
+	struct xntlholder *p;
+
+	if (list_empty(q)) {
+		list_add(&holder->link, q);
+		return;
+	}
+
+	/*
+	 * Insert the new timer at the proper place in the single
+	 * queue. O(N) here, but this is the price for the increased
+	 * flexibility...
+	 */
+	list_for_each_entry_reverse(p, q, link) {
+		if ((xnsticks_t) (holder->key - p->key) > 0 ||
+		    (holder->key == p->key && holder->prio <= p->prio))
+		  break;
+	}
+
+	list_add(&holder->link, &p->link);
+}
+
+#define xntlist_remove(q, h)			\
+	do {					\
+		(void)(q);			\
+		list_del(&(h)->link);		\
+	} while (0)
+
+#if defined(CONFIG_XENO_OPT_TIMER_RBTREE)
+
+#include <linux/rbtree.h>
+
+typedef struct {
+	unsigned long long date;
+	unsigned prio;
+	struct rb_node link;
+} xntimerh_t;
+
+#define xntimerh_date(h) ((h)->date)
+#define xntimerh_prio(h) ((h)->prio)
+#define xntimerh_init(h) do { } while (0)
+
+typedef struct {
+	struct rb_root root;
+	xntimerh_t *head;
+} xntimerq_t;
+
+#define xntimerq_init(q)			\
+	({					\
+		xntimerq_t *_q = (q);		\
+		_q->root = RB_ROOT;		\
+		_q->head = NULL;		\
+	})
+
+#define xntimerq_destroy(q) do { } while (0)
+#define xntimerq_empty(q) ((q)->head == NULL)
+
+#define xntimerq_head(q) ((q)->head)
+
+#define xntimerq_next(q, h)						\
+	({								\
+		struct rb_node *_node = rb_next(&(h)->link);		\
+		_node ? (container_of(_node, xntimerh_t, link)) : NULL; \
+	})
+
+#define xntimerq_second(q, h) xntimerq_next(q, h)
+
+void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder);
+
+static inline void xntimerq_remove(xntimerq_t *q, xntimerh_t *holder)
+{
+	if (holder == q->head)
+		q->head = xntimerq_second(q, holder);
+
+	rb_erase(&holder->link, &q->root);
+}
+
+typedef struct { } xntimerq_it_t;
+
+#define xntimerq_it_begin(q,i)	((void) (i), xntimerq_head(q))
+#define xntimerq_it_next(q,i,h) ((void) (i), xntimerq_next((q),(h)))
+
+#else /* CONFIG_XENO_OPT_TIMER_LIST */
+
+typedef struct xntlholder xntimerh_t;
+
+#define xntimerh_date(h)       xntlholder_date(h)
+#define xntimerh_prio(h)       xntlholder_prio(h)
+#define xntimerh_init(h)       do { } while (0)
+
+typedef struct list_head xntimerq_t;
+
+#define xntimerq_init(q)        xntlist_init(q)
+#define xntimerq_destroy(q)     do { } while (0)
+#define xntimerq_empty(q)       xntlist_empty(q)
+#define xntimerq_head(q)        xntlist_head(q)
+#define xntimerq_second(q, h)   xntlist_second((q),(h))
+#define xntimerq_insert(q, h)   xntlist_insert((q),(h))
+#define xntimerq_remove(q, h)   xntlist_remove((q),(h))
+
+typedef struct { } xntimerq_it_t;
+
+#define xntimerq_it_begin(q,i)  ((void) (i), xntlist_head(q))
+#define xntimerq_it_next(q,i,h) ((void) (i), xntlist_next((q),(h)))
+
+#endif /* CONFIG_XENO_OPT_TIMER_LIST */
+
+struct xnsched;
+
+struct xntimerdata {
+	xntimerq_t q;
+};
+
+static inline struct xntimerdata *
+xnclock_percpu_timerdata(struct xnclock *clock, int cpu)
+{
+	return per_cpu_ptr(clock->timerdata, cpu);
+}
+
+static inline struct xntimerdata *
+xnclock_this_timerdata(struct xnclock *clock)
+{
+	return raw_cpu_ptr(clock->timerdata);
+}
+
+struct xntimer {
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	struct xnclock *clock;
+#endif
+	/** Link in timers list. */
+	xntimerh_t aplink;
+	struct list_head adjlink;
+	/** Timer status. */
+	unsigned long status;
+	/** Periodic interval (clock ticks, 0 == one shot). */
+	xnticks_t interval;
+	/** Periodic interval (nanoseconds, 0 == one shot). */
+	xnticks_t interval_ns;
+	/** Count of timer ticks in periodic mode. */
+	xnticks_t periodic_ticks;
+	/** First tick date in periodic mode. */
+	xnticks_t start_date;
+	/** Date of next periodic release point (timer ticks). */
+	xnticks_t pexpect_ticks;
+	/** Sched structure to which the timer is attached. */
+	struct xnsched *sched;
+	/** Timeout handler. */
+	void (*handler)(struct xntimer *timer);
+#ifdef CONFIG_XENO_OPT_STATS
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	struct xnclock *tracker;
+#endif
+	/** Timer name to be displayed. */
+	char name[XNOBJECT_NAME_LEN];
+	/** Timer holder in timebase. */
+	struct list_head next_stat;
+	/** Number of timer schedules. */
+	xnstat_counter_t scheduled;
+	/** Number of timer events. */
+	xnstat_counter_t fired;
+#endif /* CONFIG_XENO_OPT_STATS */
+};
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+static inline struct xnclock *xntimer_clock(struct xntimer *timer)
+{
+	return timer->clock;
+}
+
+void xntimer_set_clock(struct xntimer *timer,
+		       struct xnclock *newclock);
+
+#else /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline struct xnclock *xntimer_clock(struct xntimer *timer)
+{
+	return &nkclock;
+}
+
+static inline void xntimer_set_clock(struct xntimer *timer,
+				     struct xnclock *newclock)
+{
+	XENO_BUG_ON(COBALT, newclock != &nkclock);
+}
+
+#endif /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+#ifdef CONFIG_SMP
+static inline struct xnsched *xntimer_sched(struct xntimer *timer)
+{
+	return timer->sched;
+}
+#else /* !CONFIG_SMP */
+#define xntimer_sched(t)	xnsched_current()
+#endif /* !CONFIG_SMP */
+
+#define xntimer_percpu_queue(__timer)					\
+	({								\
+		struct xntimerdata *tmd;				\
+		int cpu = xnsched_cpu((__timer)->sched);		\
+		tmd = xnclock_percpu_timerdata(xntimer_clock(__timer), cpu); \
+		&tmd->q;						\
+	})
+
+static inline unsigned long xntimer_gravity(struct xntimer *timer)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+
+	if (timer->status & XNTIMER_KGRAVITY)
+		return clock->gravity.kernel;
+
+	if (timer->status & XNTIMER_UGRAVITY)
+		return clock->gravity.user;
+
+	return clock->gravity.irq;
+}
+
+static inline void xntimer_update_date(struct xntimer *timer)
+{
+	xntimerh_date(&timer->aplink) = timer->start_date
+		+ xnclock_ns_to_ticks(xntimer_clock(timer),
+			timer->periodic_ticks * timer->interval_ns)
+		- xntimer_gravity(timer);
+}
+
+static inline xnticks_t xntimer_pexpect(struct xntimer *timer)
+{
+	return timer->start_date +
+		xnclock_ns_to_ticks(xntimer_clock(timer),
+				timer->pexpect_ticks * timer->interval_ns);
+}
+
+static inline void xntimer_set_priority(struct xntimer *timer,
+					int prio)
+{
+	xntimerh_prio(&timer->aplink) = prio;
+}
+
+static inline int xntimer_active_p(struct xntimer *timer)
+{
+	return timer->sched != NULL;
+}
+
+static inline int xntimer_running_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_RUNNING) != 0;
+}
+
+static inline int xntimer_fired_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_FIRED) != 0;
+}
+
+static inline int xntimer_periodic_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_PERIODIC) != 0;
+}
+
+void __xntimer_init(struct xntimer *timer,
+		    struct xnclock *clock,
+		    void (*handler)(struct xntimer *timer),
+		    struct xnsched *sched,
+		    int flags);
+
+void xntimer_set_gravity(struct xntimer *timer,
+			 int gravity);
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+#define xntimer_init(__timer, __clock, __handler, __sched, __flags)	\
+do {									\
+	__xntimer_init(__timer, __clock, __handler, __sched, __flags);	\
+	xntimer_set_name(__timer, #__handler);				\
+} while (0)
+
+static inline void xntimer_reset_stats(struct xntimer *timer)
+{
+	xnstat_counter_set(&timer->scheduled, 0);
+	xnstat_counter_set(&timer->fired, 0);
+}
+
+static inline void xntimer_account_scheduled(struct xntimer *timer)
+{
+	xnstat_counter_inc(&timer->scheduled);
+}
+
+static inline void xntimer_account_fired(struct xntimer *timer)
+{
+	xnstat_counter_inc(&timer->fired);
+}
+
+static inline void xntimer_set_name(struct xntimer *timer, const char *name)
+{
+	knamecpy(timer->name, name);
+}
+
+#else /* !CONFIG_XENO_OPT_STATS */
+
+#define xntimer_init	__xntimer_init
+
+static inline void xntimer_reset_stats(struct xntimer *timer) { }
+
+static inline void xntimer_account_scheduled(struct xntimer *timer) { }
+
+static inline void xntimer_account_fired(struct xntimer *timer) { }
+
+static inline void xntimer_set_name(struct xntimer *timer, const char *name) { }
+
+#endif /* !CONFIG_XENO_OPT_STATS */
+
+#if defined(CONFIG_XENO_OPT_EXTCLOCK) && defined(CONFIG_XENO_OPT_STATS)
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock);
+#else
+static inline
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock) { }
+#endif
+
+void xntimer_destroy(struct xntimer *timer);
+
+/**
+ * @fn xnticks_t xntimer_interval(struct xntimer *timer)
+ *
+ * @brief Return the timer interval value.
+ *
+ * Return the timer interval value in nanoseconds.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The duration of a period in nanoseconds. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled or
+ * one shot.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+static inline xnticks_t xntimer_interval(struct xntimer *timer)
+{
+	return timer->interval_ns;
+}
+
+static inline xnticks_t xntimer_expiry(struct xntimer *timer)
+{
+	/* Real expiry date in ticks without anticipation (no gravity) */
+	return xntimerh_date(&timer->aplink) + xntimer_gravity(timer);
+}
+
+int xntimer_start(struct xntimer *timer,
+		xnticks_t value,
+		xnticks_t interval,
+		xntmode_t mode);
+
+void __xntimer_stop(struct xntimer *timer);
+
+xnticks_t xntimer_get_date(struct xntimer *timer);
+
+xnticks_t __xntimer_get_timeout(struct xntimer *timer);
+
+xnticks_t xntimer_get_interval(struct xntimer *timer);
+
+int xntimer_heading_p(struct xntimer *timer);
+
+static inline void xntimer_stop(struct xntimer *timer)
+{
+	if (timer->status & XNTIMER_RUNNING)
+		__xntimer_stop(timer);
+}
+
+static inline xnticks_t xntimer_get_timeout(struct xntimer *timer)
+{
+	if (!xntimer_running_p(timer))
+		return XN_INFINITE;
+
+	return __xntimer_get_timeout(timer);
+}
+
+static inline xnticks_t xntimer_get_timeout_stopped(struct xntimer *timer)
+{
+	return __xntimer_get_timeout(timer);
+}
+
+static inline void xntimer_enqueue(struct xntimer *timer,
+				   xntimerq_t *q)
+{
+	xntimerq_insert(q, &timer->aplink);
+	timer->status &= ~XNTIMER_DEQUEUED;
+	xntimer_account_scheduled(timer);
+}
+
+static inline void xntimer_dequeue(struct xntimer *timer,
+				   xntimerq_t *q)
+{
+	xntimerq_remove(q, &timer->aplink);
+	timer->status |= XNTIMER_DEQUEUED;
+}
+
+unsigned long long xntimer_get_overruns(struct xntimer *timer,
+					struct xnthread *waiter,
+					xnticks_t now);
+
+#ifdef CONFIG_SMP
+
+void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched);
+
+static inline
+void xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	if (timer->sched != sched)
+		__xntimer_migrate(timer, sched);
+}
+
+void __xntimer_set_affinity(struct xntimer *timer,
+			    struct xnsched *sched);
+
+static inline void xntimer_set_affinity(struct xntimer *timer,
+					struct xnsched *sched)
+{
+	if (sched != xntimer_sched(timer))
+		__xntimer_set_affinity(timer, sched);
+}
+
+#else /* ! CONFIG_SMP */
+
+static inline void xntimer_migrate(struct xntimer *timer,
+				   struct xnsched *sched)
+{
+	timer->sched = sched;
+}
+
+static inline void xntimer_set_affinity(struct xntimer *timer,
+					struct xnsched *sched)
+{
+	xntimer_migrate(timer, sched);
+}
+
+#endif /* CONFIG_SMP */
+
+char *xntimer_format_time(xnticks_t ns,
+			  char *buf, size_t bufsz);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_TIMER_H */
+++ linux-patched/include/xenomai/cobalt/kernel/init.h	2022-03-21 12:58:31.646866952 +0100
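A usage sketch for the xntimer API above; illustrative only, not part of the patch. The handler and period are made up, and nanosecond-valued expiry/interval arguments are assumed (as the interval_ns field suggests); only xntimer_init(), xntimer_start(), XN_RELATIVE, XNTIMER_IGRAVITY and nkclock come from the headers.

/* Illustrative only -- not part of the patch. */
#include <cobalt/kernel/timer.h>

static struct xntimer watchdog_timer;	/* hypothetical */

static void watchdog_handler(struct xntimer *timer)
{
	/* Invoked from the clock tick handling code. */
}

static int start_watchdog(struct xnsched *sched)
{
	xntimer_init(&watchdog_timer, &nkclock, watchdog_handler,
		     sched, XNTIMER_IGRAVITY);

	/* First shot in 1 ms, then every 1 ms. */
	return xntimer_start(&watchdog_timer, 1000000, 1000000, XN_RELATIVE);
}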
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/registry.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_INIT_H
+#define _COBALT_KERNEL_INIT_H
+
+#include <linux/atomic.h>
+#include <linux/notifier.h>
+#include <cobalt/uapi/corectl.h>
+
+extern atomic_t cobalt_runstate;
+
+static inline enum cobalt_run_states realtime_core_state(void)
+{
+	return atomic_read(&cobalt_runstate);
+}
+
+static inline int realtime_core_enabled(void)
+{
+	return atomic_read(&cobalt_runstate) != COBALT_STATE_DISABLED;
+}
+
+static inline int realtime_core_running(void)
+{
+	return atomic_read(&cobalt_runstate) == COBALT_STATE_RUNNING;
+}
+
+static inline void set_realtime_core_state(enum cobalt_run_states state)
+{
+	atomic_set(&cobalt_runstate, state);
+}
+
+void cobalt_add_state_chain(struct notifier_block *nb);
+
+void cobalt_remove_state_chain(struct notifier_block *nb);
+
+void cobalt_call_state_chain(enum cobalt_run_states newstate);
+
+#endif /* !_COBALT_KERNEL_INIT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/registry.h	2022-03-21 12:58:31.639867021 +0100
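The run-state chain above lets other kernel code track Cobalt core transitions. A minimal sketch, not part of the patch, assuming the chain passes the new run state as the notifier action value (as cobalt_call_state_chain() suggests); the callback and its action are hypothetical.

/* Illustrative only -- not part of the patch. */
#include <linux/notifier.h>
#include <linux/printk.h>
#include <cobalt/kernel/init.h>

static int runstate_event(struct notifier_block *nb,
			  unsigned long newstate, void *unused)
{
	if (newstate == COBALT_STATE_RUNNING)
		pr_info("Cobalt core is running\n");

	return NOTIFY_DONE;
}

static struct notifier_block runstate_nb = {
	.notifier_call = runstate_event,
};

static void watch_runstate(void)
{
	cobalt_add_state_chain(&runstate_nb);
}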
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-rt.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_REGISTRY_H
+#define _COBALT_KERNEL_REGISTRY_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/vfile.h>
+
+/**
+ * @addtogroup cobalt_core_registry
+ *
+ * @{
+ */
+struct xnpnode;
+
+struct xnobject {
+	void *objaddr;
+	const char *key;	  /* !< Hash key. May be NULL if anonymous. */
+	unsigned long cstamp;		  /* !< Creation stamp. */
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnpnode *pnode;	/* !< v-file information class. */
+	union {
+		struct {
+			struct xnvfile_rev_tag tag;
+			struct xnvfile_snapshot file;
+		} vfsnap; /* !< virtual snapshot file. */
+		struct xnvfile_regular vfreg; /* !< virtual regular file */
+		struct xnvfile_link link;     /* !< virtual link. */
+	} vfile_u;
+	struct xnvfile *vfilp;
+#endif /* CONFIG_XENO_OPT_VFILE */
+	struct hlist_node hlink; /* !< Link in h-table */
+	struct list_head link;
+};
+
+int xnregistry_init(void);
+
+void xnregistry_cleanup(void);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+#define XNOBJECT_EXPORT_SCHEDULED  ((struct xnvfile *)1L)
+#define XNOBJECT_EXPORT_INPROGRESS ((struct xnvfile *)2L)
+#define XNOBJECT_EXPORT_ABORTED    ((struct xnvfile *)3L)
+
+struct xnptree {
+	const char *dirname;
+	/* hidden */
+	int entries;
+	struct xnvfile_directory vdir;
+};
+
+#define DEFINE_XNPTREE(__var, __name)		\
+	struct xnptree __var = {		\
+		.dirname = __name,		\
+		.entries = 0,			\
+		.vdir = xnvfile_nodir,		\
+	}
+
+struct xnpnode_ops {
+	int (*export)(struct xnobject *object, struct xnpnode *pnode);
+	void (*unexport)(struct xnobject *object, struct xnpnode *pnode);
+	void (*touch)(struct xnobject *object);
+};
+
+struct xnpnode {
+	const char *dirname;
+	struct xnptree *root;
+	struct xnpnode_ops *ops;
+	/* hidden */
+	int entries;
+	struct xnvfile_directory vdir;
+};
+
+struct xnpnode_snapshot {
+	struct xnpnode node;
+	struct xnvfile_snapshot_template vfile;
+};
+
+struct xnpnode_regular {
+	struct xnpnode node;
+	struct xnvfile_regular_template vfile;
+};
+
+struct xnpnode_link {
+	struct xnpnode node;
+	char *(*target)(void *obj);
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+#define DEFINE_XNPTREE(__var, __name);
+
+/* Placeholders. */
+
+struct xnpnode {
+	const char *dirname;
+};
+
+struct xnpnode_snapshot {
+	struct xnpnode node;
+};
+
+struct xnpnode_regular {
+	struct xnpnode node;
+};
+
+struct xnpnode_link {
+	struct xnpnode node;
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/* Public interface. */
+
+extern struct xnobject *registry_obj_slots;
+
+static inline struct xnobject *xnregistry_validate(xnhandle_t handle)
+{
+	struct xnobject *object;
+	/*
+	 * Careful: a removed object which is still in flight to be
+	 * unexported carries a NULL objaddr, so we have to check this
+	 * as well.
+	 */
+	handle = xnhandle_get_index(handle);
+	if (likely(handle && handle < CONFIG_XENO_OPT_REGISTRY_NRSLOTS)) {
+		object = &registry_obj_slots[handle];
+		return object->objaddr ? object : NULL;
+	}
+
+	return NULL;
+}
+
+static inline const char *xnregistry_key(xnhandle_t handle)
+{
+	struct xnobject *object = xnregistry_validate(handle);
+	return object ? object->key : NULL;
+}
+
+int xnregistry_enter(const char *key,
+		     void *objaddr,
+		     xnhandle_t *phandle,
+		     struct xnpnode *pnode);
+
+static inline int
+xnregistry_enter_anon(void *objaddr, xnhandle_t *phandle)
+{
+	return xnregistry_enter(NULL, objaddr, phandle, NULL);
+}
+
+int xnregistry_bind(const char *key,
+		    xnticks_t timeout,
+		    int timeout_mode,
+		    xnhandle_t *phandle);
+
+int xnregistry_remove(xnhandle_t handle);
+
+static inline
+void *xnregistry_lookup(xnhandle_t handle,
+			unsigned long *cstamp_r)
+{
+	struct xnobject *object = xnregistry_validate(handle);
+
+	if (object == NULL)
+		return NULL;
+
+	if (cstamp_r)
+		*cstamp_r = object->cstamp;
+
+	return object->objaddr;
+}
+
+int xnregistry_unlink(const char *key);
+
+unsigned xnregistry_hash_size(void);
+
+extern struct xnpnode_ops xnregistry_vfsnap_ops;
+
+extern struct xnpnode_ops xnregistry_vlink_ops;
+
+extern struct xnpnode_ops xnregistry_vfreg_ops;
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_REGISTRY_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-rt.h	2022-03-21 12:58:31.631867099 +0100
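A short sketch of the registry's public interface above; illustrative only, not part of the patch. The object type and key are made up; xnregistry_enter(), xnregistry_lookup() and xnregistry_remove() are declared in the header.

/* Illustrative only -- not part of the patch. */
#include <cobalt/kernel/registry.h>

struct my_object {		/* hypothetical payload */
	int value;
};

static int publish_object(struct my_object *obj, xnhandle_t *handle_r)
{
	/* NULL pnode: no vfile (/proc) export is requested. */
	return xnregistry_enter("my-object", obj, handle_r, NULL);
}

static struct my_object *find_object(xnhandle_t handle)
{
	return xnregistry_lookup(handle, NULL);
}

static void unpublish_object(xnhandle_t handle)
{
	xnregistry_remove(handle);
}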
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/time.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_RT_H
+#define _COBALT_KERNEL_SCHED_RT_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-rt.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/*
+ * Global priority scale for Xenomai's core scheduling class,
+ * available to SCHED_COBALT members.
+ */
+#define XNSCHED_CORE_MIN_PRIO	0
+#define XNSCHED_CORE_MAX_PRIO	259
+#define XNSCHED_CORE_NR_PRIO	\
+	(XNSCHED_CORE_MAX_PRIO - XNSCHED_CORE_MIN_PRIO + 1)
+
+/*
+ * Priority range for SCHED_FIFO, and all other classes Cobalt
+ * implements except SCHED_COBALT.
+ */
+#define XNSCHED_FIFO_MIN_PRIO	1
+#define XNSCHED_FIFO_MAX_PRIO	256
+
+#if XNSCHED_CORE_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||	\
+  (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&			\
+   XNSCHED_CORE_NR_PRIO > XNSCHED_MLQ_LEVELS)
+#error "XNSCHED_MLQ_LEVELS is too low"
+#endif
+
+extern struct xnsched_class xnsched_class_rt;
+
+static inline void __xnsched_rt_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_track_weakness(struct xnthread *thread)
+{
+	/*
+	 * We have to track threads exiting weak scheduling, i.e. any
+	 * thread leaving the WEAK class code if compiled in, or
+	 * assigned a zero priority if weak threads are hosted by the
+	 * RT class.
+	 *
+	 * CAUTION: since we need to check the effective priority
+	 * level for determining the weakness state, this can only
+	 * apply to non-boosted threads.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK) || thread->cprio)
+		xnthread_clear_state(thread, XNWEAK);
+	else
+		xnthread_set_state(thread, XNWEAK);
+}
+
+static inline bool __xnsched_rt_setparam(struct xnthread *thread,
+					 const union xnsched_policy_param *p)
+{
+	bool ret = xnsched_set_effective_priority(thread, p->rt.prio);
+
+	if (!xnthread_test_state(thread, XNBOOST))
+		__xnsched_rt_track_weakness(thread);
+
+	return ret;
+}
+
+static inline void __xnsched_rt_getparam(struct xnthread *thread,
+					 union xnsched_policy_param *p)
+{
+	p->rt.prio = thread->cprio;
+}
+
+static inline void __xnsched_rt_trackprio(struct xnthread *thread,
+					  const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->rt.prio; /* Force update. */
+	else {
+		thread->cprio = thread->bprio;
+		/* Leaving PI/PP, so non-boosted by definition. */
+		__xnsched_rt_track_weakness(thread);
+	}
+}
+
+static inline void __xnsched_rt_protectprio(struct xnthread *thread, int prio)
+{
+	/*
+	 * The RT class supports the widest priority range from
+	 * XNSCHED_CORE_MIN_PRIO to XNSCHED_CORE_MAX_PRIO inclusive,
+	 * no need to cap the input value which is guaranteed to be in
+	 * the range [1..XNSCHED_CORE_MAX_PRIO].
+	 */
+	thread->cprio = prio;
+}
+
+static inline void __xnsched_rt_forget(struct xnthread *thread)
+{
+}
+
+static inline int xnsched_rt_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+struct xnthread *xnsched_rt_pick(struct xnsched *sched);
+#else
+static inline struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	return xnsched_getq(&sched->rt.runnable);
+}
+#endif
+
+void xnsched_rt_tick(struct xnsched *sched);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_RT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/time.h	2022-03-21 12:58:31.624867167 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/intr.h	1970-01-01 01:00:00.000000000 +0100
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _COBALT_KERNEL_TIME_H
+#define _COBALT_KERNEL_TIME_H
+
+#include <linux/time.h>
+#include <linux/time64.h>
+
+/**
+ * Read struct __kernel_timespec from userspace and convert to
+ * struct timespec64
+ *
+ * @param ts The destination, will be filled in
+ * @param uts The source, provided by an application
+ * @return 0 on success, -EFAULT otherwise
+ */
+int cobalt_get_timespec64(struct timespec64 *ts,
+			  const struct __kernel_timespec __user *uts);
+
+/**
+ * Convert struct timespec64 to struct __kernel_timespec
+ * and copy it to userspace
+ *
+ * @param ts The source, provided by the kernel
+ * @param uts The destination, will be filled in
+ * @return 0 on success, -EFAULT otherwise
+ */
+int cobalt_put_timespec64(const struct timespec64 *ts,
+			   struct __kernel_timespec __user *uts);
+
+#endif /* !_COBALT_KERNEL_TIME_H */
+++ linux-patched/include/xenomai/cobalt/kernel/intr.h	2022-03-21 12:58:31.617867235 +0100
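The two helpers above convert between the 64-bit __kernel_timespec user ABI and struct timespec64. A typical fetch-and-validate path might look like the following sketch; it is illustrative only, with a made-up wrapper name.

/* Illustrative only -- not part of the patch. */
#include <linux/errno.h>
#include <cobalt/kernel/time.h>

static int get_user_timeout(struct timespec64 *ts,
			    const struct __kernel_timespec __user *u_ts)
{
	int ret = cobalt_get_timespec64(ts, u_ts);

	if (ret)
		return ret;	/* -EFAULT */

	if (!timespec64_valid(ts))
		return -EINVAL;

	return 0;
}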
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/schedqueue.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_INTR_H
+#define _COBALT_KERNEL_INTR_H
+
+#include <linux/spinlock.h>
+#include <cobalt/kernel/stat.h>
+#include <pipeline/irq.h>
+
+/**
+ * @addtogroup cobalt_core_irq
+ * @{
+ */
+
+/* Possible return values of a handler. */
+#define XN_IRQ_NONE	 0x1
+#define XN_IRQ_HANDLED	 0x2
+#define XN_IRQ_STATMASK	 (XN_IRQ_NONE|XN_IRQ_HANDLED)
+#define XN_IRQ_PROPAGATE 0x100
+#define XN_IRQ_DISABLE   0x200
+
+/* Init flags. */
+#define XN_IRQTYPE_SHARED  0x1
+#define XN_IRQTYPE_EDGE    0x2
+
+/* Status bits. */
+#define XN_IRQSTAT_ATTACHED   0
+#define _XN_IRQSTAT_ATTACHED  (1 << XN_IRQSTAT_ATTACHED)
+#define XN_IRQSTAT_DISABLED   1
+#define _XN_IRQSTAT_DISABLED  (1 << XN_IRQSTAT_DISABLED)
+
+struct xnintr;
+struct xnsched;
+
+typedef int (*xnisr_t)(struct xnintr *intr);
+
+typedef void (*xniack_t)(unsigned irq, void *arg);
+
+struct xnirqstat {
+	/** Number of handled receipts since attachment. */
+	xnstat_counter_t hits;
+	/** Runtime accounting entity */
+	xnstat_exectime_t account;
+	/** Accumulated accounting entity */
+	xnstat_exectime_t sum;
+};
+
+struct xnintr {
+#ifdef CONFIG_XENO_OPT_SHIRQ
+	/** Next object in the IRQ-sharing chain. */
+	struct xnintr *next;
+#endif
+	/** Number of consecutive unhandled interrupts. */
+	unsigned int unhandled;
+	/** Interrupt service routine. */
+	xnisr_t isr;
+	/** User-defined cookie value. */
+	void *cookie;
+	/** runtime status */
+	unsigned long status;
+	/** Creation flags. */
+	int flags;
+	/** IRQ number. */
+	unsigned int irq;
+	/** Interrupt acknowledge routine. */
+	xniack_t iack;
+	/** Symbolic name. */
+	const char *name;
+	/** Descriptor maintenance lock. */
+	raw_spinlock_t lock;
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+	/** Statistics. */
+	struct xnirqstat *stats;
+#endif
+};
+
+struct xnintr_iterator {
+    int cpu;		/** Current CPU in iteration. */
+    unsigned long hits;	/** Current hit counter. */
+    xnticks_t exectime_period;	/** Used CPU time in current accounting period. */
+    xnticks_t account_period; /** Length of accounting period. */
+    xnticks_t exectime_total;	/** Overall CPU time consumed. */
+    int list_rev;	/** System-wide xnintr list revision (internal use). */
+    struct xnintr *prev;	/** Previously visited xnintr object (internal use). */
+};
+
+void xnintr_core_clock_handler(void);
+
+void xnintr_host_tick(struct xnsched *sched);
+
+/* Public interface. */
+
+int xnintr_init(struct xnintr *intr,
+		const char *name,
+		unsigned irq,
+		xnisr_t isr,
+		xniack_t iack,
+		int flags);
+
+void xnintr_destroy(struct xnintr *intr);
+
+int xnintr_attach(struct xnintr *intr,
+		  void *cookie, const cpumask_t *cpumask);
+
+void xnintr_detach(struct xnintr *intr);
+
+void xnintr_enable(struct xnintr *intr);
+
+void xnintr_disable(struct xnintr *intr);
+
+int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask);
+
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+
+int xnintr_query_init(struct xnintr_iterator *iterator);
+
+int xnintr_get_query_lock(void);
+
+void xnintr_put_query_lock(void);
+
+int xnintr_query_next(int irq, struct xnintr_iterator *iterator,
+		      char *name_buf);
+
+#else /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+static inline int xnintr_query_init(struct xnintr_iterator *iterator)
+{
+	return 0;
+}
+
+static inline int xnintr_get_query_lock(void)
+{
+	return 0;
+}
+
+static inline void xnintr_put_query_lock(void) {}
+#endif /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_INTR_H */
+++ linux-patched/include/xenomai/cobalt/kernel/schedqueue.h	2022-03-21 12:58:31.609867313 +0100
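A sketch of how the xnintr interface above is typically tied to a device; illustrative only, not part of the patch. The IRQ number, device name and handler contents are made up, and passing a NULL cpumask to xnintr_attach() is assumed to mean "no specific affinity".

/* Illustrative only -- not part of the patch. */
#include <cobalt/kernel/intr.h>

static struct xnintr uart_intr;		/* hypothetical device */

static int uart_isr(struct xnintr *intr)
{
	void *dev = intr->cookie;	/* cookie passed to xnintr_attach() */

	(void)dev;
	/* ... acknowledge and service the device here ... */

	return XN_IRQ_HANDLED;
}

static int hook_uart_irq(void *dev)
{
	int ret;

	ret = xnintr_init(&uart_intr, "rt-uart", 42 /* IRQ line */,
			  uart_isr, NULL /* iack */, 0 /* flags */);
	if (ret)
		return ret;

	ret = xnintr_attach(&uart_intr, dev, NULL /* cpumask */);
	if (ret) {
		xnintr_destroy(&uart_intr);
		return ret;
	}

	xnintr_enable(&uart_intr);

	return 0;
}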
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/vdso.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHEDQUEUE_H
+#define _COBALT_KERNEL_SCHEDQUEUE_H
+
+#include <cobalt/kernel/list.h>
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#define XNSCHED_CLASS_WEIGHT_FACTOR	1024
+
+#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
+
+#include <linux/bitmap.h>
+
+/*
+ * Multi-level priority queue, suitable for handling the runnable
+ * thread queue of the core scheduling class with O(1) property. We
+ * only manage a descending queuing order, i.e. highest numbered
+ * priorities come first.
+ */
+#define XNSCHED_MLQ_LEVELS  260	/* i.e. XNSCHED_CORE_NR_PRIO */
+
+struct xnsched_mlq {
+	int elems;
+	DECLARE_BITMAP(prio_map, XNSCHED_MLQ_LEVELS);
+	struct list_head heads[XNSCHED_MLQ_LEVELS];
+};
+
+struct xnthread;
+
+void xnsched_initq(struct xnsched_mlq *q);
+
+void xnsched_addq(struct xnsched_mlq *q,
+		  struct xnthread *thread);
+
+void xnsched_addq_tail(struct xnsched_mlq *q, 
+		       struct xnthread *thread);
+
+void xnsched_delq(struct xnsched_mlq *q,
+		  struct xnthread *thread);
+
+struct xnthread *xnsched_getq(struct xnsched_mlq *q);
+
+static inline int xnsched_emptyq_p(struct xnsched_mlq *q)
+{
+	return q->elems == 0;
+}
+
+static inline int xnsched_weightq(struct xnsched_mlq *q)
+{
+	return find_first_bit(q->prio_map, XNSCHED_MLQ_LEVELS);
+}
+
+typedef struct xnsched_mlq xnsched_queue_t;
+
+#else /* ! CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+typedef struct list_head xnsched_queue_t;
+
+#define xnsched_initq(__q)			INIT_LIST_HEAD(__q)
+#define xnsched_emptyq_p(__q)			list_empty(__q)
+#define xnsched_addq(__q, __t)			list_add_prilf(__t, __q, cprio, rlink)
+#define xnsched_addq_tail(__q, __t)		list_add_priff(__t, __q, cprio, rlink)
+#define xnsched_delq(__q, __t)			(void)(__q), list_del(&(__t)->rlink)
+#define xnsched_getq(__q)							\
+	({									\
+		struct xnthread *__t = NULL;					\
+		if (!list_empty(__q))						\
+			__t = list_get_entry(__q, struct xnthread, rlink);	\
+		__t;								\
+	})
+#define xnsched_weightq(__q)						\
+	({								\
+		struct xnthread *__t;					\
+		__t = list_first_entry(__q, struct xnthread, rlink);	\
+		__t->cprio;						\
+	})
+
+#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+struct xnthread *xnsched_findq(xnsched_queue_t *q, int prio);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHEDQUEUE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/vdso.h	2022-03-21 12:58:31.602867381 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/pipe.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_VDSO_H
+#define _COBALT_KERNEL_VDSO_H
+
+#include <linux/time.h>
+#include <asm/barrier.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <cobalt/uapi/kernel/vdso.h>
+
+extern struct xnvdso *nkvdso;
+
+/*
+ * Define the available feature set here. We have a single feature
+ * defined for now, only in the I-pipe case.
+ */
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT
+
+#define XNVDSO_FEATURES XNVDSO_FEAT_HOST_REALTIME
+
+static inline struct xnvdso_hostrt_data *get_hostrt_data(void)
+{
+	return &nkvdso->hostrt_data;
+}
+
+#else
+
+#define XNVDSO_FEATURES 0
+
+#endif
+
+#endif /* _COBALT_KERNEL_VDSO_H */
+++ linux-patched/include/xenomai/cobalt/kernel/pipe.h	2022-03-21 12:58:31.595867450 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/ancillaries.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA
+ * 02139, USA; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_PIPE_H
+#define _COBALT_KERNEL_PIPE_H
+
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/kernel/pipe.h>
+
+#define XNPIPE_NDEVS      CONFIG_XENO_OPT_PIPE_NRDEV
+#define XNPIPE_DEV_MAJOR  150
+
+#define XNPIPE_KERN_CONN         0x1
+#define XNPIPE_KERN_LCLOSE       0x2
+#define XNPIPE_USER_CONN         0x4
+#define XNPIPE_USER_SIGIO        0x8
+#define XNPIPE_USER_WREAD        0x10
+#define XNPIPE_USER_WREAD_READY  0x20
+#define XNPIPE_USER_WSYNC        0x40
+#define XNPIPE_USER_WSYNC_READY  0x80
+#define XNPIPE_USER_LCONN        0x100
+
+#define XNPIPE_USER_ALL_WAIT \
+(XNPIPE_USER_WREAD|XNPIPE_USER_WSYNC)
+
+#define XNPIPE_USER_ALL_READY \
+(XNPIPE_USER_WREAD_READY|XNPIPE_USER_WSYNC_READY)
+
+struct xnpipe_mh {
+	size_t size;
+	size_t rdoff;
+	struct list_head link;
+};
+
+struct xnpipe_state;
+
+struct xnpipe_operations {
+	void (*output)(struct xnpipe_mh *mh, void *xstate);
+	int (*input)(struct xnpipe_mh *mh, int retval, void *xstate);
+	void *(*alloc_ibuf)(size_t size, void *xstate);
+	void (*free_ibuf)(void *buf, void *xstate);
+	void (*free_obuf)(void *buf, void *xstate);
+	void (*release)(void *xstate);
+};
+
+struct xnpipe_state {
+	struct list_head slink;	/* Link on sleep queue */
+	struct list_head alink;	/* Link on async queue */
+
+	struct list_head inq;		/* From user-space to kernel */
+	int nrinq;
+	struct list_head outq;		/* From kernel to user-space */
+	int nroutq;
+	struct xnsynch synchbase;
+	struct xnpipe_operations ops;
+	void *xstate;		/* Extra state managed by caller */
+
+	/* Linux kernel part */
+	unsigned long status;
+	struct fasync_struct *asyncq;
+	wait_queue_head_t readq;	/* open/read/poll waiters */
+	wait_queue_head_t syncq;	/* sync waiters */
+	int wcount;			/* number of waiters on this minor */
+	size_t ionrd;
+};
+
+extern struct xnpipe_state xnpipe_states[];
+
+#define xnminor_from_state(s) (s - xnpipe_states)
+
+#ifdef CONFIG_XENO_OPT_PIPE
+int xnpipe_mount(void);
+void xnpipe_umount(void);
+#else /* !CONFIG_XENO_OPT_PIPE */
+static inline int xnpipe_mount(void) { return 0; }
+static inline void xnpipe_umount(void) { }
+#endif /* !CONFIG_XENO_OPT_PIPE */
+
+/* Entry points of the kernel interface. */
+
+int xnpipe_connect(int minor,
+		   struct xnpipe_operations *ops, void *xstate);
+
+int xnpipe_disconnect(int minor);
+
+ssize_t xnpipe_send(int minor,
+		    struct xnpipe_mh *mh, size_t size, int flags);
+
+ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size);
+
+ssize_t xnpipe_recv(int minor,
+		    struct xnpipe_mh **pmh, xnticks_t timeout);
+
+int xnpipe_flush(int minor, int mode);
+
+int xnpipe_pollstate(int minor, unsigned int *mask_r);
+
+static inline unsigned int __xnpipe_pollstate(int minor)
+{
+	struct xnpipe_state *state = xnpipe_states + minor;
+	unsigned int mask = POLLOUT;
+
+	if (!list_empty(&state->inq))
+		mask |= POLLIN;
+
+	return mask;
+}
+
+static inline char *xnpipe_m_data(struct xnpipe_mh *mh)
+{
+	return (char *)(mh + 1);
+}
+
+#define xnpipe_m_size(mh) ((mh)->size)
+
+#define xnpipe_m_rdoff(mh) ((mh)->rdoff)
+
+#endif /* !_COBALT_KERNEL_PIPE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/ancillaries.h	2022-03-21 12:58:31.587867528 +0100
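The xnpipe_mh layout above implies that the payload lives immediately after the header, which is what xnpipe_m_data() returns. The following sketch is illustrative only and rests on two assumptions not shown by the header: that the size argument of xnpipe_send() includes the header, and that buffer ownership passes to the pipe layer, which later releases it through the free_obuf() handler registered via xnpipe_connect().

/* Illustrative only -- not part of the patch. */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <cobalt/kernel/pipe.h>

static ssize_t send_string(int minor, const char *text)
{
	size_t len = strlen(text);
	struct xnpipe_mh *mh;

	/* Payload sits right after the header, as xnpipe_m_data() assumes. */
	mh = kmalloc(sizeof(*mh) + len, GFP_KERNEL);
	if (mh == NULL)
		return -ENOMEM;

	memcpy(xnpipe_m_data(mh), text, len);

	/* Size assumed to include the header; buffer released by the pipe layer. */
	return xnpipe_send(minor, mh, sizeof(*mh) + len, 0);
}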
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/bufd.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ANCILLARIES_H
+#define _COBALT_KERNEL_ANCILLARIES_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/uidgid.h>
+#include <cobalt/uapi/kernel/limits.h>
+
+#define ksformat(__dst, __len, __fmt, __args...)			\
+	({								\
+		size_t __ret;						\
+		__ret = snprintf(__dst, __len, __fmt, ##__args);	\
+		if (__ret >= __len)					\
+			__dst[__len-1] = '\0';				\
+		__ret;							\
+	})
+
+#define kasformat(__fmt, __args...)					\
+	({								\
+		kasprintf(GFP_KERNEL, __fmt, ##__args);			\
+	})
+
+#define kvsformat(__dst, __len, __fmt, __ap)				\
+	({								\
+		size_t __ret;						\
+		__ret = vsnprintf(__dst, __len, __fmt, __ap);		\
+		if (__ret >= __len)					\
+			__dst[__len-1] = '\0';				\
+		__ret;							\
+	})
+
+#define kvasformat(__fmt, __ap)						\
+	({								\
+		kvasprintf(GFP_KERNEL, __fmt, __ap);			\
+	})
+
+void __knamecpy_requires_character_array_as_destination(void);
+
+#define knamecpy(__dst, __src)						\
+	({								\
+		if (!__builtin_types_compatible_p(typeof(__dst), char[])) \
+			__knamecpy_requires_character_array_as_destination();	\
+		strncpy((__dst), __src, sizeof(__dst));			\
+		__dst[sizeof(__dst) - 1] = '\0';			\
+		__dst;							\
+	 })
+
+#define get_current_uuid() from_kuid_munged(current_user_ns(), current_uid())
+
+#endif /* !_COBALT_KERNEL_ANCILLARIES_H */
+++ linux-patched/include/xenomai/cobalt/kernel/bufd.h	2022-03-21 12:58:31.580867596 +0100
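Both string helpers above guarantee NUL termination on truncation, and knamecpy() additionally enforces at compile time that the destination is a true character array. A short illustrative sketch (buffer sizes and strings are arbitrary):

/* Illustrative only -- not part of the patch. */
#include <cobalt/kernel/ancillaries.h>

static void name_example(void)
{
	char name[XNOBJECT_NAME_LEN];
	char label[16];

	/* Truncates safely; knamecpy() insists on a real char array. */
	knamecpy(name, "a-rather-long-object-name");

	/* snprintf() semantics, but the result is always NUL-terminated. */
	ksformat(label, sizeof(label), "timer@%d", 3);
}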
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-quota.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_BUFD_H
+#define _COBALT_KERNEL_BUFD_H
+
+#include <linux/types.h>
+
+/**
+ * @addtogroup cobalt_core_bufd
+ *
+ * @{
+ */
+
+struct mm_struct;
+
+struct xnbufd {
+	caddr_t b_ptr;		/* src/dst buffer address */
+	size_t b_len;		/* total length of buffer */
+	off_t b_off;		/* # of bytes read/written */
+	struct mm_struct *b_mm;	/* src/dst address space */
+	caddr_t b_carry;	/* pointer to carry over area */
+	char b_buf[64];		/* fast carry over area */
+};
+
+void xnbufd_map_umem(struct xnbufd *bufd,
+		     void __user *ptr, size_t len);
+
+static inline void xnbufd_map_uread(struct xnbufd *bufd,
+				    const void __user *ptr, size_t len)
+{
+	xnbufd_map_umem(bufd, (void __user *)ptr, len);
+}
+
+static inline void xnbufd_map_uwrite(struct xnbufd *bufd,
+				     void __user *ptr, size_t len)
+{
+	xnbufd_map_umem(bufd, ptr, len);
+}
+
+ssize_t xnbufd_unmap_uread(struct xnbufd *bufd);
+
+ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd);
+
+void xnbufd_map_kmem(struct xnbufd *bufd,
+		     void *ptr, size_t len);
+
+static inline void xnbufd_map_kread(struct xnbufd *bufd,
+				    const void *ptr, size_t len)
+{
+	xnbufd_map_kmem(bufd, (void *)ptr, len);
+}
+
+static inline void xnbufd_map_kwrite(struct xnbufd *bufd,
+				     void *ptr, size_t len)
+{
+	xnbufd_map_kmem(bufd, ptr, len);
+}
+
+ssize_t xnbufd_unmap_kread(struct xnbufd *bufd);
+
+ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd);
+
+ssize_t xnbufd_copy_to_kmem(void *ptr,
+			    struct xnbufd *bufd, size_t len);
+
+ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd,
+			      void *from, size_t len);
+
+void xnbufd_invalidate(struct xnbufd *bufd);
+
+static inline void xnbufd_reset(struct xnbufd *bufd)
+{
+	bufd->b_off = 0;
+}
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_BUFD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-quota.h	2022-03-21 12:58:31.572867674 +0100
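A sketch of the buffer-descriptor read path above; illustrative only, not part of the patch. The wrapper name is made up, and the return convention (bytes copied, or a negative error code) is an assumption drawn from the ssize_t signatures.

/* Illustrative only -- not part of the patch. */
#include <cobalt/kernel/bufd.h>

static ssize_t fetch_request(const void __user *u_buf, void *req, size_t len)
{
	struct xnbufd bufd;
	ssize_t ret;

	xnbufd_map_uread(&bufd, u_buf, len);
	ret = xnbufd_copy_to_kmem(req, &bufd, len);
	xnbufd_unmap_uread(&bufd);

	return ret;	/* assumed: bytes copied, or a negative error code */
}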
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/tree.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_QUOTA_H
+#define _COBALT_KERNEL_SCHED_QUOTA_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-quota.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+#define XNSCHED_QUOTA_MIN_PRIO	1
+#define XNSCHED_QUOTA_MAX_PRIO	255
+#define XNSCHED_QUOTA_NR_PRIO	\
+	(XNSCHED_QUOTA_MAX_PRIO - XNSCHED_QUOTA_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_quota;
+
+struct xnsched_quota_group {
+	struct xnsched *sched;
+	xnticks_t quota_ns;
+	xnticks_t quota_peak_ns;
+	xnticks_t run_start_ns;
+	xnticks_t run_budget_ns;
+	xnticks_t run_credit_ns;
+	struct list_head members;
+	struct list_head expired;
+	struct list_head next;
+	int nr_active;
+	int nr_threads;
+	int tgid;
+	int quota_percent;
+	int quota_peak_percent;
+};
+
+struct xnsched_quota {
+	xnticks_t period_ns;
+	struct xntimer refill_timer;
+	struct xntimer limit_timer;
+	struct list_head groups;
+};
+
+static inline int xnsched_quota_init_thread(struct xnthread *thread)
+{
+	thread->quota = NULL;
+	INIT_LIST_HEAD(&thread->quota_expired);
+
+	return 0;
+}
+
+int xnsched_quota_create_group(struct xnsched_quota_group *tg,
+			       struct xnsched *sched,
+			       int *quota_sum_r);
+
+int xnsched_quota_destroy_group(struct xnsched_quota_group *tg,
+				int force,
+				int *quota_sum_r);
+
+void xnsched_quota_set_limit(struct xnsched_quota_group *tg,
+			     int quota_percent, int quota_peak_percent,
+			     int *quota_sum_r);
+
+struct xnsched_quota_group *
+xnsched_quota_find_group(struct xnsched *sched, int tgid);
+
+int xnsched_quota_sum_all(struct xnsched *sched);
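+
+/*
+ * A minimal usage sketch, assuming @sched points at a valid scheduler
+ * slot and the caller observes the core locking rules (error handling
+ * omitted): create a group on that CPU, cap it to 20% of the global
+ * period with a 50% peak, then drop it.
+ *
+ *	struct xnsched_quota_group tg;
+ *	int quota_sum, ret;
+ *
+ *	ret = xnsched_quota_create_group(&tg, sched, &quota_sum);
+ *	xnsched_quota_set_limit(&tg, 20, 50, &quota_sum);
+ *	...
+ *	xnsched_quota_destroy_group(&tg, 1, &quota_sum);
+ */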
+
+#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_QUOTA_H */
+++ linux-patched/include/xenomai/cobalt/kernel/tree.h	2022-03-21 12:58:31.565867742 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/sched-sporadic.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_TREE_H
+#define _COBALT_KERNEL_TREE_H
+
+#include <linux/errno.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/assert.h>
+
+typedef unsigned long long xnkey_t;
+
+static inline xnkey_t PTR_KEY(void *p)
+{
+	return (xnkey_t)(long)p;
+}
+
+struct xnid {
+	xnkey_t key;
+	struct rb_node link;
+};
+
+#define xnid_entry(ptr, type, member)					\
+	({								\
+		typeof(ptr) _ptr = (ptr);				\
+		(_ptr ? container_of(_ptr, type, member.link) : NULL);	\
+	})
+
+#define xnid_next_entry(ptr, member)				\
+	xnid_entry(rb_next(&ptr->member.link), typeof(*ptr), member)
+
+static inline void xntree_init(struct rb_root *t)
+{
+	*t = RB_ROOT;
+}
+
+#define xntree_for_each_entry(pos, root, member)			\
+	for (pos = xnid_entry(rb_first(root), typeof(*pos), member);	\
+	     pos; pos = xnid_next_entry(pos, member))
+
+void xntree_cleanup(struct rb_root *t, void *cookie,
+		void (*destroy)(void *cookie, struct xnid *id));
+
+int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key);
+
+static inline xnkey_t xnid_key(struct xnid *i)
+{
+	return i->key;
+}
+
+static inline
+struct xnid *xnid_fetch(struct rb_root *t, xnkey_t key)
+{
+	struct rb_node *node = t->rb_node;
+
+	while (node) {
+		struct xnid *i = container_of(node, struct xnid, link);
+
+		if (key < i->key)
+			node = node->rb_left;
+		else if (key > i->key)
+			node = node->rb_right;
+		else
+			return i;
+	}
+
+	return NULL;
+}
+
+static inline int xnid_remove(struct rb_root *t, struct xnid *xnid)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	if (xnid_fetch(t, xnid->key) != xnid)
+		return -ENOENT;
+#endif
+	rb_erase(&xnid->link, t);
+	return 0;
+}
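+
+/*
+ * A short usage sketch, assuming an illustrative object type that
+ * embeds a struct xnid: key it by pointer value, look it up, then
+ * remove it (return codes unchecked for brevity).
+ *
+ *	struct myobj { struct xnid id; } obj;
+ *	struct rb_root tree;
+ *	struct xnid *found;
+ *
+ *	xntree_init(&tree);
+ *	xnid_enter(&tree, &obj.id, PTR_KEY(&obj));
+ *	found = xnid_fetch(&tree, PTR_KEY(&obj));
+ *	if (found)
+ *		xnid_remove(&tree, found);
+ */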
+
+#endif /* _COBALT_KERNEL_TREE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/sched-sporadic.h	2022-03-21 12:58:31.558867811 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/stat.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_SPORADIC_H
+#define _COBALT_KERNEL_SCHED_SPORADIC_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-sporadic.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+
+#define XNSCHED_SPORADIC_MIN_PRIO	1
+#define XNSCHED_SPORADIC_MAX_PRIO	255
+#define XNSCHED_SPORADIC_NR_PRIO	\
+	(XNSCHED_SPORADIC_MAX_PRIO - XNSCHED_SPORADIC_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_sporadic;
+
+struct xnsched_sporadic_repl {
+	xnticks_t date;
+	xnticks_t amount;
+};
+
+struct xnsched_sporadic_data {
+	xnticks_t resume_date;
+	xnticks_t budget;
+	int repl_in;
+	int repl_out;
+	int repl_pending;
+	struct xntimer repl_timer;
+	struct xntimer drop_timer;
+	struct xnsched_sporadic_repl repl_data[CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL];
+	struct xnsched_sporadic_param param;
+	struct xnthread *thread;
+};
+
+struct xnsched_sporadic {
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	unsigned long drop_retries;
+#endif
+};
+
+static inline int xnsched_sporadic_init_thread(struct xnthread *thread)
+{
+	thread->pss = NULL;
+
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_SPORADIC */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_SPORADIC_H */
+++ linux-patched/include/xenomai/cobalt/kernel/stat.h	2022-03-21 12:58:31.550867889 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/thread.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2006 Dmitry Adamushko <dmitry.adamushko@gmail.com>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_STAT_H
+#define _COBALT_KERNEL_STAT_H
+
+#include <cobalt/kernel/clock.h>
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_stat Thread runtime statistics
+ * @{
+ */
+#ifdef CONFIG_XENO_OPT_STATS
+
+typedef struct xnstat_exectime {
+
+	xnticks_t start;   /* Start of execution time accumulation */
+
+	xnticks_t total; /* Accumulated execution time */
+
+} xnstat_exectime_t;
+
+/* Return the current date, which can be passed to other xnstat services
+   for immediate or lazy accounting. */
+#define xnstat_exectime_now() xnclock_core_read_raw()
+
+/* Accumulate exectime of the current account until the given date. */
+#define xnstat_exectime_update(sched, date) \
+do { \
+	xnticks_t __date = date; \
+	(sched)->current_account->total += \
+		__date - (sched)->last_account_switch; \
+	(sched)->last_account_switch = __date; \
+	/* All changes must be committed before changing the current_account \
+	   reference in sched (required for xnintr_sync_stat_references) */ \
+	smp_wmb(); \
+} while (0)
+
+/* Update the current account reference, returning the previous one. */
+#define xnstat_exectime_set_current(sched, new_account) \
+({ \
+	xnstat_exectime_t *__prev; \
+	__prev = (xnstat_exectime_t *) \
+		atomic_long_xchg((atomic_long_t *)&(sched)->current_account, \
+				 (long)(new_account)); \
+	__prev; \
+})
+
+/* Return the currently active accounting entity. */
+#define xnstat_exectime_get_current(sched) ((sched)->current_account)
+
+/* Finalize an account (no need to accumulate the exectime, just mark the
+   switch date and set the new account). */
+#define xnstat_exectime_finalize(sched, new_account) \
+do { \
+	(sched)->last_account_switch = xnclock_core_read_raw(); \
+	(sched)->current_account = (new_account); \
+} while (0)
+
+/* Obtain content of xnstat_exectime_t */
+#define xnstat_exectime_get_start(account)	((account)->start)
+#define xnstat_exectime_get_total(account)	((account)->total)
+
+/* Obtain the last account switch date of the given sched */
+#define xnstat_exectime_get_last_switch(sched)	((sched)->last_account_switch)
+
+/* Reset statistics from inside the accounted entity (e.g. after CPU
+   migration). */
+#define xnstat_exectime_reset_stats(stat) \
+do { \
+	(stat)->total = 0; \
+	(stat)->start = xnclock_core_read_raw(); \
+} while (0)
+
+typedef struct xnstat_counter {
+	unsigned long counter;
+} xnstat_counter_t;
+
+static inline unsigned long xnstat_counter_inc(xnstat_counter_t *c)
+{
+	return c->counter++;
+}
+
+static inline unsigned long xnstat_counter_get(xnstat_counter_t *c)
+{
+	return c->counter;
+}
+
+static inline void xnstat_counter_set(xnstat_counter_t *c, unsigned long value)
+{
+	c->counter = value;
+}
+
+#else /* !CONFIG_XENO_OPT_STATS */
+typedef struct xnstat_exectime {
+} xnstat_exectime_t;
+
+#define xnstat_exectime_now()					({ 0; })
+#define xnstat_exectime_update(sched, date)			do { } while (0)
+#define xnstat_exectime_set_current(sched, new_account)		({ (void)sched; NULL; })
+#define xnstat_exectime_get_current(sched)			({ (void)sched; NULL; })
+#define xnstat_exectime_finalize(sched, new_account)		do { } while (0)
+#define xnstat_exectime_get_start(account)			({ 0; })
+#define xnstat_exectime_get_total(account)			({ 0; })
+#define xnstat_exectime_get_last_switch(sched)			({ 0; })
+#define xnstat_exectime_reset_stats(account)			do { } while (0)
+
+typedef struct xnstat_counter {
+} xnstat_counter_t;
+
+#define xnstat_counter_inc(c) ({ do { } while(0); 0; })
+#define xnstat_counter_get(c) ({ 0; })
+#define xnstat_counter_set(c, value) do { } while (0)
+#endif /* CONFIG_XENO_OPT_STATS */
+
+/* Account the exectime of the current account until now, switch to
+   new_account, and return the previous one. */
+#define xnstat_exectime_switch(sched, new_account) \
+({ \
+	xnstat_exectime_update(sched, xnstat_exectime_now()); \
+	xnstat_exectime_set_current(sched, new_account); \
+})
+
+/* Account the exectime of the current account until the given date, switch
+   to new_account, and return the previous one. */
+#define xnstat_exectime_lazy_switch(sched, new_account, date) \
+({ \
+	xnstat_exectime_update(sched, date); \
+	xnstat_exectime_set_current(sched, new_account); \
+})
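+
+/*
+ * Sketch of the intended switching pattern, assuming @sched and the
+ * incoming thread @next are provided by the caller (as in the core
+ * context switch path): charge the outgoing account up to now, make
+ * the incoming thread's account current, and keep the previous
+ * account around so it can be restored later if needed.
+ *
+ *	xnstat_exectime_t *prev;
+ *
+ *	prev = xnstat_exectime_switch(sched, &next->stat.account);
+ *	...
+ *	xnstat_exectime_switch(sched, prev);
+ */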
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_STAT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/thread.h	2022-03-21 12:58:31.543867957 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/select.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_THREAD_H
+#define _COBALT_KERNEL_THREAD_H
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <pipeline/thread.h>
+#include <pipeline/inband_work.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/schedparam.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/kernel/thread.h>
+#include <cobalt/uapi/signal.h>
+#include <asm/xenomai/machine.h>
+#include <asm/xenomai/thread.h>
+
+/**
+ * @addtogroup cobalt_core_thread
+ * @{
+ */
+#define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
+#define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
+
+#define XNTHREAD_SIGDEBUG		0
+#define XNTHREAD_SIGSHADOW_HARDEN	1
+#define XNTHREAD_SIGSHADOW_BACKTRACE	2
+#define XNTHREAD_SIGSHADOW_HOME		3
+#define XNTHREAD_SIGTERM		4
+#define XNTHREAD_MAX_SIGNALS		5
+
+struct xnthread;
+struct xnsched;
+struct xnselector;
+struct xnsched_class;
+struct xnsched_tpslot;
+struct xnthread_personality;
+struct completion;
+
+struct lostage_signal {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct task_struct *task;
+	int signo, sigval;
+	struct lostage_signal *self; /* Revisit: I-pipe requirement */
+};
+
+struct xnthread_init_attr {
+	struct xnthread_personality *personality;
+	cpumask_t affinity;
+	int flags;
+	const char *name;
+};
+
+struct xnthread_start_attr {
+	int mode;
+	void (*entry)(void *cookie);
+	void *cookie;
+};
+
+struct xnthread_wait_context {
+	int posted;
+};
+
+struct xnthread_personality {
+	const char *name;
+	unsigned int magic;
+	int xid;
+	atomic_t refcnt;
+	struct {
+		void *(*attach_process)(void);
+		void (*detach_process)(void *arg);
+		void (*map_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*move_thread)(struct xnthread *thread,
+							    int dest_cpu);
+		struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
+	} ops;
+	struct module *module;
+};
+
+struct xnthread {
+	struct xnarchtcb tcb;	/* Architecture-dependent block */
+
+	__u32 state;		/* Thread state flags */
+	__u32 info;		/* Thread information flags */
+	__u32 local_info;	/* Local thread information flags */
+
+	struct xnsched *sched;		/* Thread scheduler */
+	struct xnsched_class *sched_class; /* Current scheduling class */
+	struct xnsched_class *base_class; /* Base scheduling class */
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	struct xnsched_tpslot *tps;	/* Current partition slot for TP scheduling */
+	struct list_head tp_link;	/* Link in per-sched TP thread queue */
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_group *quota; /* Quota scheduling group. */
+	struct list_head quota_expired;
+	struct list_head quota_next;
+#endif
+	cpumask_t affinity;	/* Processor affinity. */
+
+	/** Base priority (before PI/PP boost) */
+	int bprio;
+
+	/** Current (effective) priority */
+	int cprio;
+
+	/**
+	 * Weighted priority (cprio + scheduling class weight).
+	 */
+	int wprio;
+
+	int lock_count;	/**< Scheduler lock count. */
+
+	/**
+	 * Thread holder in xnsched run queue. Ordered by
+	 * thread->cprio.
+	 */
+	struct list_head rlink;
+
+	/**
+	 * Thread holder in xnsynch pendq. Prioritized by
+	 * thread->cprio + scheduling class weight.
+	 */
+	struct list_head plink;
+
+	/** Thread holder in global queue. */
+	struct list_head glink;
+
+	/**
+	 * List of xnsynch objects owned by this thread which cause a
+	 * priority boost for one of the following reasons:
+	 *
+	 * - they are currently claimed by other thread(s) when
+	 * enforcing the priority inheritance protocol (XNSYNCH_PI).
+	 *
+	 * - they require immediate priority ceiling (XNSYNCH_PP).
+	 *
+	 * This list is ordered by decreasing (weighted) thread
+	 * priorities.
+	 */
+	struct list_head boosters;
+
+	struct xnsynch *wchan;		/* Resource the thread pends on */
+
+	struct xnsynch *wwake;		/* Wait channel the thread was resumed from */
+
+	int res_count;			/* Held resources count */
+
+	struct xntimer rtimer;		/* Resource timer */
+
+	struct xntimer ptimer;		/* Periodic timer */
+
+	xnticks_t rrperiod;		/* Allotted round-robin period (ns) */
+
+	struct xnthread_wait_context *wcontext;	/* Active wait context. */
+
+	struct {
+		xnstat_counter_t ssw;	/* Primary -> secondary mode switch count */
+		xnstat_counter_t csw;	/* Context switches (includes secondary -> primary switches) */
+		xnstat_counter_t xsc;	/* Xenomai syscalls */
+		xnstat_counter_t pf;	/* Number of page faults */
+		xnstat_exectime_t account; /* Execution time accounting entity */
+		xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */
+	} stat;
+
+	struct xnselector *selector;    /* For select. */
+
+	xnhandle_t handle;	/* Handle in registry */
+
+	char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */
+
+	void (*entry)(void *cookie); /* Thread entry routine */
+	void *cookie;		/* Cookie to pass to the entry routine */
+
+	/**
+	 * Thread data visible from userland through a window on the
+	 * global heap.
+	 */
+	struct xnthread_user_window *u_window;
+
+	struct xnthread_personality *personality;
+
+	struct completion exited;
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+	const char *exe_path;	/* Executable path */
+	u32 proghash;		/* Hash value for exe_path */
+#endif
+	struct lostage_signal sigarray[XNTHREAD_MAX_SIGNALS];
+};
+
+static inline int xnthread_get_state(const struct xnthread *thread)
+{
+	return thread->state;
+}
+
+static inline int xnthread_test_state(struct xnthread *thread, int bits)
+{
+	return thread->state & bits;
+}
+
+static inline void xnthread_set_state(struct xnthread *thread, int bits)
+{
+	thread->state |= bits;
+}
+
+static inline void xnthread_clear_state(struct xnthread *thread, int bits)
+{
+	thread->state &= ~bits;
+}
+
+static inline int xnthread_test_info(struct xnthread *thread, int bits)
+{
+	return thread->info & bits;
+}
+
+static inline void xnthread_set_info(struct xnthread *thread, int bits)
+{
+	thread->info |= bits;
+}
+
+static inline void xnthread_clear_info(struct xnthread *thread, int bits)
+{
+	thread->info &= ~bits;
+}
+
+static inline int xnthread_test_localinfo(struct xnthread *curr, int bits)
+{
+	return curr->local_info & bits;
+}
+
+static inline void xnthread_set_localinfo(struct xnthread *curr, int bits)
+{
+	curr->local_info |= bits;
+}
+
+static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits)
+{
+	curr->local_info &= ~bits;
+}
+
+static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
+{
+	return &thread->tcb;
+}
+
+static inline int xnthread_base_priority(const struct xnthread *thread)
+{
+	return thread->bprio;
+}
+
+static inline int xnthread_current_priority(const struct xnthread *thread)
+{
+	return thread->cprio;
+}
+
+static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
+{
+	return xnarch_host_task(xnthread_archtcb(thread));
+}
+
+#define xnthread_for_each_booster(__pos, __thread)		\
+	list_for_each_entry(__pos, &(__thread)->boosters, next)
+
+#define xnthread_for_each_booster_safe(__pos, __tmp, __thread)	\
+	list_for_each_entry_safe(__pos, __tmp, &(__thread)->boosters, next)
+
+#define xnthread_run_handler(__t, __h, __a...)				\
+	do {								\
+		struct xnthread_personality *__p__ = (__t)->personality;	\
+		if ((__p__)->ops.__h)					\
+			(__p__)->ops.__h(__t, ##__a);			\
+	} while (0)
+
+#define xnthread_run_handler_stack(__t, __h, __a...)			\
+	do {								\
+		struct xnthread_personality *__p__ = (__t)->personality;	\
+		do {							\
+			if ((__p__)->ops.__h == NULL)			\
+				break;					\
+			__p__ = (__p__)->ops.__h(__t, ##__a);		\
+		} while (__p__);					\
+	} while (0)
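+
+/*
+ * Sketch of a personality handler as walked by
+ * xnthread_run_handler_stack() (names are illustrative): each handler
+ * may return the next personality descriptor to fire the same handler
+ * on, or NULL to stop the walk.
+ *
+ *	static struct xnthread_personality *
+ *	my_exit_thread(struct xnthread *thread)
+ *	{
+ *		... drop per-thread resources owned by this personality ...
+ *		return NULL;
+ *	}
+ */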
+
+static inline
+struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
+{
+	return thread->wcontext;
+}
+
+static inline
+int xnthread_register(struct xnthread *thread, const char *name)
+{
+	return xnregistry_enter(name, thread, &thread->handle, NULL);
+}
+
+static inline
+struct xnthread *xnthread_lookup(xnhandle_t threadh)
+{
+	struct xnthread *thread = xnregistry_lookup(threadh, NULL);
+	return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
+}
+
+static inline void xnthread_sync_window(struct xnthread *thread)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline
+void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state & ~state_bits;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline
+void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state | state_bits;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline int normalize_priority(int prio)
+{
+	return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
+}
+
+int __xnthread_init(struct xnthread *thread,
+		    const struct xnthread_init_attr *attr,
+		    struct xnsched *sched,
+		    struct xnsched_class *sched_class,
+		    const union xnsched_policy_param *sched_param);
+
+void __xnthread_test_cancel(struct xnthread *curr);
+
+void __xnthread_cleanup(struct xnthread *curr);
+
+void __xnthread_discard(struct xnthread *thread);
+
+/**
+ * @fn struct xnthread *xnthread_current(void)
+ * @brief Retrieve the current Cobalt core TCB.
+ *
+ * Returns the address of the current Cobalt core thread descriptor,
+ * or NULL if running over a regular Linux task. This call is not
+ * affected by the current runtime mode of the core thread.
+ *
+ * @note The returned value may differ from xnsched_current_thread()
+ * called from the same context, since the latter returns the root
+ * thread descriptor for the current CPU if the caller is running in
+ * secondary mode.
+ *
+ * @coretags{unrestricted}
+ */
+static inline struct xnthread *xnthread_current(void)
+{
+	return pipeline_current()->thread;
+}
+
+/**
+ * @fn struct xnthread *xnthread_from_task(struct task_struct *p)
+ * @brief Retrieve the Cobalt core TCB attached to a Linux task.
+ *
+ * Returns the address of the Cobalt core thread descriptor attached
+ * to the Linux task @a p, or NULL if @a p is a regular Linux
+ * task. This call is not affected by the current runtime mode of the
+ * core thread.
+ *
+ * @coretags{unrestricted}
+ */
+static inline struct xnthread *xnthread_from_task(struct task_struct *p)
+{
+	return pipeline_thread_from_task(p);
+}
+
+/**
+ * @fn void xnthread_test_cancel(void)
+ * @brief Introduce a thread cancellation point.
+ *
+ * Terminates the current thread if a cancellation request is pending
+ * for it, i.e. if xnthread_cancel() was called.
+ *
+ * @coretags{mode-unrestricted}
+ */
+static inline void xnthread_test_cancel(void)
+{
+	struct xnthread *curr = xnthread_current();
+
+	if (curr && xnthread_test_info(curr, XNCANCELD))
+		__xnthread_test_cancel(curr);
+}
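+
+/*
+ * Typical placement of a cancellation point in a blocking service
+ * loop (sketch only):
+ *
+ *	for (;;) {
+ *		xnthread_test_cancel();
+ *		... wait for and handle the next request ...
+ *	}
+ */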
+
+static inline
+void xnthread_complete_wait(struct xnthread_wait_context *wc)
+{
+	wc->posted = 1;
+}
+
+static inline
+int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
+{
+	return wc->posted;
+}
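+
+/*
+ * Sketch of the wait context protocol, assuming a waiter-defined
+ * structure embedding struct xnthread_wait_context (all names
+ * illustrative): the waiter installs the context before blocking, the
+ * waker fills in the payload and posts it, and the waiter checks the
+ * outcome once it resumes.
+ *
+ *	struct my_wait_context {
+ *		struct xnthread_wait_context wc;
+ *		void *payload;
+ *	} mwc;
+ *
+ *	xnthread_prepare_wait(&mwc.wc);
+ *	... block on the wait channel ...
+ *	if (xnthread_wait_complete_p(&mwc.wc))
+ *		consume(mwc.payload);
+ *
+ * The waker side retrieves the context with
+ * xnthread_get_wait_context() and calls xnthread_complete_wait() on
+ * it before readying the sleeper.
+ */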
+
+#ifdef CONFIG_XENO_ARCH_FPU
+void xnthread_switch_fpu(struct xnsched *sched);
+#else
+static inline void xnthread_switch_fpu(struct xnsched *sched) { }
+#endif /* CONFIG_XENO_ARCH_FPU */
+
+void xnthread_deregister(struct xnthread *thread);
+
+char *xnthread_format_status(unsigned long status,
+			     char *buf, int size);
+
+pid_t xnthread_host_pid(struct xnthread *thread);
+
+int xnthread_set_clock(struct xnthread *thread,
+		       struct xnclock *newclock);
+
+xnticks_t xnthread_get_timeout(struct xnthread *thread,
+			       xnticks_t ns);
+
+xnticks_t xnthread_get_period(struct xnthread *thread);
+
+void xnthread_prepare_wait(struct xnthread_wait_context *wc);
+
+int xnthread_init(struct xnthread *thread,
+		  const struct xnthread_init_attr *attr,
+		  struct xnsched_class *sched_class,
+		  const union xnsched_policy_param *sched_param);
+
+int xnthread_start(struct xnthread *thread,
+		   const struct xnthread_start_attr *attr);
+
+int xnthread_set_mode(int clrmask,
+		      int setmask);
+
+void xnthread_suspend(struct xnthread *thread,
+		      int mask,
+		      xnticks_t timeout,
+		      xntmode_t timeout_mode,
+		      struct xnsynch *wchan);
+
+void xnthread_resume(struct xnthread *thread,
+		     int mask);
+
+int xnthread_unblock(struct xnthread *thread);
+
+int xnthread_set_periodic(struct xnthread *thread,
+			  xnticks_t idate,
+			  xntmode_t timeout_mode,
+			  xnticks_t period);
+
+int xnthread_wait_period(unsigned long *overruns_r);
+
+int xnthread_set_slice(struct xnthread *thread,
+		       xnticks_t quantum);
+
+void xnthread_cancel(struct xnthread *thread);
+
+int xnthread_join(struct xnthread *thread, bool uninterruptible);
+
+int xnthread_harden(void);
+
+void xnthread_relax(int notify, int reason);
+
+void __xnthread_kick(struct xnthread *thread);
+
+void xnthread_kick(struct xnthread *thread);
+
+void __xnthread_demote(struct xnthread *thread);
+
+void xnthread_demote(struct xnthread *thread);
+
+void __xnthread_signal(struct xnthread *thread, int sig, int arg);
+
+void xnthread_signal(struct xnthread *thread, int sig, int arg);
+
+void xnthread_pin_initial(struct xnthread *thread);
+
+void xnthread_call_mayday(struct xnthread *thread, int reason);
+
+static inline void xnthread_get_resource(struct xnthread *curr)
+{
+	if (xnthread_test_state(curr, XNWEAK|XNDEBUG))
+		curr->res_count++;
+}
+
+static inline int xnthread_put_resource(struct xnthread *curr)
+{
+	if (xnthread_test_state(curr, XNWEAK) ||
+	    IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) {
+		if (unlikely(curr->res_count == 0)) {
+			if (xnthread_test_state(curr, XNWARN))
+				xnthread_signal(curr, SIGDEBUG,
+						SIGDEBUG_RESCNT_IMBALANCE);
+			return -EPERM;
+		}
+		curr->res_count--;
+	}
+
+	return 0;
+}
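+
+/*
+ * Sketch of the expected bracketing for threads subject to resource
+ * tracking (XNWEAK, or mutex sleep debugging): bump the count when a
+ * resource is acquired, drop it on release, and treat a non-zero
+ * return from xnthread_put_resource() as a caller-side imbalance.
+ *
+ *	struct xnthread *curr = xnthread_current();
+ *
+ *	xnthread_get_resource(curr);
+ *	... own the resource ...
+ *	if (xnthread_put_resource(curr))
+ *		... -EPERM: more releases than acquisitions ...
+ */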
+
+static inline void xnthread_commit_ceiling(struct xnthread *curr)
+{
+	if (curr->u_window->pp_pending)
+		xnsynch_commit_ceiling(curr);
+}
+
+#ifdef CONFIG_SMP
+
+void xnthread_migrate_passive(struct xnthread *thread,
+			      struct xnsched *sched);
+#else
+
+static inline void xnthread_migrate_passive(struct xnthread *thread,
+					    struct xnsched *sched)
+{ }
+
+#endif
+
+int __xnthread_set_schedparam(struct xnthread *thread,
+			      struct xnsched_class *sched_class,
+			      const union xnsched_policy_param *sched_param);
+
+int xnthread_set_schedparam(struct xnthread *thread,
+			    struct xnsched_class *sched_class,
+			    const union xnsched_policy_param *sched_param);
+
+int xnthread_killall(int grace, int mask);
+
+void __xnthread_propagate_schedparam(struct xnthread *curr);
+
+static inline void xnthread_propagate_schedparam(struct xnthread *curr)
+{
+	if (xnthread_test_info(curr, XNSCHEDP))
+		__xnthread_propagate_schedparam(curr);
+}
+
+extern struct xnthread_personality xenomai_personality;
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_THREAD_H */
+++ linux-patched/include/xenomai/cobalt/kernel/select.h	2022-03-21 12:58:31.535868035 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/lock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2008 Efixo <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SELECT_H
+#define _COBALT_KERNEL_SELECT_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/thread.h>
+
+/**
+ * @addtogroup cobalt_core_select
+ * @{
+ */
+
+#define XNSELECT_READ      0
+#define XNSELECT_WRITE     1
+#define XNSELECT_EXCEPT    2
+#define XNSELECT_MAX_TYPES 3
+
+struct xnselector {
+	struct xnsynch synchbase;
+	struct fds {
+		fd_set expected;
+		fd_set pending;
+	} fds[XNSELECT_MAX_TYPES];
+	struct list_head destroy_link;
+	struct list_head bindings; /* only used by xnselector_destroy */
+};
+
+#define __NFDBITS__	(8 * sizeof(unsigned long))
+#define __FDSET_LONGS__	(__FD_SETSIZE/__NFDBITS__)
+#define	__FDELT__(d)	((d) / __NFDBITS__)
+#define	__FDMASK__(d)	(1UL << ((d) % __NFDBITS__))
+
+static inline void __FD_SET__(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+	unsigned long __tmp = __fd / __NFDBITS__;
+	unsigned long __rem = __fd % __NFDBITS__;
+	__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
+}
+
+static inline void __FD_CLR__(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+	unsigned long __tmp = __fd / __NFDBITS__;
+	unsigned long __rem = __fd % __NFDBITS__;
+	__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
+}
+
+static inline int __FD_ISSET__(unsigned long __fd, const __kernel_fd_set *__p)
+{
+	unsigned long __tmp = __fd / __NFDBITS__;
+	unsigned long __rem = __fd % __NFDBITS__;
+	return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
+}
+
+static inline void __FD_ZERO__(__kernel_fd_set *__p)
+{
+	unsigned long *__tmp = __p->fds_bits;
+	int __i;
+
+	__i = __FDSET_LONGS__;
+	while (__i) {
+		__i--;
+		*__tmp = 0;
+		__tmp++;
+	}
+}
+
+struct xnselect {
+	struct list_head bindings;
+};
+
+#define DECLARE_XNSELECT(name) struct xnselect name
+
+struct xnselect_binding {
+	struct xnselector *selector;
+	struct xnselect *fd;
+	unsigned int type;
+	unsigned int bit_index;
+	struct list_head link;  /* link in selected fds list. */
+	struct list_head slink; /* link in selector list */
+};
+
+void xnselect_init(struct xnselect *select_block);
+
+int xnselect_bind(struct xnselect *select_block,
+		  struct xnselect_binding *binding,
+		  struct xnselector *selector,
+		  unsigned int type,
+		  unsigned int bit_index,
+		  unsigned int state);
+
+int __xnselect_signal(struct xnselect *select_block, unsigned int state);
+
+/**
+ * Signal a file descriptor state change.
+ *
+ * @param select_block pointer to an @a xnselect structure representing the file
+ * descriptor whose state changed;
+ * @param state new value of the state.
+ *
+ * @retval 1 if rescheduling is needed;
+ * @retval 0 otherwise.
+ */
+static inline int
+xnselect_signal(struct xnselect *select_block, unsigned int state)
+{
+	if (!list_empty(&select_block->bindings))
+		return __xnselect_signal(select_block, state);
+
+	return 0;
+}
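+
+/*
+ * Sketch of the driver-side notification pattern, assuming
+ * @select_block was set up with xnselect_init() and bound from the
+ * select() handler: flag the descriptor as readable when input shows
+ * up, and trigger rescheduling if a waiter needs waking up.
+ *
+ *	if (xnselect_signal(&select_block, 1))
+ *		xnsched_run();
+ */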
+
+void xnselect_destroy(struct xnselect *select_block);
+
+int xnselector_init(struct xnselector *selector);
+
+int xnselect(struct xnselector *selector,
+	     fd_set *out_fds[XNSELECT_MAX_TYPES],
+	     fd_set *in_fds[XNSELECT_MAX_TYPES],
+	     int nfds,
+	     xnticks_t timeout, xntmode_t timeout_mode);
+
+void xnselector_destroy(struct xnselector *selector);
+
+int xnselect_mount(void);
+
+int xnselect_umount(void);
+
+/** @} */
+
+#endif /* _COBALT_KERNEL_SELECT_H */
+++ linux-patched/include/xenomai/cobalt/kernel/lock.h	2022-03-21 12:58:31.528868103 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/heap.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001-2008,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_LOCK_H
+#define _COBALT_KERNEL_LOCK_H
+
+#include <pipeline/lock.h>
+#include <linux/percpu.h>
+#include <cobalt/kernel/assert.h>
+#include <pipeline/pipeline.h>
+
+/**
+ * @addtogroup cobalt_core_lock
+ *
+ * @{
+ */
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+struct xnlock {
+	unsigned owner;
+	arch_spinlock_t alock;
+	const char *file;
+	const char *function;
+	unsigned int line;
+	int cpu;
+	unsigned long long spin_time;
+	unsigned long long lock_date;
+};
+
+struct xnlockinfo {
+	unsigned long long spin_time;
+	unsigned long long lock_time;
+	const char *file;
+	const char *function;
+	unsigned int line;
+};
+
+#define XNARCH_LOCK_UNLOCKED (struct xnlock) {	\
+	~0,					\
+	__ARCH_SPIN_LOCK_UNLOCKED,		\
+	NULL,					\
+	NULL,					\
+	0,					\
+	-1,					\
+	0LL,					\
+	0LL,					\
+}
+
+#define XNLOCK_DBG_CONTEXT		, __FILE__, __LINE__, __FUNCTION__
+#define XNLOCK_DBG_CONTEXT_ARGS					\
+	, const char *file, int line, const char *function
+#define XNLOCK_DBG_PASS_CONTEXT		, file, line, function
+
+void xnlock_dbg_prepare_acquire(unsigned long long *start);
+void xnlock_dbg_prepare_spin(unsigned int *spin_limit);
+void xnlock_dbg_acquired(struct xnlock *lock, int cpu,
+			 unsigned long long *start,
+			 const char *file, int line,
+			 const char *function);
+int xnlock_dbg_release(struct xnlock *lock,
+			 const char *file, int line,
+			 const char *function);
+
+DECLARE_PER_CPU(struct xnlockinfo, xnlock_stats);
+
+#else /* !CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+struct xnlock {
+	unsigned owner;
+	arch_spinlock_t alock;
+};
+
+#define XNARCH_LOCK_UNLOCKED			\
+	(struct xnlock) {			\
+		~0,				\
+		__ARCH_SPIN_LOCK_UNLOCKED,	\
+	}
+
+#define XNLOCK_DBG_CONTEXT
+#define XNLOCK_DBG_CONTEXT_ARGS
+#define XNLOCK_DBG_PASS_CONTEXT
+
+static inline
+void xnlock_dbg_prepare_acquire(unsigned long long *start)
+{
+}
+
+static inline
+void xnlock_dbg_prepare_spin(unsigned int *spin_limit)
+{
+}
+
+static inline void
+xnlock_dbg_acquired(struct xnlock *lock, int cpu,
+		    unsigned long long *start)
+{
+}
+
+static inline int xnlock_dbg_release(struct xnlock *lock)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING)
+
+#define xnlock_get(lock)		__xnlock_get(lock  XNLOCK_DBG_CONTEXT)
+#define xnlock_put(lock)		__xnlock_put(lock  XNLOCK_DBG_CONTEXT)
+#define xnlock_get_irqsave(lock,x) \
+	((x) = __xnlock_get_irqsave(lock  XNLOCK_DBG_CONTEXT))
+#define xnlock_put_irqrestore(lock,x) \
+	__xnlock_put_irqrestore(lock,x  XNLOCK_DBG_CONTEXT)
+#define xnlock_clear_irqoff(lock)	xnlock_put_irqrestore(lock, 1)
+#define xnlock_clear_irqon(lock)	xnlock_put_irqrestore(lock, 0)
+
+static inline void xnlock_init(struct xnlock *lock)
+{
+	*lock = XNARCH_LOCK_UNLOCKED;
+}
+
+#define DECLARE_XNLOCK(lock)		struct xnlock lock
+#define DECLARE_EXTERN_XNLOCK(lock)	extern struct xnlock lock
+#define DEFINE_XNLOCK(lock)		struct xnlock lock = XNARCH_LOCK_UNLOCKED
+#define DEFINE_PRIVATE_XNLOCK(lock)	static DEFINE_XNLOCK(lock)
+
+static inline int ____xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	int cpu = raw_smp_processor_id();
+	unsigned long long start;
+
+	if (lock->owner == cpu)
+		return 2;
+
+	xnlock_dbg_prepare_acquire(&start);
+
+	arch_spin_lock(&lock->alock);
+	lock->owner = cpu;
+
+	xnlock_dbg_acquired(lock, cpu, &start /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return 0;
+}
+
+static inline void ____xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (xnlock_dbg_release(lock /*, */ XNLOCK_DBG_PASS_CONTEXT))
+		return;
+
+	lock->owner = ~0U;
+	arch_spin_unlock(&lock->alock);
+}
+
+#ifndef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK
+#define ___xnlock_get ____xnlock_get
+#define ___xnlock_put ____xnlock_put
+#else /* out of line xnlock */
+int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
+
+void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
+#endif /* out of line xnlock */
+
+static inline spl_t
+__xnlock_get_irqsave(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	unsigned long flags;
+
+	splhigh(flags);
+
+	if (__locking_active__)
+		flags |= ___xnlock_get(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return flags;
+}
+
+static inline void __xnlock_put_irqrestore(struct xnlock *lock, spl_t flags
+					   /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	/* Only release the lock if we didn't take it recursively. */
+	if (__locking_active__ && !(flags & 2))
+		___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	splexit(flags & 1);
+}
+
+static inline int xnlock_is_owner(struct xnlock *lock)
+{
+	if (__locking_active__)
+		return lock->owner == raw_smp_processor_id();
+
+	return 1;
+}
+
+static inline int __xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (__locking_active__)
+		return ___xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return 0;
+}
+
+static inline void __xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (__locking_active__)
+		___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+}
+
+#undef __locking_active__
+
+#else /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */
+
+#define xnlock_init(lock)		do { } while (0)
+#define xnlock_get(lock)		do { } while (0)
+#define xnlock_put(lock)		do { } while (0)
+#define xnlock_get_irqsave(lock,x)	splhigh(x)
+#define xnlock_put_irqrestore(lock,x)	splexit(x)
+#define xnlock_clear_irqoff(lock)	splmax()
+#define xnlock_clear_irqon(lock)	splnone()
+#define xnlock_is_owner(lock)		1
+
+#define DECLARE_XNLOCK(lock)
+#define DECLARE_EXTERN_XNLOCK(lock)
+#define DEFINE_XNLOCK(lock)
+#define DEFINE_PRIVATE_XNLOCK(lock)
+
+#endif /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */
+
+DECLARE_EXTERN_XNLOCK(nklock);
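+
+/*
+ * Typical critical section around core state protected by the big
+ * nucleus lock (sketch): the flags returned by xnlock_get_irqsave()
+ * also encode recursive acquisition, so they must be passed back
+ * unmodified to xnlock_put_irqrestore().
+ *
+ *	spl_t s;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	... access protected core data ...
+ *	xnlock_put_irqrestore(&nklock, s);
+ */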
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_LOCK_H */
+++ linux-patched/include/xenomai/cobalt/kernel/heap.h	2022-03-21 12:58:31.521868171 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/trace.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_HEAP_H
+#define _COBALT_KERNEL_HEAP_H
+
+#include <linux/string.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/uapi/kernel/heap.h>
+
+/**
+ * @addtogroup cobalt_core_heap
+ * @{
+ */
+
+#define XNHEAP_PAGE_SHIFT	9 /* 2^9 => 512 bytes */
+#define XNHEAP_PAGE_SIZE	(1UL << XNHEAP_PAGE_SHIFT)
+#define XNHEAP_PAGE_MASK	(~(XNHEAP_PAGE_SIZE - 1))
+#define XNHEAP_MIN_LOG2		4 /* 16 bytes */
+/*
+ * Use bucketed memory for sizes between 2^XNHEAP_MIN_LOG2 and
+ * 2^(XNHEAP_PAGE_SHIFT-1).
+ */
+#define XNHEAP_MAX_BUCKETS	(XNHEAP_PAGE_SHIFT - XNHEAP_MIN_LOG2)
+#define XNHEAP_MIN_ALIGN	(1U << XNHEAP_MIN_LOG2)
+/* Maximum size of a heap (4GB - PAGE_SIZE). */
+#define XNHEAP_MAX_HEAPSZ	(4294967295U - PAGE_SIZE + 1)
+/* Bits we need for encoding a page # */
+#define XNHEAP_PGENT_BITS      (32 - XNHEAP_PAGE_SHIFT)
+/* Each page is represented by a page map entry. */
+#define XNHEAP_PGMAP_BYTES	sizeof(struct xnheap_pgentry)
+
+struct xnheap_pgentry {
+	/* Linkage in bucket list. */
+	unsigned int prev : XNHEAP_PGENT_BITS;
+	unsigned int next : XNHEAP_PGENT_BITS;
+	/* Page type: page_list, or log2 of the block size for bucketed memory. */
+	unsigned int type : 6;
+	/*
+	 * We hold either a spatial map of busy blocks within the page
+	 * for bucketed memory (up to 32 blocks per page), or the
+	 * overall size of the multi-page block if entry.type ==
+	 * page_list.
+	 */
+	union {
+		u32 map;
+		u32 bsize;
+	};
+};
+
+/*
+ * A range descriptor is stored at the beginning of the first page of
+ * a range of free pages. xnheap_range.size is nrpages *
+ * XNHEAP_PAGE_SIZE. Ranges are indexed by address and size in
+ * rbtrees.
+ */
+struct xnheap_range {
+	struct rb_node addr_node;
+	struct rb_node size_node;
+	size_t size;
+};
+
+struct xnheap {
+	void *membase;
+	struct rb_root addr_tree;
+	struct rb_root size_tree;
+	struct xnheap_pgentry *pagemap;
+	size_t usable_size;
+	size_t used_size;
+	u32 buckets[XNHEAP_MAX_BUCKETS];
+	char name[XNOBJECT_NAME_LEN];
+	DECLARE_XNLOCK(lock);
+	struct list_head next;
+};
+
+extern struct xnheap cobalt_heap;
+
+#define xnmalloc(size)     xnheap_alloc(&cobalt_heap, size)
+#define xnfree(ptr)        xnheap_free(&cobalt_heap, ptr)
+
+static inline void *xnheap_get_membase(const struct xnheap *heap)
+{
+	return heap->membase;
+}
+
+static inline
+size_t xnheap_get_size(const struct xnheap *heap)
+{
+	return heap->usable_size;
+}
+
+static inline
+size_t xnheap_get_used(const struct xnheap *heap)
+{
+	return heap->used_size;
+}
+
+static inline
+size_t xnheap_get_free(const struct xnheap *heap)
+{
+	return heap->usable_size - heap->used_size;
+}
+
+int xnheap_init(struct xnheap *heap,
+		void *membase, size_t size);
+
+void xnheap_destroy(struct xnheap *heap);
+
+void *xnheap_alloc(struct xnheap *heap, size_t size);
+
+void xnheap_free(struct xnheap *heap, void *block);
+
+ssize_t xnheap_check_block(struct xnheap *heap, void *block);
+
+void xnheap_set_name(struct xnheap *heap,
+		     const char *name, ...);
+
+void *xnheap_vmalloc(size_t size);
+
+void xnheap_vfree(void *p);
+
+static inline void *xnheap_zalloc(struct xnheap *heap, size_t size)
+{
+	void *p;
+
+	p = xnheap_alloc(heap, size);
+	if (p)
+		memset(p, 0, size);
+
+	return p;
+}
+
+static inline char *xnstrdup(const char *s)
+{
+	char *p;
+
+	p = xnmalloc(strlen(s) + 1);
+	if (p == NULL)
+		return NULL;
+
+	return strcpy(p, s);
+}
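+
+/*
+ * A minimal life cycle sketch (error handling omitted, sizes
+ * illustrative): back a private heap with vmalloc'ed storage, carve a
+ * block from it, then tear everything down.
+ *
+ *	struct xnheap heap;
+ *	void *mem, *p;
+ *
+ *	mem = xnheap_vmalloc(256 * 1024);
+ *	xnheap_init(&heap, mem, 256 * 1024);
+ *	xnheap_set_name(&heap, "my-heap");
+ *	p = xnheap_alloc(&heap, 128);
+ *	...
+ *	xnheap_free(&heap, p);
+ *	xnheap_destroy(&heap);
+ *	xnheap_vfree(mem);
+ */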
+
+#ifdef CONFIG_XENO_OPT_VFILE
+void xnheap_init_proc(void);
+void xnheap_cleanup_proc(void);
+#else /* !CONFIG_XENO_OPT_VFILE */
+static inline void xnheap_init_proc(void) { }
+static inline void xnheap_cleanup_proc(void) { }
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_HEAP_H */
+++ linux-patched/include/xenomai/cobalt/kernel/trace.h	2022-03-21 12:58:31.513868249 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/clock.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_TRACE_H
+#define _COBALT_KERNEL_TRACE_H
+
+#include <pipeline/trace.h>
+
+#endif /* !_COBALT_KERNEL_TRACE_H */
+++ linux-patched/include/xenomai/cobalt/kernel/clock.h	2022-03-21 12:58:31.506868318 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/xenomai/cobalt/kernel/list.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2006,2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_CLOCK_H
+#define _COBALT_KERNEL_CLOCK_H
+
+#include <pipeline/pipeline.h>
+#include <pipeline/clock.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/uapi/kernel/types.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @addtogroup cobalt_core_clock
+ * @{
+ */
+
+struct xnsched;
+struct xntimerdata;
+struct __kernel_timex;
+
+struct xnclock_gravity {
+	unsigned long irq;
+	unsigned long kernel;
+	unsigned long user;
+};
+
+struct xnclock {
+	/** (ns) */
+	xnsticks_t wallclock_offset;
+	/** (ns) */
+	xnticks_t resolution;
+	/** (raw clock ticks). */
+	struct xnclock_gravity gravity;
+	/** Clock name. */
+	const char *name;
+	struct {
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+		xnticks_t (*read_raw)(struct xnclock *clock);
+		xnticks_t (*read_monotonic)(struct xnclock *clock);
+		int (*set_time)(struct xnclock *clock,
+				const struct timespec64 *ts);
+		xnsticks_t (*ns_to_ticks)(struct xnclock *clock,
+					  xnsticks_t ns);
+		xnsticks_t (*ticks_to_ns)(struct xnclock *clock,
+					  xnsticks_t ticks);
+		xnsticks_t (*ticks_to_ns_rounded)(struct xnclock *clock,
+						  xnsticks_t ticks);
+		void (*program_local_shot)(struct xnclock *clock,
+					   struct xnsched *sched);
+		void (*program_remote_shot)(struct xnclock *clock,
+					    struct xnsched *sched);
+#endif
+		int (*adjust_time)(struct xnclock *clock,
+				   struct __kernel_timex *tx);
+		int (*set_gravity)(struct xnclock *clock,
+				   const struct xnclock_gravity *p);
+		void (*reset_gravity)(struct xnclock *clock);
+#ifdef CONFIG_XENO_OPT_VFILE
+		void (*print_status)(struct xnclock *clock,
+				     struct xnvfile_regular_iterator *it);
+#endif
+	} ops;
+	/* Private section. */
+	struct xntimerdata *timerdata;
+	int id;
+#ifdef CONFIG_SMP
+	/** Possible CPU affinity of clock beat. */
+	cpumask_t affinity;
+#endif
+#ifdef CONFIG_XENO_OPT_STATS
+	struct xnvfile_snapshot timer_vfile;
+	struct xnvfile_rev_tag timer_revtag;
+	struct list_head timerq;
+	int nrtimers;
+#endif /* CONFIG_XENO_OPT_STATS */
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnvfile_regular vfile;
+#endif
+};
+
+struct xnclock_ratelimit_state {
+	xnticks_t interval;
+	xnticks_t begin;
+	int burst;
+	int printed;
+	int missed;
+};
+
+extern struct xnclock nkclock;
+
+int xnclock_register(struct xnclock *clock,
+		     const cpumask_t *affinity);
+
+void xnclock_deregister(struct xnclock *clock);
+
+void xnclock_tick(struct xnclock *clock);
+
+void xnclock_core_local_shot(struct xnsched *sched);
+
+void xnclock_core_remote_shot(struct xnsched *sched);
+
+xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns);
+
+xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks);
+
+xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks);
+
+xnticks_t xnclock_core_read_monotonic(void);
+
+static inline xnticks_t xnclock_core_read_raw(void)
+{
+	return pipeline_read_cycle_counter();
+}
+
+/* We use the Linux defaults */
+#define XN_RATELIMIT_INTERVAL	5000000000LL
+#define XN_RATELIMIT_BURST	10
+
+int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func);
+
+#define xnclock_ratelimit()	({					\
+	static struct xnclock_ratelimit_state __state = {		\
+		.interval	= XN_RATELIMIT_INTERVAL,		\
+		.burst		= XN_RATELIMIT_BURST,			\
+	};								\
+	__xnclock_ratelimit(&__state, __func__);			\
+})
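+
+/*
+ * Typical usage, following the printk_ratelimit() idiom: let at most
+ * XN_RATELIMIT_BURST messages through per XN_RATELIMIT_INTERVAL
+ * nanoseconds for this call site (sketch only).
+ *
+ *	if (xnclock_ratelimit())
+ *		printk(KERN_WARNING "suspicious condition detected\n");
+ */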
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+static inline void xnclock_program_shot(struct xnclock *clock,
+					struct xnsched *sched)
+{
+	if (likely(clock == &nkclock))
+		xnclock_core_local_shot(sched);
+	else if (clock->ops.program_local_shot)
+		clock->ops.program_local_shot(clock, sched);
+}
+
+static inline void xnclock_remote_shot(struct xnclock *clock,
+				       struct xnsched *sched)
+{
+#ifdef CONFIG_SMP
+	if (likely(clock == &nkclock))
+		xnclock_core_remote_shot(sched);
+	else if (clock->ops.program_remote_shot)
+		clock->ops.program_remote_shot(clock, sched);
+#endif
+}
+
+static inline xnticks_t xnclock_read_raw(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_read_raw();
+
+	return clock->ops.read_raw(clock);
+}
+
+static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock,
+					     xnsticks_t ns)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ns_to_ticks(ns);
+
+	return clock->ops.ns_to_ticks(clock, ns);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock,
+					     xnsticks_t ticks)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ticks_to_ns(ticks);
+
+	return clock->ops.ticks_to_ns(clock, ticks);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock,
+						     xnsticks_t ticks)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ticks_to_ns_rounded(ticks);
+
+	return clock->ops.ticks_to_ns_rounded(clock, ticks);
+}
+
+static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_read_monotonic();
+
+	return clock->ops.read_monotonic(clock);
+}
+
+static inline int xnclock_set_time(struct xnclock *clock,
+				   const struct timespec64 *ts)
+{
+	if (likely(clock == &nkclock))
+		return -EINVAL;
+
+	return clock->ops.set_time(clock, ts);
+}
+
+#else /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline void xnclock_program_shot(struct xnclock *clock,
+					struct xnsched *sched)
+{
+	xnclock_core_local_shot(sched);
+}
+
+static inline void xnclock_remote_shot(struct xnclock *clock,
+				       struct xnsched *sched)
+{
+#ifdef CONFIG_SMP
+	xnclock_core_remote_shot(sched);
+#endif
+}
+
+static inline xnticks_t xnclock_read_raw(struct xnclock *clock)
+{
+	return xnclock_core_read_raw();
+}
+
+static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock,
+					     xnsticks_t ns)
+{
+	return xnclock_core_ns_to_ticks(ns);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock,
+					     xnsticks_t ticks)
+{
+	return xnclock_core_ticks_to_ns(ticks);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock,
+						     xnsticks_t ticks)
+{
+	return xnclock_core_ticks_to_ns_rounded(ticks);
+}
+
+static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock)
+{
+	return xnclock_core_read_monotonic();
+}
+
+static inline int xnclock_set_time(struct xnclock *clock,
+				   const struct timespec64 *ts)
+{
+	/*
+	 * There is no way to change the core clock's idea of time.
+	 */
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline int xnclock_adjust_time(struct xnclock *clock,
+				      struct __kernel_timex *tx)
+{
+	if (clock->ops.adjust_time == NULL)
+		return -EOPNOTSUPP;
+
+	return clock->ops.adjust_time(clock, tx);
+}
+
+static inline xnticks_t xnclock_get_offset(struct xnclock *clock)
+{
+	return clock->wallclock_offset;
+}
+
+static inline xnticks_t xnclock_get_resolution(struct xnclock *clock)
+{
+	return clock->resolution; /* ns */
+}
+
+static inline void xnclock_set_resolution(struct xnclock *clock,
+					  xnticks_t resolution)
+{
+	clock->resolution = resolution; /* ns */
+}
+
+static inline int xnclock_set_gravity(struct xnclock *clock,
+				      const struct xnclock_gravity *gravity)
+{
+	if (clock->ops.set_gravity)
+		return clock->ops.set_gravity(clock, gravity);
+
+	return -EINVAL;
+}
+
+static inline void xnclock_reset_gravity(struct xnclock *clock)
+{
+	if (clock->ops.reset_gravity)
+		clock->ops.reset_gravity(clock);
+}
+
+#define xnclock_get_gravity(__clock, __type)  ((__clock)->gravity.__type)
+
+static inline xnticks_t xnclock_read_realtime(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return pipeline_read_wallclock();
+	/*
+	 * Return an adjusted value of the monotonic time with the
+	 * translated system wallclock offset.
+	 */
+	return xnclock_read_monotonic(clock) + xnclock_get_offset(clock);
+}
+
+void xnclock_apply_offset(struct xnclock *clock,
+			  xnsticks_t delta_ns);
+
+void xnclock_set_wallclock(xnticks_t epoch_ns);
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+void xnclock_init_proc(void);
+
+void xnclock_cleanup_proc(void);
+
+static inline void xnclock_print_status(struct xnclock *clock,
+					struct xnvfile_regular_iterator *it)
+{
+	if (clock->ops.print_status)
+		clock->ops.print_status(clock, it);
+}
+
+#else
+static inline void xnclock_init_proc(void) { }
+static inline void xnclock_cleanup_proc(void) { }
+#endif
+
+int xnclock_init(void);
+
+void xnclock_cleanup(void);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_CLOCK_H */
+++ linux-patched/include/xenomai/cobalt/kernel/list.h	2022-03-21 12:58:31.498868396 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/linux/xenomai/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_LIST_H
+#define _COBALT_KERNEL_LIST_H
+
+#include <linux/list.h>
+
+#define __list_add_pri(__new, __head, __member_pri, __member_next, __relop)	\
+do {										\
+	typeof(*__new) *__pos;							\
+	if (list_empty(__head))							\
+		list_add(&(__new)->__member_next, __head);		 	\
+	else {									\
+		list_for_each_entry_reverse(__pos, __head, __member_next) {	\
+			if ((__new)->__member_pri __relop __pos->__member_pri)	\
+				break;						\
+		}								\
+		list_add(&(__new)->__member_next, &__pos->__member_next); 	\
+	}									\
+} while (0)
+
+#define list_add_priff(__new, __head, __member_pri, __member_next)		\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <=)
+
+#define list_add_prilf(__new, __head, __member_pri, __member_next)		\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <)
+
+#define list_get_entry(__head, __type, __member)		\
+  ({								\
+	  __type *__item;					\
+	  __item = list_first_entry(__head, __type, __member);	\
+	  list_del(&__item->__member);				\
+	  __item;						\
+  })
+
+#define list_get_entry_init(__head, __type, __member)		\
+  ({								\
+	  __type *__item;					\
+	  __item = list_first_entry(__head, __type, __member);	\
+	  list_del_init(&__item->__member);			\
+	  __item;						\
+  })
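+
+/*
+ * A short sketch of the priority-ordered helpers, assuming an
+ * illustrative element type carrying its priority and list holder:
+ * the queue stays sorted by decreasing priority value,
+ * list_add_priff() enqueues FIFO among equal priorities while
+ * list_add_prilf() enqueues LIFO, and list_get_entry() pops the head.
+ *
+ *	struct item {
+ *		int prio;
+ *		struct list_head next;
+ *	} *item;
+ *	LIST_HEAD(queue);
+ *
+ *	list_add_priff(item, &queue, prio, next);
+ *	...
+ *	item = list_get_entry(&queue, struct item, next);
+ */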
+
+#ifndef list_next_entry
+#define list_next_entry(__item, __member)			\
+	list_entry((__item)->__member.next, typeof(*(__item)), __member)
+#endif
+
+#endif /* !_COBALT_KERNEL_LIST_H */
+++ linux-patched/include/linux/xenomai/wrappers.h	2022-03-21 12:58:28.942893320 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/wrappers.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_LINUX_WRAPPERS_H
+#define _COBALT_LINUX_WRAPPERS_H
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
+#include <linux/signal.h>
+typedef siginfo_t kernel_siginfo_t;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#else
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/rt.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <uapi/linux/sched/types.h>
+#endif
+
+#include <pipeline/wrappers.h>
+
+#endif /* !_COBALT_LINUX_WRAPPERS_H */
+++ linux-patched/include/asm-generic/xenomai/wrappers.h	2022-03-21 12:58:28.937893369 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/syscall.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2005-2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_WRAPPERS_H
+#define _COBALT_ASM_GENERIC_WRAPPERS_H
+
+#include <linux/xenomai/wrappers.h>
+
+#define COBALT_BACKPORT(__sym) __cobalt_backport_ ##__sym
+
+/*
+ * To keep the #ifdefery as readable as possible, please:
+ *
+ * - keep the conditional structure flat, no nesting (e.g. do not fold
+ *   the pre-3.11 conditions into the pre-3.14 ones).
+ * - group all wrappers for a single kernel revision.
+ * - list conditional blocks in order of kernel release, latest first
+ * - identify the first kernel release for which the wrapper should
+ *   be defined, instead of testing the existence of a preprocessor
+ *   symbol, so that obsolete wrappers can be spotted.
+ */
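+
+/*
+ * For instance, a wrapper backporting a hypothetical helper first
+ * available in release 5.N would look like (illustrative only):
+ *
+ *	#if LINUX_VERSION_CODE < KERNEL_VERSION(5,N,0)
+ *	#define some_new_helper(__arg)	some_old_helper(__arg)
+ *	#endif
+ */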
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+#define raw_copy_to_user(__to, __from, __n)	__copy_to_user_inatomic(__to, __from, __n)
+#define raw_copy_from_user(__to, __from, __n)	__copy_from_user_inatomic(__to, __from, __n)
+#define raw_put_user(__from, __to)		__put_user_inatomic(__from, __to)
+#define raw_get_user(__to, __from)		__get_user_inatomic(__to, __from)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)
+#define in_ia32_syscall() (current_thread_info()->status & TS_COMPAT)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
+#define cobalt_gpiochip_dev(__gc)	((__gc)->dev)
+#else
+#define cobalt_gpiochip_dev(__gc)	((__gc)->parent)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)
+#define cobalt_get_restart_block(p)	(&task_thread_info(p)->restart_block)
+#else
+#define cobalt_get_restart_block(p)	(&(p)->restart_block)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
+#define user_msghdr msghdr
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+#include <linux/netdevice.h>
+
+#undef alloc_netdev
+#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
+	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#include <linux/trace_seq.h>
+
+static inline unsigned char *
+trace_seq_buffer_ptr(struct trace_seq *s)
+{
+	return s->buffer + s->len;
+}
+
+#endif /* < 3.17 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
+#define smp_mb__before_atomic()  smp_mb()
+#define smp_mb__after_atomic()   smp_mb()
+#endif /* < 3.16 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
+#define raw_cpu_ptr(v)	__this_cpu_ptr(v)
+#endif /* < 3.15 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
+#include <linux/pci.h>
+
+#ifdef CONFIG_PCI
+#define pci_enable_msix_range COBALT_BACKPORT(pci_enable_msix_range)
+#ifdef CONFIG_PCI_MSI
+int pci_enable_msix_range(struct pci_dev *dev,
+			  struct msix_entry *entries,
+			  int minvec, int maxvec);
+#else /* !CONFIG_PCI_MSI */
+static inline
+int pci_enable_msix_range(struct pci_dev *dev,
+			  struct msix_entry *entries,
+			  int minvec, int maxvec)
+{
+	return -ENOSYS;
+}
+#endif /* !CONFIG_PCI_MSI */
+#endif /* CONFIG_PCI */
+#endif /* < 3.14 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
+#include <linux/dma-mapping.h>
+#include <linux/hwmon.h>
+
+#define dma_set_mask_and_coherent COBALT_BACKPORT(dma_set_mask_and_coherent)
+static inline
+int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+	int rc = dma_set_mask(dev, mask);
+	if (rc == 0)
+		dma_set_coherent_mask(dev, mask);
+	return rc;
+}
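+
+/*
+ * Typical use in a driver probe path (illustrative only):
+ *
+ *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ *		return -EIO;
+ *
+ * As with the mainline helper, the coherent mask is only updated when
+ * the streaming mask could be set successfully.
+ */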
+
+#ifdef CONFIG_HWMON
+#define hwmon_device_register_with_groups \
+	COBALT_BACKPORT(hwmon_device_register_with_groups)
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups);
+
+#define devm_hwmon_device_register_with_groups \
+	COBALT_BACKPORT(devm_hwmon_device_register_with_groups)
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups);
+#endif /* CONFIG_HWMON */
+
+#define reinit_completion(__x)	INIT_COMPLETION(*(__x))
+
+#endif /* < 3.13 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
+#define DEVICE_ATTR_RW(_name)	__ATTR_RW(_name)
+#define DEVICE_ATTR_RO(_name)	__ATTR_RO(_name)
+#define DEVICE_ATTR_WO(_name)	__ATTR_WO(_name)
+#endif /* < 3.11 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
+#error "Xenomai/cobalt requires Linux kernel 3.10 or above"
+#endif /* < 3.10 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)
+#define __kernel_timex		timex
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)
+#define old_timex32		compat_timex
+#define SO_RCVTIMEO_OLD		SO_RCVTIMEO
+#define SO_SNDTIMEO_OLD		SO_SNDTIMEO
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+#define mmiowb()		do { } while (0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#define __kernel_old_timeval	timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)
+#define mmap_read_lock(__mm)	down_read(&(__mm)->mmap_sem)
+#define mmap_read_unlock(__mm)	up_read(&(__mm)->mmap_sem)
+#define mmap_write_lock(__mm)	down_write(&(__mm)->mmap_sem)
+#define mmap_write_unlock(__mm)	up_write(&(__mm)->mmap_sem)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write) \
+	struct file_operations __name = {			    \
+		.open = (__open),				    \
+		.release = (__release),				    \
+		.read = (__read),				    \
+		.write = (__write),				    \
+		.llseek = seq_lseek,				    \
+}
+#else
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write)	\
+	struct proc_ops __name = {					\
+		.proc_open = (__open),					\
+		.proc_release = (__release),				\
+		.proc_read = (__read),					\
+		.proc_write = (__write),				\
+		.proc_lseek = seq_lseek,				\
+}
+#endif
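+
+/*
+ * Usage sketch (hypothetical names, for illustration): the macro
+ * expands to a file_operations or proc_ops initializer depending on
+ * the kernel release, so a single definition covers both cases:
+ *
+ *	static DEFINE_PROC_OPS(foo_proc_ops,
+ *			foo_proc_open, single_release,
+ *			seq_read, foo_proc_write);
+ */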
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)
+#define vmalloc_kernel(__size, __flags)	__vmalloc(__size, GFP_KERNEL|__flags, PAGE_KERNEL)
+#else
+#define vmalloc_kernel(__size, __flags)	__vmalloc(__size, GFP_KERNEL|__flags)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)
+#define pci_aer_clear_nonfatal_status	pci_cleanup_aer_uncorrect_error_status
+#define old_timespec32    compat_timespec
+#define old_itimerspec32  compat_itimerspec
+#define old_timeval32     compat_timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	({								\
+		loff_t ___file_size;					\
+		int __ret;						\
+		__ret = kernel_read_file(__file, __buf, &___file_size,	\
+				__buf_size, __id);			\
+		(*__file_size) = ___file_size;				\
+		__ret;							\
+	})
+#else
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	kernel_read_file(__file, 0, __buf, __buf_size, __file_size, __id)
+#endif
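+
+/*
+ * Usage sketch (hypothetical values, for illustration): whatever the
+ * kernel release, the wrapper reads the whole file into a buffer
+ * allocated on behalf of the caller and reports the resulting size
+ * through the next-to-last argument:
+ *
+ *	void *buf = NULL;
+ *	size_t size;
+ *	int ret = read_file_from_kernel(filp, &buf, INT_MAX,
+ *					&size, READING_FIRMWARE);
+ */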
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#if __has_attribute(__fallthrough__)
+# define fallthrough			__attribute__((__fallthrough__))
+#else
+# define fallthrough			do {} while (0)  /* fallthrough */
+#endif
+#endif
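+
+/*
+ * Usage sketch (illustrative): pre-5.4 code may then annotate
+ * intentional switch fall-through the same way as mainline:
+ *
+ *	case FOO_PREPARE:
+ *		prepare();
+ *		fallthrough;
+ *	case FOO_RUN:
+ *		run();
+ *		break;
+ */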
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+#define IRQ_WORK_INIT(_func) (struct irq_work) {	\
+	.flags = ATOMIC_INIT(0),			\
+	.func = (_func),				\
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0)
+#define close_fd(__ufd)	__close_fd(current->files, __ufd)
+#endif
+
+#endif /* _COBALT_ASM_GENERIC_WRAPPERS_H */
+++ linux-patched/include/asm-generic/xenomai/syscall.h	2022-03-21 12:58:28.933893408 +0100
@ linux-patched/include/xenomai/linux/stdarg.h:4 @
--- linux/include/asm-generic/xenomai/pci_ids.h	1970-01-01 01:00:00.000000000 +0100
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_SYSCALL_H
+#define _COBALT_ASM_GENERIC_SYSCALL_H
+
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/wrappers.h>
+#include <asm/xenomai/machine.h>
+#include <cobalt/uapi/asm-generic/syscall.h>
+#include <cobalt/uapi/kernel/types.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+#define access_rok(addr, size)	access_ok((addr), (size))
+#define access_wok(addr, size)	access_ok((addr), (size))