--- linux/include/xenomai/linux/stdarg.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/linux/stdarg.h 2022-03-21 12:58:32.320860380 +0100 @@ -0,0 +1 @@ +#include --- linux/include/xenomai/version.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/version.h 2022-03-21 12:58:32.309860487 +0100 @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _XENOMAI_VERSION_H +#define _XENOMAI_VERSION_H + +#ifndef __KERNEL__ +#include +#include +#endif + +#define XENO_VERSION(maj, min, rev) (((maj)<<16)|((min)<<8)|(rev)) + +#define XENO_VERSION_CODE XENO_VERSION(CONFIG_XENO_VERSION_MAJOR, \ + CONFIG_XENO_VERSION_MINOR, \ + CONFIG_XENO_REVISION_LEVEL) + +#define XENO_VERSION_STRING CONFIG_XENO_VERSION_STRING + +#endif /* _XENOMAI_VERSION_H */ --- linux/include/xenomai/pipeline/sched.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/sched.h 2022-03-21 12:58:32.033863179 +0100 @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_SCHED_H +#define _COBALT_KERNEL_DOVETAIL_SCHED_H + +#include + +struct xnthread; +struct xnsched; +struct task_struct; + +void pipeline_init_shadow_tcb(struct xnthread *thread); + +void pipeline_init_root_tcb(struct xnthread *thread); + +int ___xnsched_run(struct xnsched *sched); + +static inline int pipeline_schedule(struct xnsched *sched) +{ + return run_oob_call((int (*)(void *))___xnsched_run, sched); +} + +static inline void pipeline_prep_switch_oob(struct xnthread *root) +{ + /* N/A */ +} + +bool pipeline_switch_to(struct xnthread *prev, + struct xnthread *next, + bool leaving_inband); + +int pipeline_leave_inband(void); + +int pipeline_leave_oob_prepare(void); + +static inline void pipeline_leave_oob_unlock(void) +{ + /* + * We may not re-enable hard irqs due to the specifics of + * stage escalation via run_oob_call(), to prevent breaking + * the (virtual) interrupt state. + */ + xnlock_put(&nklock); +} + +void pipeline_leave_oob_finish(void); + +static inline +void pipeline_finalize_thread(struct xnthread *thread) +{ + /* N/A */ +} + +void pipeline_raise_mayday(struct task_struct *tsk); + +void pipeline_clear_mayday(void); + +#endif /* !_COBALT_KERNEL_DOVETAIL_SCHED_H */ --- linux/include/xenomai/pipeline/sirq.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/sirq.h 2022-03-21 12:58:32.026863247 +0100 @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_SIRQ_H +#define _COBALT_KERNEL_DOVETAIL_SIRQ_H + +#include +#include + +/* + * Wrappers to create "synthetic IRQs" the Dovetail way. 
Those + * interrupt channels can only be trigged by software, in order to run + * a handler on the in-band execution stage. + */ + +static inline +int pipeline_create_inband_sirq(irqreturn_t (*handler)(int irq, void *dev_id)) +{ + /* + * Allocate an IRQ from the synthetic interrupt domain then + * trap it to @handler, to be fired from the in-band stage. + */ + int sirq, ret; + + sirq = irq_create_direct_mapping(synthetic_irq_domain); + if (sirq == 0) + return -EAGAIN; + + ret = __request_percpu_irq(sirq, + handler, + IRQF_NO_THREAD, + "Inband sirq", + &cobalt_machine_cpudata); + + if (ret) { + irq_dispose_mapping(sirq); + return ret; + } + + return sirq; +} + +static inline +void pipeline_delete_inband_sirq(int sirq) +{ + /* + * Free the synthetic IRQ then deallocate it to its + * originating domain. + */ + free_percpu_irq(sirq, + &cobalt_machine_cpudata); + + irq_dispose_mapping(sirq); +} + +static inline void pipeline_post_sirq(int sirq) +{ + /* Trigger the synthetic IRQ */ + irq_post_inband(sirq); +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_SIRQ_H */ --- linux/include/xenomai/pipeline/wrappers.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/wrappers.h 2022-03-21 12:58:32.019863315 +0100 @@ -0,0 +1,9 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ +#ifndef _COBALT_KERNEL_DOVETAIL_WRAPPERS_H +#define _COBALT_KERNEL_DOVETAIL_WRAPPERS_H + +/* No wrapper needed so far. */ + +#endif /* !_COBALT_KERNEL_DOVETAIL_WRAPPERS_H */ --- linux/include/xenomai/pipeline/kevents.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/kevents.h 2022-03-21 12:58:32.011863393 +0100 @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_KEVENTS_H +#define _COBALT_KERNEL_DOVETAIL_KEVENTS_H + +#define KEVENT_PROPAGATE 0 +#define KEVENT_STOP 1 + +struct cobalt_process; +struct cobalt_thread; + +static inline +int pipeline_attach_process(struct cobalt_process *process) +{ + return 0; +} + +static inline +void pipeline_detach_process(struct cobalt_process *process) +{ } + +int pipeline_prepare_current(void); + +void pipeline_attach_current(struct xnthread *thread); + +int pipeline_trap_kevents(void); + +void pipeline_enable_kevents(void); + +void pipeline_cleanup_process(void); + +#endif /* !_COBALT_KERNEL_DOVETAIL_KEVENTS_H */ --- linux/include/xenomai/pipeline/vdso_fallback.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/vdso_fallback.h 2022-03-21 12:58:32.004863461 +0100 @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum + * Copyright (c) Siemens AG, 2021 + */ + +#ifndef _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H +#define _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H + +#include +#include +#include + +#define is_clock_gettime(__nr) ((__nr) == __NR_clock_gettime) + +#ifndef __NR_clock_gettime64 +#define is_clock_gettime64(__nr) 0 +#else +#define is_clock_gettime64(__nr) ((__nr) == __NR_clock_gettime64) +#endif + +static __always_inline bool +pipeline_handle_vdso_fallback(int nr, struct pt_regs *regs) +{ + struct __kernel_old_timespec __user *u_old_ts; + struct __kernel_timespec uts, __user *u_uts; + struct __kernel_old_timespec old_ts; + struct timespec64 ts64; + int clock_id, ret = 0; + unsigned long args[6]; + + if (!is_clock_gettime(nr) && !is_clock_gettime64(nr)) + return false; + + /* + * We need to fetch the args again because not all archs use the same + * calling convention for Linux and 
Xenomai syscalls. + */ + syscall_get_arguments(current, regs, args); + + clock_id = (int)args[0]; + switch (clock_id) { + case CLOCK_MONOTONIC: + ns2ts(&ts64, xnclock_read_monotonic(&nkclock)); + break; + case CLOCK_REALTIME: + ns2ts(&ts64, xnclock_read_realtime(&nkclock)); + break; + default: + return false; + } + + if (is_clock_gettime(nr)) { + old_ts.tv_sec = (__kernel_old_time_t)ts64.tv_sec; + old_ts.tv_nsec = ts64.tv_nsec; + u_old_ts = (struct __kernel_old_timespec __user *)args[1]; + if (raw_copy_to_user(u_old_ts, &old_ts, sizeof(old_ts))) + ret = -EFAULT; + } else if (is_clock_gettime64(nr)) { + uts.tv_sec = ts64.tv_sec; + uts.tv_nsec = ts64.tv_nsec; + u_uts = (struct __kernel_timespec __user *)args[1]; + if (raw_copy_to_user(u_uts, &uts, sizeof(uts))) + ret = -EFAULT; + } + + __xn_status_return(regs, ret); + + return true; +} + +#endif /* !_COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H */ --- linux/include/xenomai/pipeline/machine.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/machine.h 2022-03-21 12:58:31.997863530 +0100 @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_MACHINE_H +#define _COBALT_KERNEL_DOVETAIL_MACHINE_H + +#include + +#ifdef CONFIG_FTRACE +#define boot_lat_trace_notice "[LTRACE]" +#else +#define boot_lat_trace_notice "" +#endif + +struct vm_area_struct; + +struct cobalt_machine { + const char *name; + int (*init)(void); + int (*late_init)(void); + void (*cleanup)(void); + void (*prefault)(struct vm_area_struct *vma); + const char *const *fault_labels; +}; + +extern struct cobalt_machine cobalt_machine; + +struct cobalt_machine_cpudata { + unsigned int faults[32]; +}; + +DECLARE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata); + +struct cobalt_pipeline { +#ifdef CONFIG_SMP + cpumask_t supported_cpus; +#endif +}; + +int pipeline_init(void); + +int pipeline_late_init(void); + +void pipeline_cleanup(void); + +extern struct cobalt_pipeline cobalt_pipeline; + +#endif /* !_COBALT_KERNEL_IPIPE_MACHINE_H */ --- linux/include/xenomai/pipeline/irq.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/irq.h 2022-03-21 12:58:31.989863608 +0100 @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_IRQ_H +#define _COBALT_KERNEL_DOVETAIL_IRQ_H + +static inline void xnintr_init_proc(void) +{ + /* N/A */ +} + +static inline void xnintr_cleanup_proc(void) +{ + /* N/A */ +} + +static inline int xnintr_mount(void) +{ + /* N/A */ + return 0; +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_IRQ_H */ --- linux/include/xenomai/pipeline/tick.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/tick.h 2022-03-21 12:58:31.982863676 +0100 @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_IPIPE_TICK_H +#define _COBALT_KERNEL_IPIPE_TICK_H + +int pipeline_install_tick_proxy(void); + +void pipeline_uninstall_tick_proxy(void); + +struct xnsched; + +bool pipeline_must_force_program_tick(struct xnsched *sched); + +#endif /* !_COBALT_KERNEL_IPIPE_TICK_H */ --- linux/include/xenomai/pipeline/thread.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/thread.h 2022-03-21 12:58:31.974863754 +0100 @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 Philippe Gerum + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_THREAD_H +#define _COBALT_KERNEL_DOVETAIL_THREAD_H + +#include + +struct xnthread; + 
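+/*
+ * Cobalt piggybacks its per-thread data on the out-of-band thread
+ * state Dovetail maintains for every task: cobalt_threadinfo is an
+ * alias for oob_thread_state, and the accessors below simply return
+ * Dovetail's view of the current task, or of an arbitrary task.
+ */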
+#define cobalt_threadinfo oob_thread_state + +static inline struct cobalt_threadinfo *pipeline_current(void) +{ + return dovetail_current_state(); +} + +static inline +struct xnthread *pipeline_thread_from_task(struct task_struct *p) +{ + return dovetail_task_state(p)->thread; +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_THREAD_H */ --- linux/include/xenomai/pipeline/inband_work.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/inband_work.h 2022-03-21 12:58:31.967863822 +0100 @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H +#define _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H + +#include + +/* + * This field must be named inband_work and appear first in the + * container work struct. + */ +struct pipeline_inband_work { + struct irq_work work; +}; + +#define PIPELINE_INBAND_WORK_INITIALIZER(__work, __handler) \ + { \ + .work = IRQ_WORK_INIT((void (*)(struct irq_work *))__handler), \ + } + +#define pipeline_post_inband_work(__work) \ + irq_work_queue(&(__work)->inband_work.work) + +#endif /* !_COBALT_KERNEL_DOVETAIL_INBAND_WORK_H */ --- linux/include/xenomai/pipeline/lock.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/lock.h 2022-03-21 12:58:31.960863890 +0100 @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_LOCK_H +#define _COBALT_KERNEL_DOVETAIL_LOCK_H + +#include + +typedef hard_spinlock_t pipeline_spinlock_t; + +#define PIPELINE_SPIN_LOCK_UNLOCKED(__name) __HARD_SPIN_LOCK_INITIALIZER(__name) + +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING +/* Disable UP-over-SMP kernel optimization in debug mode. */ +#define __locking_active__ 1 + +#else + +#ifdef CONFIG_SMP +#define __locking_active__ 1 +#else +#define __locking_active__ IS_ENABLED(CONFIG_SMP) +#endif + +#endif + +#endif /* !_COBALT_KERNEL_DOVETAIL_LOCK_H */ --- linux/include/xenomai/pipeline/pipeline.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/pipeline.h 2022-03-21 12:58:31.952863968 +0100 @@ -0,0 +1,109 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_PIPELINE_H +#define _COBALT_KERNEL_DOVETAIL_PIPELINE_H + +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned long spl_t; + +/* + * We only keep the LSB when testing in SMP mode in order to strip off + * the recursion marker (0x2) the nklock may store there. + */ +#define splhigh(x) ((x) = oob_irq_save() & 1) +#ifdef CONFIG_SMP +#define splexit(x) oob_irq_restore(x & 1) +#else /* !CONFIG_SMP */ +#define splexit(x) oob_irq_restore(x) +#endif /* !CONFIG_SMP */ +#define splmax() oob_irq_disable() +#define splnone() oob_irq_enable() +#define spltest() oob_irqs_disabled() + +#define is_secondary_domain() running_inband() +#define is_primary_domain() running_oob() + +#ifdef CONFIG_SMP + +irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id); + +static inline int pipeline_request_resched_ipi(void (*handler)(void)) +{ + if (num_possible_cpus() == 1) + return 0; + + /* Trap the out-of-band rescheduling interrupt. */ + return __request_percpu_irq(RESCHEDULE_OOB_IPI, + pipeline_reschedule_ipi_handler, + IRQF_OOB, + "Xenomai reschedule", + &cobalt_machine_cpudata); +} + +static inline void pipeline_free_resched_ipi(void) +{ + if (num_possible_cpus() > 1) + /* Release the out-of-band rescheduling interrupt. 
*/ + free_percpu_irq(RESCHEDULE_OOB_IPI, &cobalt_machine_cpudata); +} + +static inline void pipeline_send_resched_ipi(const struct cpumask *dest) +{ + /* + * Trigger the out-of-band rescheduling interrupt on remote + * CPU(s). + */ + irq_send_oob_ipi(RESCHEDULE_OOB_IPI, dest); +} + +static inline void pipeline_send_timer_ipi(const struct cpumask *dest) +{ + /* + * Trigger the out-of-band timer interrupt on remote CPU(s). + */ + irq_send_oob_ipi(TIMER_OOB_IPI, dest); +} + +#else /* !CONFIG_SMP */ + +static inline int pipeline_request_resched_ipi(void (*handler)(void)) +{ + return 0; +} + + +static inline void pipeline_free_resched_ipi(void) +{ +} + +#endif /* CONFIG_SMP */ + +static inline void pipeline_prepare_panic(void) +{ + /* N/A */ +} + +static inline void pipeline_collect_features(struct cobalt_featinfo *f) +{ + f->clock_freq = 0; /* N/A */ +} + +#ifndef pipeline_get_syscall_args +static inline void pipeline_get_syscall_args(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + syscall_get_arguments(task, regs, args); +} +#endif /* !pipeline_get_syscall_args */ + +#endif /* !_COBALT_KERNEL_DOVETAIL_PIPELINE_H */ --- linux/include/xenomai/pipeline/trace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/trace.h 2022-03-21 12:58:31.945864037 +0100 @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2006 Jan Kiszka . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_KERNEL_DOVETAIL_TRACE_H +#define _COBALT_KERNEL_DOVETAIL_TRACE_H + +#include +#include +#include +#include +#include + +static inline int xntrace_max_begin(unsigned long v) +{ + TODO(); + return 0; +} + +static inline int xntrace_max_end(unsigned long v) +{ + TODO(); + return 0; +} + +static inline int xntrace_max_reset(void) +{ + TODO(); + return 0; +} + +static inline int xntrace_user_start(void) +{ + TODO(); + return 0; +} + +static inline int xntrace_user_stop(unsigned long v) +{ + TODO(); + return 0; +} + +static inline int xntrace_user_freeze(unsigned long v, int once) +{ + trace_cobalt_trace_longval(0, v); + trace_cobalt_trigger("user-freeze"); + return 0; +} + +static inline void xntrace_latpeak_freeze(int delay) +{ + trace_cobalt_latpeak(delay); + trace_cobalt_trigger("latency-freeze"); +} + +static inline int xntrace_special(unsigned char id, unsigned long v) +{ + trace_cobalt_trace_longval(id, v); + return 0; +} + +static inline int xntrace_special_u64(unsigned char id, + unsigned long long v) +{ + trace_cobalt_trace_longval(id, v); + return 0; +} + +static inline int xntrace_pid(pid_t pid, short prio) +{ + trace_cobalt_trace_pid(pid, prio); + return 0; +} + +static inline int xntrace_tick(unsigned long delay_ticks) /* ns */ +{ + trace_cobalt_tick_shot(delay_ticks); + return 0; +} + +static inline int xntrace_panic_freeze(void) +{ + TODO(); + return 0; +} + +static inline int xntrace_panic_dump(void) +{ + TODO(); + return 0; +} + +static inline bool xntrace_enabled(void) +{ + return IS_ENABLED(CONFIG_DOVETAIL_TRACE); +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_TRACE_H */ --- linux/include/xenomai/pipeline/clock.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/pipeline/clock.h 2022-03-21 12:58:31.938864105 +0100 @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_DOVETAIL_CLOCK_H +#define _COBALT_KERNEL_DOVETAIL_CLOCK_H + +#include +#include +#include +#include + +struct timespec64; + +static inline u64 pipeline_read_cycle_counter(void) +{ + /* + * With Dovetail, our idea of time is directly based on a + * refined count of nanoseconds since the epoch, the hardware + * time counter is transparent to us. For this reason, + * xnclock_ticks_to_ns() and xnclock_ns_to_ticks() are + * idempotent when building for Dovetail. + */ + return ktime_get_mono_fast_ns(); +} + +static inline xnticks_t pipeline_read_wallclock(void) +{ + return ktime_get_real_fast_ns(); +} + +static inline int pipeline_set_wallclock(xnticks_t epoch_ns) +{ + return -EOPNOTSUPP; +} + +void pipeline_set_timer_shot(unsigned long cycles); + +const char *pipeline_timer_name(void); + +static inline const char *pipeline_clock_name(void) +{ + /* Return the name of the current clock source. */ + TODO(); + + return "?"; +} + +static inline int pipeline_get_host_time(struct timespec64 *tp) +{ + /* Convert ktime_get_real_fast_ns() to timespec. 
*/ + *tp = ktime_to_timespec64(ktime_get_real_fast_ns()); + + return 0; +} + +static inline void pipeline_init_clock(void) +{ + /* N/A */ +} + +static inline xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks) +{ + return ticks; +} + +static inline xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks) +{ + return ticks; +} + +static inline xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns) +{ + return ns; +} + +#endif /* !_COBALT_KERNEL_DOVETAIL_CLOCK_H */ --- linux/include/xenomai/rtdm/uapi/ipc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/ipc.h 2022-03-21 12:58:32.298860595 +0100 @@ -0,0 +1,881 @@ +/** + * @file + * This file is part of the Xenomai project. + * + * @note Copyright (C) 2009 Philippe Gerum + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#ifndef _RTDM_UAPI_IPC_H +#define _RTDM_UAPI_IPC_H + +/** + * @ingroup rtdm_profiles + * @defgroup rtdm_ipc Real-time IPC + * + * @b Profile @b Revision: 1 + * @n + * @n + * @par Device Characteristics + * @n + * @ref rtdm_driver_flags "Device Flags": @c RTDM_PROTOCOL_DEVICE @n + * @n + * @ref rtdm_driver.protocol_family "Protocol Family": @c PF_RTIPC @n + * @n + * @ref rtdm_driver.socket_type "Socket Type": @c SOCK_DGRAM @n + * @n + * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_RTIPC @n + * @n + * @{ + * + * @anchor rtipc_operations @name Supported operations + * Standard socket operations supported by the RTIPC protocols. + * @{ + */ + +/** Create an endpoint for communication in the AF_RTIPC domain. + * + * @param[in] domain The communication domain. Must be AF_RTIPC. + * + * @param[in] type The socket type. Must be SOCK_DGRAM. + * + * @param [in] protocol Any of @ref IPCPROTO_XDDP, @ref IPCPROTO_IDDP, + * or @ref IPCPROTO_BUFP. @ref IPCPROTO_IPC is also valid, and refers + * to the default RTIPC protocol, namely @ref IPCPROTO_IDDP. + * + * @return In addition to the standard error codes for @c socket(2), + * the following specific error code may be returned: + * - -ENOPROTOOPT (Protocol is known, but not compiled in the RTIPC driver). + * See @ref RTIPC_PROTO "RTIPC protocols" + * for available protocols. + * + * @par Calling context: + * non-RT + */ +#ifdef DOXYGEN_CPP +int socket__AF_RTIPC(int domain =AF_RTIPC, int type =SOCK_DGRAM, int protocol); +#endif + +/** + * Close a RTIPC socket descriptor. + * + * Blocking calls to any of the @ref sendmsg__AF_RTIPC "sendmsg" or @ref + * recvmsg__AF_RTIPC "recvmsg" functions will be unblocked when the socket + * is closed and return with an error. + * + * @param[in] sockfd The socket descriptor to close. 
+ * + * @return In addition to the standard error codes for @c close(2), + * the following specific error code may be returned: + * none + * + * @par Calling context: + * non-RT + */ +#ifdef DOXYGEN_CPP +int close__AF_RTIPC(int sockfd); +#endif + +/** + * Bind a RTIPC socket to a port. + * + * Bind the socket to a destination port. + * + * @param[in] sockfd The RTDM file descriptor obtained from the socket + * creation call. + * + * @param [in] addr The address to bind the socket to (see struct + * sockaddr_ipc). The meaning of such address depends on the RTIPC + * protocol in use for the socket: + * + * - IPCPROTO_XDDP + * + * This action creates an endpoint for channelling traffic between + * the Xenomai and Linux domains. + * + * @em sipc_family must be AF_RTIPC, @em sipc_port is either -1, + * or a valid free port number between 0 and + * CONFIG_XENO_OPT_PIPE_NRDEV-1. + * + * If @em sipc_port is -1, a free port will be assigned automatically. + * + * Upon success, the pseudo-device /dev/rtp@em N will be reserved + * for this communication channel, where @em N is the assigned port + * number. The non real-time side shall open this device to exchange + * data over the bound socket. + * + * @anchor xddp_label_binding + * If a label was assigned (see @ref XDDP_LABEL) prior to + * binding the socket to a port, a registry link referring to the + * created pseudo-device will be automatically set up as + * @c /proc/xenomai/registry/rtipc/xddp/@em label, where @em label is the + * label string passed to setsockopt() for the @ref XDDP_LABEL option. + * + * - IPCPROTO_IDDP + * + * This action creates an endpoint for exchanging datagrams within + * the Xenomai domain. + * + * @em sipc_family must be AF_RTIPC, @em sipc_port is either -1, + * or a valid free port number between 0 and + * CONFIG_XENO_OPT_IDDP_NRPORT-1. + * + * If @em sipc_port is -1, a free port will be assigned + * automatically. The real-time peer shall connect to the same port + * for exchanging data over the bound socket. + * + * @anchor iddp_label_binding + * If a label was assigned (see @ref IDDP_LABEL) prior to binding + * the socket to a port, a registry link referring to the assigned + * port number will be automatically set up as @c + * /proc/xenomai/registry/rtipc/iddp/@em label, where @em label is + * the label string passed to setsockopt() for the @ref IDDP_LABEL + * option. + * + * - IPCPROTO_BUFP + * + * This action creates an endpoint for a one-way byte + * stream within the Xenomai domain. + * + * @em sipc_family must be AF_RTIPC, @em sipc_port is either -1, + * or a valid free port number between 0 and CONFIG_XENO_OPT_BUFP_NRPORT-1. + * + * If @em sipc_port is -1, an available port will be assigned + * automatically. The real-time peer shall connect to the same port + * for exchanging data over the bound socket. + * + * @anchor bufp_label_binding + * If a label was assigned (see @ref BUFP_LABEL) prior to binding + * the socket to a port, a registry link referring to the assigned + * port number will be automatically set up as @c + * /proc/xenomai/registry/rtipc/bufp/@em label, where @em label is + * the label string passed to setsockopt() for the @a BUFP_LABEL + * option. + * + * @param[in] addrlen The size in bytes of the structure pointed to by + * @a addr. 
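Editor's note: a minimal user-space sketch of the XDDP binding flow described above. This is an illustrative fragment only; it assumes the usual <rtdm/ipc.h> user header exposing these definitions and a made-up label string, and omits error handling beyond the return paths shown.

#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <rtdm/ipc.h>	/* assumed user-space header for these definitions */

static int open_xddp_endpoint(void)
{
	struct rtipc_port_label lbl;
	struct sockaddr_ipc saddr;
	int s;

	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
	if (s < 0)
		return -errno;

	/* Optionally attach a label before binding (see XDDP_LABEL). */
	memset(&lbl, 0, sizeof(lbl));
	strcpy(lbl.label, "example-port");	/* hypothetical label */
	setsockopt(s, SOL_XDDP, XDDP_LABEL, &lbl, sizeof(lbl));

	/* sipc_port == -1 requests automatic port assignment. */
	memset(&saddr, 0, sizeof(saddr));
	saddr.sipc_family = AF_RTIPC;
	saddr.sipc_port = -1;
	if (bind(s, (struct sockaddr *)&saddr, sizeof(saddr)) < 0)
		return -errno;

	return s;
}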
+ * + * @return In addition to the standard error codes for @c + * bind(2), the following specific error code may be returned: + * - -EFAULT (Invalid data address given) + * - -ENOMEM (Not enough memory) + * - -EINVAL (Invalid parameter) + * - -EADDRINUSE (Socket already bound to a port, or no port available) + * - -EAGAIN (no registry slot available, check/raise + * CONFIG_XENO_OPT_REGISTRY_NRSLOTS) . + * + * @par Calling context: + * non-RT + */ +#ifdef DOXYGEN_CPP +int bind__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr, + socklen_t addrlen); +#endif + +/** + * Initiate a connection on a RTIPC socket. + * + * @param[in] sockfd The RTDM file descriptor obtained from the socket + * creation call. + * + * @param [in] addr The address to connect the socket to (see struct + * sockaddr_ipc). + * + * - If sipc_port is a valid port for the protocol, it is used + * verbatim and the connection succeeds immediately, regardless of + * whether the destination is bound at the time of the call. + * + * - If sipc_port is -1 and a label was assigned to the socket, + * connect() blocks for the requested amount of time (see @ref + * SO_RCVTIMEO) until a socket is bound to the same label via @c + * bind(2) (see @ref XDDP_LABEL, @ref IDDP_LABEL, @ref BUFP_LABEL), in + * which case a connection is established between both endpoints. + * + * - If sipc_port is -1 and no label was assigned to the socket, the + * default destination address is cleared, meaning that any subsequent + * write to the socket will return -EDESTADDRREQ, until a valid + * destination address is set via @c connect(2) or @c bind(2). + * + * @param[in] addrlen The size in bytes of the structure pointed to by + * @a addr. + * + * @return In addition to the standard error codes for @c connect(2), + * the following specific error code may be returned: + * none. + * + * @par Calling context: + * RT/non-RT + */ +#ifdef DOXYGEN_CPP +int connect__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr, + socklen_t addrlen); +#endif + +/** + * Set options on RTIPC sockets. + * + * These functions allow to set various socket options. + * Supported Levels and Options: + * + * - Level @ref sockopts_socket "SOL_SOCKET" + * - Level @ref sockopts_xddp "SOL_XDDP" + * - Level @ref sockopts_iddp "SOL_IDDP" + * - Level @ref sockopts_bufp "SOL_BUFP" + * . + * + * @return In addition to the standard error codes for @c + * setsockopt(2), the following specific error code may + * be returned: + * follow the option links above. + * + * @par Calling context: + * non-RT + */ +#ifdef DOXYGEN_CPP +int setsockopt__AF_RTIPC(int sockfd, int level, int optname, + const void *optval, socklen_t optlen); +#endif +/** + * Get options on RTIPC sockets. + * + * These functions allow to get various socket options. + * Supported Levels and Options: + * + * - Level @ref sockopts_socket "SOL_SOCKET" + * - Level @ref sockopts_xddp "SOL_XDDP" + * - Level @ref sockopts_iddp "SOL_IDDP" + * - Level @ref sockopts_bufp "SOL_BUFP" + * . + * + * @return In addition to the standard error codes for @c + * getsockopt(2), the following specific error code may + * be returned: + * follow the option links above. + * + * @par Calling context: + * RT/non-RT + */ +#ifdef DOXYGEN_CPP +int getsockopt__AF_RTIPC(int sockfd, int level, int optname, + void *optval, socklen_t *optlen); +#endif + +/** + * Send a message on a RTIPC socket. + * + * @param[in] sockfd The RTDM file descriptor obtained from the socket + * creation call. 
+ * + * @param[in] msg The address of the message header conveying the + * datagram. + * + * @param [in] flags Operation flags: + * + * - MSG_OOB Send out-of-band message. For all RTIPC protocols except + * @ref IPCPROTO_BUFP, sending out-of-band data actually means + * pushing them to the head of the receiving queue, so that the + * reader will always receive them before normal messages. @ref + * IPCPROTO_BUFP does not support out-of-band sending. + * + * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be + * blocked whenever the message cannot be sent immediately at the + * time of the call (e.g. memory shortage), but will rather return + * with -EWOULDBLOCK. Unlike other RTIPC protocols, @ref + * IPCPROTO_XDDP accepts but never considers MSG_DONTWAIT since + * writing to a real-time XDDP endpoint is inherently a non-blocking + * operation. + * + * - MSG_MORE Accumulate data before sending. This flag is accepted by + * the @ref IPCPROTO_XDDP protocol only, and tells the send service + * to accumulate the outgoing data into an internal streaming + * buffer, instead of issuing a datagram immediately for it. See + * @ref XDDP_BUFSZ for more. + * + * @note No RTIPC protocol allows for short writes, and only complete + * messages are sent to the peer. + * + * @return In addition to the standard error codes for @c sendmsg(2), + * the following specific error code may be returned: + * none. + * + * @par Calling context: + * RT + */ +#ifdef DOXYGEN_CPP +ssize_t sendmsg__AF_RTIPC(int sockfd, const struct msghdr *msg, int flags); +#endif + +/** + * Receive a message from a RTIPC socket. + * + * @param[in] sockfd The RTDM file descriptor obtained from the socket + * creation call. + * + * @param[out] msg The address the message header will be copied at. + * + * @param [in] flags Operation flags: + * + * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be + * blocked whenever no message is immediately available for receipt + * at the time of the call, but will rather return with + * -EWOULDBLOCK. + * + * @note @ref IPCPROTO_BUFP does not allow for short reads and always + * returns the requested amount of bytes, except in one situation: + * whenever some writer is waiting for sending data upon a buffer full + * condition, while the caller would have to wait for receiving a + * complete message. This is usually the sign of a pathological use + * of the BUFP socket, like defining an incorrect buffer size via @ref + * BUFP_BUFSZ. In that case, a short read is allowed to prevent a + * deadlock. + * + * @return In addition to the standard error codes for @c recvmsg(2), + * the following specific error code may be returned: + * none. + * + * @par Calling context: + * RT + */ +#ifdef DOXYGEN_CPP +ssize_t recvmsg__AF_RTIPC(int sockfd, struct msghdr *msg, int flags); +#endif + +/** + * Get socket name. + * + * The name of the local endpoint for the socket is copied back (see + * struct sockaddr_ipc). + * + * @return In addition to the standard error codes for @c getsockname(2), + * the following specific error code may be returned: + * none. + * + * @par Calling context: + * RT/non-RT + */ +#ifdef DOXYGEN_CPP +int getsockname__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen); +#endif + +/** + * Get socket peer. + * + * The name of the remote endpoint for the socket is copied back (see + * struct sockaddr_ipc). This is the default destination address for + * messages sent on the socket. 
It can be set either explicitly via @c + * connect(2), or implicitly via @c bind(2) if no @c connect(2) was + * called prior to binding the socket to a port, in which case both + * the local and remote names are equal. + * + * @return In addition to the standard error codes for @c getpeername(2), + * the following specific error code may be returned: + * none. + * + * @par Calling context: + * RT/non-RT + */ +#ifdef DOXYGEN_CPP +int getpeername__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen); +#endif + +/** @} */ + +#include +#include +#include + +/* Address family */ +#define AF_RTIPC 111 + +/* Protocol family */ +#define PF_RTIPC AF_RTIPC + +/** + * @anchor RTIPC_PROTO @name RTIPC protocol list + * protocols for the PF_RTIPC protocol family + * + * @{ */ +enum { +/** Default protocol (IDDP) */ + IPCPROTO_IPC = 0, +/** + * Cross-domain datagram protocol (RT <-> non-RT). + * + * Real-time Xenomai threads and regular Linux threads may want to + * exchange data in a way that does not require the former to leave + * the real-time domain (i.e. primary mode). The RTDM-based XDDP + * protocol is available for this purpose. + * + * On the Linux domain side, pseudo-device files named /dev/rtp@em \ + * give regular POSIX threads access to non real-time communication + * endpoints, via the standard character-based I/O interface. On the + * Xenomai domain side, sockets may be bound to XDDP ports, which act + * as proxies to send and receive data to/from the associated + * pseudo-device files. Ports and pseudo-device minor numbers are + * paired, meaning that e.g. socket port 7 will proxy the traffic to/from + * /dev/rtp7. + * + * All data sent through a bound/connected XDDP socket via @c + * sendto(2) or @c write(2) will be passed to the peer endpoint in the + * Linux domain, and made available for reading via the standard @c + * read(2) system call. Conversely, all data sent using @c write(2) + * through the non real-time endpoint will be conveyed to the + * real-time socket endpoint, and made available to the @c recvfrom(2) + * or @c read(2) system calls. + */ + IPCPROTO_XDDP = 1, +/** + * Intra-domain datagram protocol (RT <-> RT). + * + * The RTDM-based IDDP protocol enables real-time threads to exchange + * datagrams within the Xenomai domain, via socket endpoints. + */ + IPCPROTO_IDDP = 2, +/** + * Buffer protocol (RT <-> RT, byte-oriented). + * + * The RTDM-based BUFP protocol implements a lightweight, + * byte-oriented, one-way Producer-Consumer data path. All messages + * written are buffered into a single memory area in strict FIFO + * order, until read by the consumer. + * + * This protocol always prevents short writes, and only allows short + * reads when a potential deadlock situation arises (i.e. readers and + * writers waiting for each other indefinitely). + */ + IPCPROTO_BUFP = 3, + IPCPROTO_MAX +}; +/** @} */ + +/** + * Port number type for the RTIPC address family. + */ +typedef int16_t rtipc_port_t; + +/** + * Port label information structure. + */ +struct rtipc_port_label { + /** Port label string, null-terminated. */ + char label[XNOBJECT_NAME_LEN]; +}; + +/** + * Socket address structure for the RTIPC address family. + */ +struct sockaddr_ipc { + /** RTIPC address family, must be @c AF_RTIPC */ + sa_family_t sipc_family; + /** Port number. */ + rtipc_port_t sipc_port; +}; + +#define SOL_XDDP 311 +/** + * @anchor sockopts_xddp @name XDDP socket options + * Setting and getting XDDP socket options. 
+ * @{ */ +/** + * XDDP label assignment + * + * ASCII label strings can be attached to XDDP ports, so that opening + * the non-RT endpoint can be done by specifying this symbolic device + * name rather than referring to a raw pseudo-device entry + * (i.e. /dev/rtp@em N). + * + * When available, this label will be registered when binding, in + * addition to the port number (see @ref xddp_label_binding + * "XDDP port binding"). + * + * It is not allowed to assign a label after the socket was + * bound. However, multiple assignment calls are allowed prior to the + * binding; the last label set will be used. + * + * @param [in] level @ref sockopts_xddp "SOL_XDDP" + * @param [in] optname @b XDDP_LABEL + * @param [in] optval Pointer to struct rtipc_port_label + * @param [in] optlen sizeof(struct rtipc_port_label) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen invalid) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define XDDP_LABEL 1 +/** + * XDDP local pool size configuration + * + * By default, the memory needed to convey the data is pulled from + * Xenomai's system pool. Setting a local pool size overrides this + * default for the socket. + * + * If a non-zero size was configured, a local pool is allocated at + * binding time. This pool will provide storage for pending datagrams. + * + * It is not allowed to configure a local pool size after the socket + * was bound. However, multiple configuration calls are allowed prior + * to the binding; the last value set will be used. + * + * @note: the pool memory is obtained from the host allocator by the + * @ref bind__AF_RTIPC "bind call". + * + * @param [in] level @ref sockopts_xddp "SOL_XDDP" + * @param [in] optname @b XDDP_POOLSZ + * @param [in] optval Pointer to a variable of type size_t, containing + * the required size of the local pool to reserve at binding time + * @param [in] optlen sizeof(size_t) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen invalid or *@a optval is zero) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define XDDP_POOLSZ 2 +/** + * XDDP streaming buffer size configuration + * + * In addition to sending datagrams, real-time threads may stream data + * in a byte-oriented mode through the port as well. This increases + * the bandwidth and reduces the overhead, when the overall data to + * send to the Linux domain is collected by bits, and keeping the + * message boundaries is not required. + * + * This feature is enabled when a non-zero buffer size is set for the + * socket. In that case, the real-time data accumulates into the + * streaming buffer when MSG_MORE is passed to any of the @ref + * sendmsg__AF_RTIPC "send functions", until: + * + * - the receiver from the Linux domain wakes up and consumes it, + * - a different source port attempts to send data to the same + * destination port, + * - MSG_MORE is absent from the send flags, + * - the buffer is full, + * . + * whichever comes first. + * + * Setting *@a optval to zero disables the streaming buffer, in which + * case all sendings are conveyed in separate datagrams, regardless of + * MSG_MORE. + * + * @note only a single streaming buffer exists per socket. When this + * buffer is full, the real-time data stops accumulating and sending + * operations resume in mere datagram mode. 
Accumulation may happen + * again after some or all data in the streaming buffer is consumed + * from the Linux domain endpoint. + * + * The streaming buffer size may be adjusted multiple times during the + * socket lifetime; the latest configuration change will take effect + * when the accumulation resumes after the previous buffer was + * flushed. + * + * @param [in] level @ref sockopts_xddp "SOL_XDDP" + * @param [in] optname @b XDDP_BUFSZ + * @param [in] optval Pointer to a variable of type size_t, containing + * the required size of the streaming buffer + * @param [in] optlen sizeof(size_t) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -ENOMEM (Not enough memory) + * - -EINVAL (@a optlen is invalid) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define XDDP_BUFSZ 3 +/** + * XDDP monitoring callback + * + * Other RTDM drivers may install a user-defined callback via the @ref + * rtdm_setsockopt call from the inter-driver API, in order to collect + * particular events occurring on the channel. + * + * This notification mechanism is particularly useful to monitor a + * channel asynchronously while performing other tasks. + * + * The user-provided routine will be passed the RTDM file descriptor + * of the socket receiving the event, the event code, and an optional + * argument. Four events are currently defined, see @ref XDDP_EVENTS. + * + * The XDDP_EVTIN and XDDP_EVTOUT events are fired on behalf of a + * fully atomic context; therefore, care must be taken to keep their + * overhead low. In those cases, the Xenomai services that may be + * called from the callback are restricted to the set allowed to a + * real-time interrupt handler. + * + * @param [in] level @ref sockopts_xddp "SOL_XDDP" + * @param [in] optname @b XDDP_MONITOR + * @param [in] optval Pointer to a pointer to function of type int + * (*)(int fd, int event, long arg), containing the address of the + * user-defined callback.Passing a NULL callback pointer + * in @a optval disables monitoring. + * @param [in] optlen sizeof(int (*)(int fd, int event, long arg)) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EPERM (Operation not allowed from user-space) + * - -EINVAL (@a optlen is invalid) + * . + * + * @par Calling context: + * RT/non-RT, kernel space only + */ +#define XDDP_MONITOR 4 +/** @} */ + +/** + * @anchor XDDP_EVENTS @name XDDP events + * Specific events occurring on XDDP channels, which can be monitored + * via the @ref XDDP_MONITOR socket option. + * + * @{ */ +/** + * @ref XDDP_MONITOR "Monitor" writes to the non real-time endpoint. + * + * XDDP_EVTIN is sent when data is written to the non real-time + * endpoint the socket is bound to (i.e. via /dev/rtp@em N), which + * means that some input is pending for the real-time endpoint. The + * argument is the size of the incoming message. + */ +#define XDDP_EVTIN 1 +/** + * @ref XDDP_MONITOR "Monitor" reads from the non real-time endpoint. + * + * XDDP_EVTOUT is sent when the non real-time endpoint successfully + * reads a complete message (i.e. via /dev/rtp@em N). The argument is + * the size of the outgoing message. + */ +#define XDDP_EVTOUT 2 +/** + * @ref XDDP_MONITOR "Monitor" close from the non real-time endpoint. + * + * XDDP_EVTDOWN is sent when the non real-time endpoint is closed. The + * argument is always 0. + */ +#define XDDP_EVTDOWN 3 +/** + * @ref XDDP_MONITOR "Monitor" memory shortage for non real-time + * datagrams. 
+ * + * XDDP_EVTNOBUF is sent when no memory is available from the pool to + * hold the message currently sent from the non real-time + * endpoint. The argument is the size of the failed allocation. Upon + * return from the callback, the caller will block and retry until + * enough space is available from the pool; during that process, the + * callback might be invoked multiple times, each time a new attempt + * to get the required memory fails. + */ +#define XDDP_EVTNOBUF 4 +/** @} */ + +#define SOL_IDDP 312 +/** + * @anchor sockopts_iddp @name IDDP socket options + * Setting and getting IDDP socket options. + * @{ */ +/** + * IDDP label assignment + * + * ASCII label strings can be attached to IDDP ports, in order to + * connect sockets to them in a more descriptive way than using plain + * numeric port values. + * + * When available, this label will be registered when binding, in + * addition to the port number (see @ref iddp_label_binding + * "IDDP port binding"). + * + * It is not allowed to assign a label after the socket was + * bound. However, multiple assignment calls are allowed prior to the + * binding; the last label set will be used. + * + * @param [in] level @ref sockopts_iddp "SOL_IDDP" + * @param [in] optname @b IDDP_LABEL + * @param [in] optval Pointer to struct rtipc_port_label + * @param [in] optlen sizeof(struct rtipc_port_label) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen is invalid) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define IDDP_LABEL 1 +/** + * IDDP local pool size configuration + * + * By default, the memory needed to convey the data is pulled from + * Xenomai's system pool. Setting a local pool size overrides this + * default for the socket. + * + * If a non-zero size was configured, a local pool is allocated at + * binding time. This pool will provide storage for pending datagrams. + * + * It is not allowed to configure a local pool size after the socket + * was bound. However, multiple configuration calls are allowed prior + * to the binding; the last value set will be used. + * + * @note: the pool memory is obtained from the host allocator by the + * @ref bind__AF_RTIPC "bind call". + * + * @param [in] level @ref sockopts_iddp "SOL_IDDP" + * @param [in] optname @b IDDP_POOLSZ + * @param [in] optval Pointer to a variable of type size_t, containing + * the required size of the local pool to reserve at binding time + * @param [in] optlen sizeof(size_t) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen is invalid or *@a optval is zero) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define IDDP_POOLSZ 2 +/** @} */ + +#define SOL_BUFP 313 +/** + * @anchor sockopts_bufp @name BUFP socket options + * Setting and getting BUFP socket options. + * @{ */ +/** + * BUFP label assignment + * + * ASCII label strings can be attached to BUFP ports, in order to + * connect sockets to them in a more descriptive way than using plain + * numeric port values. + * + * When available, this label will be registered when binding, in + * addition to the port number (see @ref bufp_label_binding + * "BUFP port binding"). + * + * It is not allowed to assign a label after the socket was + * bound. However, multiple assignment calls are allowed prior to the + * binding; the last label set will be used. 
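Editor's note: a hypothetical user-space fragment showing the BUFP setup sequence these options describe (assuming the <rtdm/ipc.h> user header, an arbitrary buffer size and port number, and no error handling); per the BUFP_BUFSZ description further below, the buffer size must be configured before binding.

#include <stddef.h>
#include <sys/socket.h>
#include <rtdm/ipc.h>	/* assumed user-space header */

static int open_bufp_writer(void)
{
	struct sockaddr_ipc saddr = {
		.sipc_family = AF_RTIPC,
		.sipc_port = 12,	/* arbitrary port, for illustration only */
	};
	size_t bufsz = 16384;		/* see BUFP_BUFSZ: must be set before bind() */
	int s;

	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
	if (s < 0)
		return -1;

	setsockopt(s, SOL_BUFP, BUFP_BUFSZ, &bufsz, sizeof(bufsz));

	return bind(s, (struct sockaddr *)&saddr, sizeof(saddr)) ? -1 : s;
}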
+ * + * @param [in] level @ref sockopts_bufp "SOL_BUFP" + * @param [in] optname @b BUFP_LABEL + * @param [in] optval Pointer to struct rtipc_port_label + * @param [in] optlen sizeof(struct rtipc_port_label) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen is invalid) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define BUFP_LABEL 1 +/** + * BUFP buffer size configuration + * + * All messages written to a BUFP socket are buffered in a single + * per-socket memory area. Configuring the size of such buffer prior + * to binding the socket to a destination port is mandatory. + * + * It is not allowed to configure a buffer size after the socket was + * bound. However, multiple configuration calls are allowed prior to + * the binding; the last value set will be used. + * + * @note: the buffer memory is obtained from the host allocator by the + * @ref bind__AF_RTIPC "bind call". + * + * @param [in] level @ref sockopts_bufp "SOL_BUFP" + * @param [in] optname @b BUFP_BUFSZ + * @param [in] optval Pointer to a variable of type size_t, containing + * the required size of the buffer to reserve at binding time + * @param [in] optlen sizeof(size_t) + * + * @return 0 is returned upon success. Otherwise: + * + * - -EFAULT (Invalid data address given) + * - -EALREADY (socket already bound) + * - -EINVAL (@a optlen is invalid or *@a optval is zero) + * . + * + * @par Calling context: + * RT/non-RT + */ +#define BUFP_BUFSZ 2 +/** @} */ + +/** + * @anchor sockopts_socket @name Socket level options + * Setting and getting supported standard socket level options. + * @{ */ +/** + * + * @ref IPCPROTO_IDDP and @ref IPCPROTO_BUFP protocols support the + * standard SO_SNDTIMEO socket option, from the @c SOL_SOCKET level. + * + * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399/ + */ +#ifdef DOXYGEN_CPP +#define SO_SNDTIMEO defined_by_kernel_header_file +#endif +/** + * + * All RTIPC protocols support the standard SO_RCVTIMEO socket option, + * from the @c SOL_SOCKET level. + * + * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399/ + */ +#ifdef DOXYGEN_CPP +#define SO_RCVTIMEO defined_by_kernel_header_file +#endif +/** @} */ + +/** + * @anchor rtdm_ipc_examples @name RTIPC examples + * @{ */ +/** @example bufp-readwrite.c */ +/** @example bufp-label.c */ +/** @example iddp-label.c */ +/** @example iddp-sendrecv.c */ +/** @example xddp-echo.c */ +/** @example xddp-label.c */ +/** @example xddp-stream.c */ +/** @} */ + +/** @} */ + +#endif /* !_RTDM_UAPI_IPC_H */ --- linux/include/xenomai/rtdm/uapi/udd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/udd.h 2022-03-21 12:58:32.291860663 +0100 @@ -0,0 +1,98 @@ +/** + * @file + * This file is part of the Xenomai project. + * + * @author Copyright (C) 2014 Philippe Gerum + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _RTDM_UAPI_UDD_H +#define _RTDM_UAPI_UDD_H + +/** + * @addtogroup rtdm_udd + * + * @{ + */ + +/** + * @anchor udd_signotify + * @brief UDD event notification descriptor + * + * This structure shall be used to pass the information required to + * enable/disable the notification by signal upon interrupt receipt. + * + * If PID is zero or negative, the notification is disabled. + * Otherwise, the Cobalt thread whose PID is given will receive the + * Cobalt signal also mentioned, along with the count of interrupts at + * the time of the receipt stored in siginfo.si_int. A Cobalt thread + * must explicitly wait for notifications using the sigwaitinfo() or + * sigtimedwait() services (no asynchronous mode available). + */ +struct udd_signotify { + /** + * PID of the Cobalt thread to notify upon interrupt + * receipt. If @a pid is zero or negative, the notification is + * disabled. + */ + pid_t pid; + /** + * Signal number to send to PID for notifying, which must be + * in the range [SIGRTMIN .. SIGRTMAX] inclusive. This value + * is not considered if @a pid is zero or negative. + */ + int sig; +}; + +/** + * @anchor udd_ioctl_codes @name UDD_IOCTL + * IOCTL requests + * + * @{ + */ + +/** + * Enable the interrupt line. The UDD-class mini-driver should handle + * this request when received through its ->ioctl() handler if + * provided. Otherwise, the UDD core enables the interrupt line in the + * interrupt controller before returning to the caller. + */ +#define UDD_RTIOC_IRQEN _IO(RTDM_CLASS_UDD, 0) +/** + * Disable the interrupt line. The UDD-class mini-driver should handle + * this request when received through its ->ioctl() handler if + * provided. Otherwise, the UDD core disables the interrupt line in + * the interrupt controller before returning to the caller. + * + * @note The mini-driver must handle the UDD_RTIOC_IRQEN request for a + * custom IRQ from its ->ioctl() handler, otherwise such request + * receives -EIO from the UDD core. + */ +#define UDD_RTIOC_IRQDIS _IO(RTDM_CLASS_UDD, 1) +/** + * Enable/Disable signal notification upon interrupt event. A valid + * @ref udd_signotify "notification descriptor" must be passed along + * with this request, which is handled by the UDD core directly. + * + * @note The mini-driver must handle the UDD_RTIOC_IRQDIS request for + * a custom IRQ from its ->ioctl() handler, otherwise such request + * receives -EIO from the UDD core. + */ +#define UDD_RTIOC_IRQSIG _IOW(RTDM_CLASS_UDD, 2, struct udd_signotify) + +/** @} */ +/** @} */ + +#endif /* !_RTDM_UAPI_UDD_H */ --- linux/include/xenomai/rtdm/uapi/testing.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/testing.h 2022-03-21 12:58:32.284860731 +0100 @@ -0,0 +1,198 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, testing device profile header + * + * @note Copyright (C) 2005 Jan Kiszka + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * @ingroup rttesting + */ +#ifndef _RTDM_UAPI_TESTING_H +#define _RTDM_UAPI_TESTING_H + +#include + +#define RTTST_PROFILE_VER 2 + +typedef struct rttst_bench_res { + __s32 avg; + __s32 min; + __s32 max; + __s32 overruns; + __s32 test_loops; +} rttst_bench_res_t; + +typedef struct rttst_interm_bench_res { + struct rttst_bench_res last; + struct rttst_bench_res overall; +} rttst_interm_bench_res_t; + +typedef struct rttst_overall_bench_res { + struct rttst_bench_res result; + __s32 *histogram_avg; + __s32 *histogram_min; + __s32 *histogram_max; +} rttst_overall_bench_res_t; + +#define RTTST_TMBENCH_INVALID -1 /* internal use only */ +#define RTTST_TMBENCH_TASK 0 +#define RTTST_TMBENCH_HANDLER 1 + +typedef struct rttst_tmbench_config { + int mode; + int priority; + __u64 period; + int warmup_loops; + int histogram_size; + int histogram_bucketsize; + int freeze_max; +} rttst_tmbench_config_t; + +struct rttst_swtest_task { + unsigned int index; + unsigned int flags; +}; + +/* Possible values for struct rttst_swtest_task::flags. */ +#define RTTST_SWTEST_FPU 0x1 +#define RTTST_SWTEST_USE_FPU 0x2 /* Only for kernel-space tasks. */ +#define RTTST_SWTEST_FREEZE 0x4 /* Only for kernel-space tasks. */ + +struct rttst_swtest_dir { + unsigned int from; + unsigned int to; +}; + +struct rttst_swtest_error { + struct rttst_swtest_dir last_switch; + unsigned int fp_val; +}; + +#define RTTST_RTDM_NORMAL_CLOSE 0 +#define RTTST_RTDM_DEFER_CLOSE_CONTEXT 1 + +#define RTTST_RTDM_MAGIC_PRIMARY 0xfefbfefb +#define RTTST_RTDM_MAGIC_SECONDARY 0xa5b9a5b9 + +#define RTTST_HEAPCHECK_ZEROOVRD 1 +#define RTTST_HEAPCHECK_SHUFFLE 2 +#define RTTST_HEAPCHECK_PATTERN 4 +#define RTTST_HEAPCHECK_HOT 8 + +struct rttst_heap_parms { + __u64 heap_size; + __u64 block_size; + int flags; + int nrstats; +}; + +struct rttst_heap_stats { + __u64 heap_size; + __u64 user_size; + __u64 block_size; + __s64 alloc_avg_ns; + __s64 alloc_max_ns; + __s64 free_avg_ns; + __s64 free_max_ns; + __u64 maximum_free; + __u64 largest_free; + int nrblocks; + int flags; +}; + +struct rttst_heap_stathdr { + int nrstats; + struct rttst_heap_stats *buf; +}; + +#define RTIOC_TYPE_TESTING RTDM_CLASS_TESTING + +/*! + * @name Sub-Classes of RTDM_CLASS_TESTING + * @{ */ +/** subclass name: "timerbench" */ +#define RTDM_SUBCLASS_TIMERBENCH 0 +/** subclass name: "irqbench" */ +#define RTDM_SUBCLASS_IRQBENCH 1 +/** subclass name: "switchtest" */ +#define RTDM_SUBCLASS_SWITCHTEST 2 +/** subclase name: "rtdm" */ +#define RTDM_SUBCLASS_RTDMTEST 3 +/** subclase name: "heapcheck" */ +#define RTDM_SUBCLASS_HEAPCHECK 4 +/** @} */ + +/*! 
+ * @anchor TSTIOCTLs @name IOCTLs + * Testing device IOCTLs + * @{ */ +#define RTTST_RTIOC_INTERM_BENCH_RES \ + _IOWR(RTIOC_TYPE_TESTING, 0x00, struct rttst_interm_bench_res) + +#define RTTST_RTIOC_TMBENCH_START \ + _IOW(RTIOC_TYPE_TESTING, 0x10, struct rttst_tmbench_config) + +#define RTTST_RTIOC_TMBENCH_STOP \ + _IOWR(RTIOC_TYPE_TESTING, 0x11, struct rttst_overall_bench_res) + +#define RTTST_RTIOC_SWTEST_SET_TASKS_COUNT \ + _IOW(RTIOC_TYPE_TESTING, 0x30, __u32) + +#define RTTST_RTIOC_SWTEST_SET_CPU \ + _IOW(RTIOC_TYPE_TESTING, 0x31, __u32) + +#define RTTST_RTIOC_SWTEST_REGISTER_UTASK \ + _IOW(RTIOC_TYPE_TESTING, 0x32, struct rttst_swtest_task) + +#define RTTST_RTIOC_SWTEST_CREATE_KTASK \ + _IOWR(RTIOC_TYPE_TESTING, 0x33, struct rttst_swtest_task) + +#define RTTST_RTIOC_SWTEST_PEND \ + _IOR(RTIOC_TYPE_TESTING, 0x34, struct rttst_swtest_task) + +#define RTTST_RTIOC_SWTEST_SWITCH_TO \ + _IOR(RTIOC_TYPE_TESTING, 0x35, struct rttst_swtest_dir) + +#define RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT \ + _IOR(RTIOC_TYPE_TESTING, 0x36, __u32) + +#define RTTST_RTIOC_SWTEST_GET_LAST_ERROR \ + _IOR(RTIOC_TYPE_TESTING, 0x37, struct rttst_swtest_error) + +#define RTTST_RTIOC_SWTEST_SET_PAUSE \ + _IOW(RTIOC_TYPE_TESTING, 0x38, __u32) + +#define RTTST_RTIOC_RTDM_DEFER_CLOSE \ + _IOW(RTIOC_TYPE_TESTING, 0x40, __u32) + +#define RTTST_RTIOC_RTDM_ACTOR_GET_CPU \ + _IOR(RTIOC_TYPE_TESTING, 0x41, __u32) + +#define RTTST_RTIOC_RTDM_PING_PRIMARY \ + _IOR(RTIOC_TYPE_TESTING, 0x42, __u32) + +#define RTTST_RTIOC_RTDM_PING_SECONDARY \ + _IOR(RTIOC_TYPE_TESTING, 0x43, __u32) + +#define RTTST_RTIOC_HEAP_CHECK \ + _IOR(RTIOC_TYPE_TESTING, 0x44, struct rttst_heap_parms) + +#define RTTST_RTIOC_HEAP_STAT_COLLECT \ + _IOR(RTIOC_TYPE_TESTING, 0x45, int) + +/** @} */ + +#endif /* !_RTDM_UAPI_TESTING_H */ --- linux/include/xenomai/rtdm/uapi/analogy.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/analogy.h 2022-03-21 12:58:32.276860809 +0100 @@ -0,0 +1,743 @@ +/** + * @file + * Analogy for Linux, UAPI bits + * @note Copyright (C) 1997-2000 David A. Schleef + * @note Copyright (C) 2008 Alexis Berlemont + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _RTDM_UAPI_ANALOGY_H +#define _RTDM_UAPI_ANALOGY_H + +/* --- Misc precompilation constant --- */ +#define A4L_NAMELEN 20 + +#define A4L_INFINITE 0 +#define A4L_NONBLOCK (-1) + +/* --- Common Analogy types --- */ + +typedef unsigned short sampl_t; +typedef unsigned long lsampl_t; + +/* MMAP ioctl argument structure */ +struct a4l_mmap_arg { + unsigned int idx_subd; + unsigned long size; + void *ptr; +}; +typedef struct a4l_mmap_arg a4l_mmap_t; + +/* Constants related with buffer size + (might be used with BUFCFG ioctl) */ +#define A4L_BUF_MAXSIZE 0x1000000 +#define A4L_BUF_DEFSIZE 0x10000 +#define A4L_BUF_DEFMAGIC 0xffaaff55 + +/* BUFCFG ioctl argument structure */ +struct a4l_buffer_config { + /* NOTE: with the last buffer implementation, the field + idx_subd became useless; the buffer are now + per-context. So, the buffer size configuration is specific + to an opened device. There is a little exception: we can + define a default buffer size for a device. + So far, a hack is used to implement the configuration of + the default buffer size */ + unsigned int idx_subd; + unsigned long buf_size; +}; +typedef struct a4l_buffer_config a4l_bufcfg_t; + +/* BUFINFO ioctl argument structure */ +struct a4l_buffer_info { + unsigned int idx_subd; + unsigned long buf_size; + unsigned long rw_count; +}; +typedef struct a4l_buffer_info a4l_bufinfo_t; + +/* BUFCFG2 / BUFINFO2 ioctl argument structure */ +struct a4l_buffer_config2 { + unsigned long wake_count; + unsigned long reserved[3]; +}; +typedef struct a4l_buffer_config2 a4l_bufcfg2_t; + +/* POLL ioctl argument structure */ +struct a4l_poll { + unsigned int idx_subd; + unsigned long arg; +}; +typedef struct a4l_poll a4l_poll_t; + +/* DEVCFG ioctl argument structure */ +struct a4l_link_desc { + unsigned char bname_size; + char *bname; + unsigned int opts_size; + void *opts; +}; +typedef struct a4l_link_desc a4l_lnkdesc_t; + +/* DEVINFO ioctl argument structure */ +struct a4l_dev_info { + char board_name[A4L_NAMELEN]; + char driver_name[A4L_NAMELEN]; + int nb_subd; + int idx_read_subd; + int idx_write_subd; +}; +typedef struct a4l_dev_info a4l_dvinfo_t; + +#define CIO 'd' +#define A4L_DEVCFG _IOW(CIO,0,a4l_lnkdesc_t) +#define A4L_DEVINFO _IOR(CIO,1,a4l_dvinfo_t) +#define A4L_SUBDINFO _IOR(CIO,2,a4l_sbinfo_t) +#define A4L_CHANINFO _IOR(CIO,3,a4l_chinfo_arg_t) +#define A4L_RNGINFO _IOR(CIO,4,a4l_rnginfo_arg_t) +#define A4L_CMD _IOWR(CIO,5,a4l_cmd_t) +#define A4L_CANCEL _IOR(CIO,6,unsigned int) +#define A4L_INSNLIST _IOR(CIO,7,unsigned int) +#define A4L_INSN _IOR(CIO,8,unsigned int) +#define A4L_BUFCFG _IOR(CIO,9,a4l_bufcfg_t) +#define A4L_BUFINFO _IOWR(CIO,10,a4l_bufinfo_t) +#define A4L_POLL _IOR(CIO,11,unsigned int) +#define A4L_MMAP _IOWR(CIO,12,unsigned int) +#define A4L_NBCHANINFO _IOR(CIO,13,a4l_chinfo_arg_t) +#define A4L_NBRNGINFO _IOR(CIO,14,a4l_rnginfo_arg_t) + +/* These IOCTLs are bound to be merged with A4L_BUFCFG and A4L_BUFINFO + at the next major release */ +#define A4L_BUFCFG2 _IOR(CIO,15,a4l_bufcfg_t) +#define A4L_BUFINFO2 _IOWR(CIO,16,a4l_bufcfg_t) + +/*! + * @addtogroup analogy_lib_async1 + * @{ + */ + +/*! + * @anchor ANALOGY_CMD_xxx @name ANALOGY_CMD_xxx + * @brief Common command flags definitions + * @{ + */ + +/** + * Do not execute the command, just check it + */ +#define A4L_CMD_SIMUL 0x1 +/** + * Perform data recovery / transmission in bulk mode + */ +#define A4L_CMD_BULK 0x2 +/** + * Perform a command which will write data to the device + */ +#define A4L_CMD_WRITE 0x4 + + /*! @} ANALOGY_CMD_xxx */ + +/*! 
+ * @anchor TRIG_xxx @name TRIG_xxx + * @brief Command triggers flags definitions + * @{ + */ + +/** + * Never trigger + */ +#define TRIG_NONE 0x00000001 +/** + * Trigger now + N ns + */ +#define TRIG_NOW 0x00000002 +/** + * Trigger on next lower level trig + */ +#define TRIG_FOLLOW 0x00000004 +/** + * Trigger at time N ns + */ +#define TRIG_TIME 0x00000008 +/** + * Trigger at rate N ns + */ +#define TRIG_TIMER 0x00000010 +/** + * Trigger when count reaches N + */ +#define TRIG_COUNT 0x00000020 +/** + * Trigger on external signal N + */ +#define TRIG_EXT 0x00000040 +/** + * Trigger on analogy-internal signal N + */ +#define TRIG_INT 0x00000080 +/** + * Driver defined trigger + */ +#define TRIG_OTHER 0x00000100 +/** + * Wake up on end-of-scan + */ +#define TRIG_WAKE_EOS 0x0020 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_MASK 0x00030000 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_NEAREST 0x00000000 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_DOWN 0x00010000 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_UP 0x00020000 +/** + * Trigger not implemented yet + */ +#define TRIG_ROUND_UP_NEXT 0x00030000 + + /*! @} TRIG_xxx */ + +/*! + * @anchor CHAN_RNG_AREF @name Channel macros + * @brief Specific precompilation macros and constants useful for the + * channels descriptors tab located in the command structure + * @{ + */ + +/** + * Channel indication macro + */ +#define CHAN(a) ((a) & 0xffff) +/** + * Range definition macro + */ +#define RNG(a) (((a) & 0xff) << 16) +/** + * Reference definition macro + */ +#define AREF(a) (((a) & 0x03) << 24) +/** + * Flags definition macro + */ +#define FLAGS(a) ((a) & CR_FLAGS_MASK) +/** + * Channel + range + reference definition macro + */ +#define PACK(a, b, c) (a | RNG(b) | AREF(c)) +/** + * Channel + range + reference + flags definition macro + */ +#define PACK_FLAGS(a, b, c, d) (PACK(a, b, c) | FLAGS(d)) + +/** + * Analog reference is analog ground + */ +#define AREF_GROUND 0x00 +/** + * Analog reference is analog common + */ +#define AREF_COMMON 0x01 +/** + * Analog reference is differential + */ +#define AREF_DIFF 0x02 +/** + * Analog reference is undefined + */ +#define AREF_OTHER 0x03 + + /*! @} CHAN_RNG_AREF */ + +#if !defined(DOXYGEN_CPP) + +#define CR_FLAGS_MASK 0xfc000000 +#define CR_ALT_FILTER (1<<26) +#define CR_DITHER CR_ALT_FILTER +#define CR_DEGLITCH CR_ALT_FILTER +#define CR_ALT_SOURCE (1<<27) +#define CR_EDGE (1<<30) +#define CR_INVERT (1<<31) + +#endif /* !DOXYGEN_CPP */ + +/*! + * @brief Structure describing the asynchronous instruction + * @see a4l_snd_command() + */ + +struct a4l_cmd_desc { + unsigned char idx_subd; + /**< Subdevice to which the command will be applied. 
*/ + + unsigned long flags; + /**< Command flags */ + + /* Command trigger characteristics */ + unsigned int start_src; + /**< Start trigger type */ + unsigned int start_arg; + /**< Start trigger argument */ + unsigned int scan_begin_src; + /**< Scan begin trigger type */ + unsigned int scan_begin_arg; + /**< Scan begin trigger argument */ + unsigned int convert_src; + /**< Convert trigger type */ + unsigned int convert_arg; + /**< Convert trigger argument */ + unsigned int scan_end_src; + /**< Scan end trigger type */ + unsigned int scan_end_arg; + /**< Scan end trigger argument */ + unsigned int stop_src; + /**< Stop trigger type */ + unsigned int stop_arg; + /**< Stop trigger argument */ + + unsigned char nb_chan; + /**< Count of channels related with the command */ + unsigned int *chan_descs; + /**< Tab containing channels descriptors */ + + /* Driver specific fields */ + unsigned int valid_simul_stages; + /** < cmd simulation valid stages (driver dependent) */ + + unsigned int data_len; + /**< Driver specific buffer size */ + sampl_t *data; + /**< Driver specific buffer pointer */ +}; +typedef struct a4l_cmd_desc a4l_cmd_t; + +/*! @} analogy_lib_async1 */ + +/* --- Range section --- */ + +/** Constant for internal use only (must not be used by driver + developer). */ +#define A4L_RNG_FACTOR 1000000 + +/** + * Volt unit range flag + */ +#define A4L_RNG_VOLT_UNIT 0x0 +/** + * MilliAmpere unit range flag + */ +#define A4L_RNG_MAMP_UNIT 0x1 +/** + * No unit range flag + */ +#define A4L_RNG_NO_UNIT 0x2 +/** + * External unit range flag + */ +#define A4L_RNG_EXT_UNIT 0x4 + +/** + * Macro to retrieve the range unit from the range flags + */ +#define A4L_RNG_UNIT(x) (x & (A4L_RNG_VOLT_UNIT | \ + A4L_RNG_MAMP_UNIT | \ + A4L_RNG_NO_UNIT | \ + A4L_RNG_EXT_UNIT)) + +/* --- Subdevice flags desc stuff --- */ + +/* TODO: replace ANALOGY_SUBD_AI with ANALOGY_SUBD_ANALOG + and ANALOGY_SUBD_INPUT */ + +/* Subdevice types masks */ +#define A4L_SUBD_MASK_READ 0x80000000 +#define A4L_SUBD_MASK_WRITE 0x40000000 +#define A4L_SUBD_MASK_SPECIAL 0x20000000 + +/*! + * @addtogroup analogy_subdevice + * @{ + */ + +/*! 
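+ * As an aside, and purely illustrative (not part of the original header):
+ * the asynchronous command descriptor defined above is typically filled
+ * like this before being submitted with a4l_snd_command(); channel and
+ * timing values are arbitrary assumptions:
+ *
+ * @code
+ * unsigned int chans[2] = { PACK(0, 0, AREF_GROUND), PACK(1, 0, AREF_GROUND) };
+ * struct a4l_cmd_desc cmd = {
+ *	.idx_subd = 0,
+ *	.start_src = TRIG_NOW,
+ *	.start_arg = 0,
+ *	.scan_begin_src = TRIG_TIMER,
+ *	.scan_begin_arg = 1000000,	/* scan period, ns */
+ *	.convert_src = TRIG_TIMER,
+ *	.convert_arg = 500000,		/* inter-channel delay, ns */
+ *	.scan_end_src = TRIG_COUNT,
+ *	.scan_end_arg = 2,
+ *	.stop_src = TRIG_NONE,
+ *	.stop_arg = 0,
+ *	.nb_chan = 2,
+ *	.chan_descs = chans,
+ * };
+ * @endcode
+ *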
+ * @anchor ANALOGY_SUBD_xxx @name Subdevices types + * @brief Flags to define the subdevice type + * @{ + */ + +/** + * Unused subdevice + */ +#define A4L_SUBD_UNUSED (A4L_SUBD_MASK_SPECIAL|0x1) +/** + * Analog input subdevice + */ +#define A4L_SUBD_AI (A4L_SUBD_MASK_READ|0x2) +/** + * Analog output subdevice + */ +#define A4L_SUBD_AO (A4L_SUBD_MASK_WRITE|0x4) +/** + * Digital input subdevice + */ +#define A4L_SUBD_DI (A4L_SUBD_MASK_READ|0x8) +/** + * Digital output subdevice + */ +#define A4L_SUBD_DO (A4L_SUBD_MASK_WRITE|0x10) +/** + * Digital input/output subdevice + */ +#define A4L_SUBD_DIO (A4L_SUBD_MASK_SPECIAL|0x20) +/** + * Counter subdevice + */ +#define A4L_SUBD_COUNTER (A4L_SUBD_MASK_SPECIAL|0x40) +/** + * Timer subdevice + */ +#define A4L_SUBD_TIMER (A4L_SUBD_MASK_SPECIAL|0x80) +/** + * Memory, EEPROM, DPRAM + */ +#define A4L_SUBD_MEMORY (A4L_SUBD_MASK_SPECIAL|0x100) +/** + * Calibration subdevice DACs + */ +#define A4L_SUBD_CALIB (A4L_SUBD_MASK_SPECIAL|0x200) +/** + * Processor, DSP + */ +#define A4L_SUBD_PROC (A4L_SUBD_MASK_SPECIAL|0x400) +/** + * Serial IO subdevice + */ +#define A4L_SUBD_SERIAL (A4L_SUBD_MASK_SPECIAL|0x800) +/** + * Mask which gathers all the types + */ +#define A4L_SUBD_TYPES (A4L_SUBD_UNUSED | \ + A4L_SUBD_AI | \ + A4L_SUBD_AO | \ + A4L_SUBD_DI | \ + A4L_SUBD_DO | \ + A4L_SUBD_DIO | \ + A4L_SUBD_COUNTER | \ + A4L_SUBD_TIMER | \ + A4L_SUBD_MEMORY | \ + A4L_SUBD_CALIB | \ + A4L_SUBD_PROC | \ + A4L_SUBD_SERIAL) + +/*! @} ANALOGY_SUBD_xxx */ + +/*! + * @anchor ANALOGY_SUBD_FT_xxx @name Subdevice features + * @brief Flags to define the subdevice's capabilities + * @{ + */ + +/* Subdevice capabilities */ +/** + * The subdevice can handle command (i.e it can perform asynchronous + * acquisition) + */ +#define A4L_SUBD_CMD 0x1000 +/** + * The subdevice support mmap operations (technically, any driver can + * do it; however, the developer might want that his driver must be + * accessed through read / write + */ +#define A4L_SUBD_MMAP 0x8000 + +/*! @} ANALOGY_SUBD_FT_xxx */ + +/*! + * @anchor ANALOGY_SUBD_ST_xxx @name Subdevice status + * @brief Flags to define the subdevice's status + * @{ + */ + +/* Subdevice status flag(s) */ +/** + * The subdevice is busy, a synchronous or an asynchronous acquisition + * is occuring + */ +#define A4L_SUBD_BUSY_NR 0 +#define A4L_SUBD_BUSY (1 << A4L_SUBD_BUSY_NR) + +/** + * The subdevice is about to be cleaned in the middle of the detach + * procedure + */ +#define A4L_SUBD_CLEAN_NR 1 +#define A4L_SUBD_CLEAN (1 << A4L_SUBD_CLEAN_NR) + + +/*! @} ANALOGY_SUBD_ST_xxx */ + +/* --- Subdevice related IOCTL arguments structures --- */ + +/* SUDBINFO IOCTL argument */ +struct a4l_subd_info { + unsigned long flags; + unsigned long status; + unsigned char nb_chan; +}; +typedef struct a4l_subd_info a4l_sbinfo_t; + +/* CHANINFO / NBCHANINFO IOCTL arguments */ +struct a4l_chan_info { + unsigned long chan_flags; + unsigned char nb_rng; + unsigned char nb_bits; +}; +typedef struct a4l_chan_info a4l_chinfo_t; + +struct a4l_chinfo_arg { + unsigned int idx_subd; + void *info; +}; +typedef struct a4l_chinfo_arg a4l_chinfo_arg_t; + +/* RNGINFO / NBRNGINFO IOCTL arguments */ +struct a4l_rng_info { + long min; + long max; + unsigned long flags; +}; +typedef struct a4l_rng_info a4l_rnginfo_t; + +struct a4l_rng_info_arg { + unsigned int idx_subd; + unsigned int idx_chan; + void *info; +}; +typedef struct a4l_rng_info_arg a4l_rnginfo_arg_t; + +/*! 
@} */ + +#define A4L_INSN_MASK_READ 0x8000000 +#define A4L_INSN_MASK_WRITE 0x4000000 +#define A4L_INSN_MASK_SPECIAL 0x2000000 + +/*! + * @addtogroup analogy_lib_sync1 + * @{ + */ + +/*! + * @anchor ANALOGY_INSN_xxx @name Instruction type + * @brief Flags to define the type of instruction + * @{ + */ + +/** + * Read instruction + */ +#define A4L_INSN_READ (0 | A4L_INSN_MASK_READ) +/** + * Write instruction + */ +#define A4L_INSN_WRITE (1 | A4L_INSN_MASK_WRITE) +/** + * "Bits" instruction + */ +#define A4L_INSN_BITS (2 | A4L_INSN_MASK_READ | \ + A4L_INSN_MASK_WRITE) +/** + * Configuration instruction + */ +#define A4L_INSN_CONFIG (3 | A4L_INSN_MASK_READ | \ + A4L_INSN_MASK_WRITE) +/** + * Get time instruction + */ +#define A4L_INSN_GTOD (4 | A4L_INSN_MASK_READ | \ + A4L_INSN_MASK_SPECIAL) +/** + * Wait instruction + */ +#define A4L_INSN_WAIT (5 | A4L_INSN_MASK_WRITE | \ + A4L_INSN_MASK_SPECIAL) +/** + * Trigger instruction (to start asynchronous acquisition) + */ +#define A4L_INSN_INTTRIG (6 | A4L_INSN_MASK_WRITE | \ + A4L_INSN_MASK_SPECIAL) + + /*! @} ANALOGY_INSN_xxx */ + +/** + * Maximal wait duration + */ +#define A4L_INSN_WAIT_MAX 100000 + +/*! + * @anchor INSN_CONFIG_xxx @name Configuration instruction type + * @brief Values to define the type of configuration instruction + * @{ + */ + +#define A4L_INSN_CONFIG_DIO_INPUT 0 +#define A4L_INSN_CONFIG_DIO_OUTPUT 1 +#define A4L_INSN_CONFIG_DIO_OPENDRAIN 2 +#define A4L_INSN_CONFIG_ANALOG_TRIG 16 +#define A4L_INSN_CONFIG_ALT_SOURCE 20 +#define A4L_INSN_CONFIG_DIGITAL_TRIG 21 +#define A4L_INSN_CONFIG_BLOCK_SIZE 22 +#define A4L_INSN_CONFIG_TIMER_1 23 +#define A4L_INSN_CONFIG_FILTER 24 +#define A4L_INSN_CONFIG_CHANGE_NOTIFY 25 +#define A4L_INSN_CONFIG_SERIAL_CLOCK 26 +#define A4L_INSN_CONFIG_BIDIRECTIONAL_DATA 27 +#define A4L_INSN_CONFIG_DIO_QUERY 28 +#define A4L_INSN_CONFIG_PWM_OUTPUT 29 +#define A4L_INSN_CONFIG_GET_PWM_OUTPUT 30 +#define A4L_INSN_CONFIG_ARM 31 +#define A4L_INSN_CONFIG_DISARM 32 +#define A4L_INSN_CONFIG_GET_COUNTER_STATUS 33 +#define A4L_INSN_CONFIG_RESET 34 +#define A4L_INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR 1001 /* Use CTR as single pulsegenerator */ +#define A4L_INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR 1002 /* Use CTR as pulsetraingenerator */ +#define A4L_INSN_CONFIG_GPCT_QUADRATURE_ENCODER 1003 /* Use the counter as encoder */ +#define A4L_INSN_CONFIG_SET_GATE_SRC 2001 /* Set gate source */ +#define A4L_INSN_CONFIG_GET_GATE_SRC 2002 /* Get gate source */ +#define A4L_INSN_CONFIG_SET_CLOCK_SRC 2003 /* Set master clock source */ +#define A4L_INSN_CONFIG_GET_CLOCK_SRC 2004 /* Get master clock source */ +#define A4L_INSN_CONFIG_SET_OTHER_SRC 2005 /* Set other source */ +#define A4L_INSN_CONFIG_SET_COUNTER_MODE 4097 +#define A4L_INSN_CONFIG_SET_ROUTING 4099 +#define A4L_INSN_CONFIG_GET_ROUTING 4109 + +/*! @} INSN_CONFIG_xxx */ + +/*! + * @anchor ANALOGY_COUNTER_xxx @name Counter status bits + * @brief Status bits for INSN_CONFIG_GET_COUNTER_STATUS + * @{ + */ + +#define A4L_COUNTER_ARMED 0x1 +#define A4L_COUNTER_COUNTING 0x2 +#define A4L_COUNTER_TERMINAL_COUNT 0x4 + + /*! @} ANALOGY_COUNTER_xxx */ + +/*! + * @anchor ANALOGY_IO_DIRECTION @name IO direction + * @brief Values to define the IO polarity + * @{ + */ + +#define A4L_INPUT 0 +#define A4L_OUTPUT 1 +#define A4L_OPENDRAIN 2 + + /*! @} ANALOGY_IO_DIRECTION */ + + +/*! + * @anchor ANALOGY_EV_xxx @name Events types + * @brief Values to define the Analogy events. They might used to send + * some specific events through the instruction interface. 
+ * @{ + */ + +#define A4L_EV_START 0x00040000 +#define A4L_EV_SCAN_BEGIN 0x00080000 +#define A4L_EV_CONVERT 0x00100000 +#define A4L_EV_SCAN_END 0x00200000 +#define A4L_EV_STOP 0x00400000 + +/*! @} ANALOGY_EV_xxx */ + +/*! + * @brief Structure describing the synchronous instruction + * @see a4l_snd_insn() + */ + +struct a4l_instruction { + unsigned int type; + /**< Instruction type */ + unsigned int idx_subd; + /**< Subdevice to which the instruction will be applied. */ + unsigned int chan_desc; + /**< Channel descriptor */ + unsigned int data_size; + /**< Size of the intruction data */ + void *data; + /**< Instruction data */ +}; +typedef struct a4l_instruction a4l_insn_t; + +/*! + * @brief Structure describing the list of synchronous instructions + * @see a4l_snd_insnlist() + */ + +struct a4l_instruction_list { + unsigned int count; + /**< Instructions count */ + a4l_insn_t *insns; + /**< Tab containing the instructions pointers */ +}; +typedef struct a4l_instruction_list a4l_insnlst_t; + +/*! @} analogy_lib_sync1 */ + +struct a4l_calibration_subdev { + a4l_sbinfo_t *info; + char *name; + int slen; + int idx; +}; + +struct a4l_calibration_subdev_data { + int index; + int channel; + int range; + int expansion; + int nb_coeff; + double *coeff; + +}; + +struct a4l_calibration_data { + char *driver_name; + char *board_name; + int nb_ai; + struct a4l_calibration_subdev_data *ai; + int nb_ao; + struct a4l_calibration_subdev_data *ao; +}; + +struct a4l_polynomial { + int expansion; + int order; + int nb_coeff; + double *coeff; +}; + + +#endif /* _RTDM_UAPI_ANALOGY_H */ --- linux/include/xenomai/rtdm/uapi/gpio.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/gpio.h 2022-03-21 12:58:32.269860877 +0100 @@ -0,0 +1,43 @@ +/** + * @note Copyright (C) 2016 Philippe Gerum + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _RTDM_UAPI_GPIO_H +#define _RTDM_UAPI_GPIO_H + +struct rtdm_gpio_readout { + nanosecs_abs_t timestamp; + __s32 value; +}; + +#define GPIO_RTIOC_DIR_OUT _IOW(RTDM_CLASS_GPIO, 0, int) +#define GPIO_RTIOC_DIR_IN _IO(RTDM_CLASS_GPIO, 1) +#define GPIO_RTIOC_IRQEN _IOW(RTDM_CLASS_GPIO, 2, int) /* GPIO trigger */ +#define GPIO_RTIOC_IRQDIS _IO(RTDM_CLASS_GPIO, 3) +#define GPIO_RTIOC_REQS _IO(RTDM_CLASS_GPIO, 4) +#define GPIO_RTIOC_RELS _IO(RTDM_CLASS_GPIO, 5) +#define GPIO_RTIOC_TS_MONO _IOR(RTDM_CLASS_GPIO, 7, int) +#define GPIO_RTIOC_TS_REAL _IOR(RTDM_CLASS_GPIO, 8, int) +#define GPIO_RTIOC_TS GPIO_RTIOC_TS_REAL + +#define GPIO_TRIGGER_NONE 0x0 /* unspecified */ +#define GPIO_TRIGGER_EDGE_RISING 0x1 +#define GPIO_TRIGGER_EDGE_FALLING 0x2 +#define GPIO_TRIGGER_LEVEL_HIGH 0x4 +#define GPIO_TRIGGER_LEVEL_LOW 0x8 +#define GPIO_TRIGGER_MASK 0xf + +#endif /* !_RTDM_UAPI_GPIO_H */ --- linux/include/xenomai/rtdm/uapi/serial.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/serial.h 2022-03-21 12:58:32.262860946 +0100 @@ -0,0 +1,407 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, serial device profile header + * + * @note Copyright (C) 2005-2007 Jan Kiszka + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * @ingroup rtserial + */ +#ifndef _RTDM_UAPI_SERIAL_H +#define _RTDM_UAPI_SERIAL_H + +#define RTSER_PROFILE_VER 3 + +/*! + * @anchor RTSER_DEF_BAUD @name RTSER_DEF_BAUD + * Default baud rate + * @{ */ +#define RTSER_DEF_BAUD 9600 +/** @} */ + +/*! + * @anchor RTSER_xxx_PARITY @name RTSER_xxx_PARITY + * Number of parity bits + * @{ */ +#define RTSER_NO_PARITY 0x00 +#define RTSER_ODD_PARITY 0x01 +#define RTSER_EVEN_PARITY 0x03 +#define RTSER_DEF_PARITY RTSER_NO_PARITY +/** @} */ + +/*! + * @anchor RTSER_xxx_BITS @name RTSER_xxx_BITS + * Number of data bits + * @{ */ +#define RTSER_5_BITS 0x00 +#define RTSER_6_BITS 0x01 +#define RTSER_7_BITS 0x02 +#define RTSER_8_BITS 0x03 +#define RTSER_DEF_BITS RTSER_8_BITS +/** @} */ + +/*! + * @anchor RTSER_xxx_STOPB @name RTSER_xxx_STOPB + * Number of stop bits + * @{ */ +#define RTSER_1_STOPB 0x00 +/** valid only in combination with 5 data bits */ +#define RTSER_1_5_STOPB 0x01 +#define RTSER_2_STOPB 0x01 +#define RTSER_DEF_STOPB RTSER_1_STOPB +/** @} */ + +/*! + * @anchor RTSER_xxx_HAND @name RTSER_xxx_HAND + * Handshake mechanisms + * @{ */ +#define RTSER_NO_HAND 0x00 +#define RTSER_RTSCTS_HAND 0x01 +#define RTSER_DEF_HAND RTSER_NO_HAND +/** @} */ + +/*! + * @anchor RTSER_RS485_xxx @name RTSER_RS485_xxx + * RS485 mode with automatic RTS handling + * @{ */ +#define RTSER_RS485_DISABLE 0x00 +#define RTSER_RS485_ENABLE 0x01 +#define RTSER_DEF_RS485 RTSER_RS485_DISABLE +/** @} */ + +/*! 
+ * @anchor RTSER_FIFO_xxx @name RTSER_FIFO_xxx + * Reception FIFO interrupt threshold + * @{ */ +#define RTSER_FIFO_DEPTH_1 0x00 +#define RTSER_FIFO_DEPTH_4 0x40 +#define RTSER_FIFO_DEPTH_8 0x80 +#define RTSER_FIFO_DEPTH_14 0xC0 +#define RTSER_DEF_FIFO_DEPTH RTSER_FIFO_DEPTH_1 +/** @} */ + +/*! + * @anchor RTSER_TIMEOUT_xxx @name RTSER_TIMEOUT_xxx + * Special timeout values, see also @ref RTDM_TIMEOUT_xxx + * @{ */ +#define RTSER_TIMEOUT_INFINITE RTDM_TIMEOUT_INFINITE +#define RTSER_TIMEOUT_NONE RTDM_TIMEOUT_NONE +#define RTSER_DEF_TIMEOUT RTDM_TIMEOUT_INFINITE +/** @} */ + +/*! + * @anchor RTSER_xxx_TIMESTAMP_HISTORY @name RTSER_xxx_TIMESTAMP_HISTORY + * Timestamp history control + * @{ */ +#define RTSER_RX_TIMESTAMP_HISTORY 0x01 +#define RTSER_DEF_TIMESTAMP_HISTORY 0x00 +/** @} */ + +/*! + * @anchor RTSER_EVENT_xxx @name RTSER_EVENT_xxx + * Events bits + * @{ */ +#define RTSER_EVENT_RXPEND 0x01 +#define RTSER_EVENT_ERRPEND 0x02 +#define RTSER_EVENT_MODEMHI 0x04 +#define RTSER_EVENT_MODEMLO 0x08 +#define RTSER_EVENT_TXEMPTY 0x10 +#define RTSER_DEF_EVENT_MASK 0x00 +/** @} */ + + +/*! + * @anchor RTSER_SET_xxx @name RTSER_SET_xxx + * Configuration mask bits + * @{ */ +#define RTSER_SET_BAUD 0x0001 +#define RTSER_SET_PARITY 0x0002 +#define RTSER_SET_DATA_BITS 0x0004 +#define RTSER_SET_STOP_BITS 0x0008 +#define RTSER_SET_HANDSHAKE 0x0010 +#define RTSER_SET_FIFO_DEPTH 0x0020 +#define RTSER_SET_TIMEOUT_RX 0x0100 +#define RTSER_SET_TIMEOUT_TX 0x0200 +#define RTSER_SET_TIMEOUT_EVENT 0x0400 +#define RTSER_SET_TIMESTAMP_HISTORY 0x0800 +#define RTSER_SET_EVENT_MASK 0x1000 +#define RTSER_SET_RS485 0x2000 +/** @} */ + + +/*! + * @anchor RTSER_LSR_xxx @name RTSER_LSR_xxx + * Line status bits + * @{ */ +#define RTSER_LSR_DATA 0x01 +#define RTSER_LSR_OVERRUN_ERR 0x02 +#define RTSER_LSR_PARITY_ERR 0x04 +#define RTSER_LSR_FRAMING_ERR 0x08 +#define RTSER_LSR_BREAK_IND 0x10 +#define RTSER_LSR_THR_EMTPY 0x20 +#define RTSER_LSR_TRANSM_EMPTY 0x40 +#define RTSER_LSR_FIFO_ERR 0x80 +#define RTSER_SOFT_OVERRUN_ERR 0x0100 +/** @} */ + + +/*! + * @anchor RTSER_MSR_xxx @name RTSER_MSR_xxx + * Modem status bits + * @{ */ +#define RTSER_MSR_DCTS 0x01 +#define RTSER_MSR_DDSR 0x02 +#define RTSER_MSR_TERI 0x04 +#define RTSER_MSR_DDCD 0x08 +#define RTSER_MSR_CTS 0x10 +#define RTSER_MSR_DSR 0x20 +#define RTSER_MSR_RI 0x40 +#define RTSER_MSR_DCD 0x80 +/** @} */ + + +/*! + * @anchor RTSER_MCR_xxx @name RTSER_MCR_xxx + * Modem control bits + * @{ */ +#define RTSER_MCR_DTR 0x01 +#define RTSER_MCR_RTS 0x02 +#define RTSER_MCR_OUT1 0x04 +#define RTSER_MCR_OUT2 0x08 +#define RTSER_MCR_LOOP 0x10 +/** @} */ + + +/*! 
+ * @anchor RTSER_BREAK_xxx @name RTSER_BREAK_xxx + * Break control + * @{ */ +#define RTSER_BREAK_CLR 0x00 +#define RTSER_BREAK_SET 0x01 + + +/** + * Serial device configuration + */ +typedef struct rtser_config { + /** mask specifying valid fields, see @ref RTSER_SET_xxx */ + int config_mask; + + /** baud rate, default @ref RTSER_DEF_BAUD */ + int baud_rate; + + /** number of parity bits, see @ref RTSER_xxx_PARITY */ + int parity; + + /** number of data bits, see @ref RTSER_xxx_BITS */ + int data_bits; + + /** number of stop bits, see @ref RTSER_xxx_STOPB */ + int stop_bits; + + /** handshake mechanisms, see @ref RTSER_xxx_HAND */ + int handshake; + + /** reception FIFO interrupt threshold, see @ref RTSER_FIFO_xxx */ + int fifo_depth; + + int reserved; + + /** reception timeout, see @ref RTSER_TIMEOUT_xxx for special + * values */ + nanosecs_rel_t rx_timeout; + + /** transmission timeout, see @ref RTSER_TIMEOUT_xxx for special + * values */ + nanosecs_rel_t tx_timeout; + + /** event timeout, see @ref RTSER_TIMEOUT_xxx for special values */ + nanosecs_rel_t event_timeout; + + /** enable timestamp history, see @ref RTSER_xxx_TIMESTAMP_HISTORY */ + int timestamp_history; + + /** event mask to be used with @ref RTSER_RTIOC_WAIT_EVENT, see + * @ref RTSER_EVENT_xxx */ + int event_mask; + + /** enable RS485 mode, see @ref RTSER_RS485_xxx */ + int rs485; +} rtser_config_t; + +/** + * Serial device status + */ +typedef struct rtser_status { + /** line status register, see @ref RTSER_LSR_xxx */ + int line_status; + + /** modem status register, see @ref RTSER_MSR_xxx */ + int modem_status; +} rtser_status_t; + +/** + * Additional information about serial device events + */ +typedef struct rtser_event { + /** signalled events, see @ref RTSER_EVENT_xxx */ + int events; + + /** number of pending input characters */ + int rx_pending; + + /** last interrupt timestamp */ + nanosecs_abs_t last_timestamp; + + /** reception timestamp of oldest character in input queue */ + nanosecs_abs_t rxpend_timestamp; +} rtser_event_t; + + +#define RTIOC_TYPE_SERIAL RTDM_CLASS_SERIAL + + +/*! + * @name Sub-Classes of RTDM_CLASS_SERIAL + * @{ */ +#define RTDM_SUBCLASS_16550A 0 +/** @} */ + + +/*! + * @anchor SERIOCTLs @name IOCTLs + * Serial device IOCTLs + * @{ */ + +/** + * Get serial device configuration + * + * @param[out] arg Pointer to configuration buffer (struct rtser_config) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + */ +#define RTSER_RTIOC_GET_CONFIG \ + _IOR(RTIOC_TYPE_SERIAL, 0x00, struct rtser_config) + +/** + * Set serial device configuration + * + * @param[in] arg Pointer to configuration buffer (struct rtser_config) + * + * @return 0 on success, otherwise: + * + * - -EPERM is returned if the caller's context is invalid, see note below. + * + * - -ENOMEM is returned if a new history buffer for timestamps cannot be + * allocated. + * + * @coretags{task-unrestricted} + * + * @note If rtser_config contains a valid timestamp_history and the + * addressed device has been opened in non-real-time context, this IOCTL must + * be issued in non-real-time context as well. Otherwise, this command will + * fail. 
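+ *
+ * A minimal, purely illustrative sketch (not part of the original header):
+ * switching an open port, referred to by an assumed descriptor fd, to
+ * 115200 8N1 might look like this:
+ *
+ * @code
+ * struct rtser_config config = {
+ *	.config_mask = RTSER_SET_BAUD | RTSER_SET_PARITY |
+ *		       RTSER_SET_DATA_BITS | RTSER_SET_STOP_BITS,
+ *	.baud_rate = 115200,
+ *	.parity = RTSER_NO_PARITY,
+ *	.data_bits = RTSER_8_BITS,
+ *	.stop_bits = RTSER_1_STOPB,
+ * };
+ * ret = ioctl(fd, RTSER_RTIOC_SET_CONFIG, &config);
+ * @endcode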
+ */ +#define RTSER_RTIOC_SET_CONFIG \ + _IOW(RTIOC_TYPE_SERIAL, 0x01, struct rtser_config) + +/** + * Get serial device status + * + * @param[out] arg Pointer to status buffer (struct rtser_status) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + * + * @note The error states @c RTSER_LSR_OVERRUN_ERR, @c RTSER_LSR_PARITY_ERR, + * @c RTSER_LSR_FRAMING_ERR, and @c RTSER_SOFT_OVERRUN_ERR that may have + * occured during previous read accesses to the device will be saved for being + * reported via this IOCTL. Upon return from @c RTSER_RTIOC_GET_STATUS, the + * saved state will be cleared. + */ +#define RTSER_RTIOC_GET_STATUS \ + _IOR(RTIOC_TYPE_SERIAL, 0x02, struct rtser_status) + +/** + * Get serial device's modem contol register + * + * @param[out] arg Pointer to variable receiving the content (int, see + * @ref RTSER_MCR_xxx) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + */ +#define RTSER_RTIOC_GET_CONTROL \ + _IOR(RTIOC_TYPE_SERIAL, 0x03, int) + +/** + * Set serial device's modem contol register + * + * @param[in] arg New control register content (int, see @ref RTSER_MCR_xxx) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + */ +#define RTSER_RTIOC_SET_CONTROL \ + _IOW(RTIOC_TYPE_SERIAL, 0x04, int) + +/** + * Wait on serial device events according to previously set mask + * + * @param[out] arg Pointer to event information buffer (struct rtser_event) + * + * @return 0 on success, otherwise: + * + * - -EBUSY is returned if another task is already waiting on events of this + * device. + * + * - -EBADF is returned if the file descriptor is invalid or the device has + * just been closed. + * + * @coretags{mode-unrestricted} + */ +#define RTSER_RTIOC_WAIT_EVENT \ + _IOR(RTIOC_TYPE_SERIAL, 0x05, struct rtser_event) +/** @} */ + +/** + * Set or clear break on UART output line + * + * @param[in] arg @c RTSER_BREAK_SET or @c RTSER_BREAK_CLR (int) + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + * + * @note A set break condition may also be cleared on UART line + * reconfiguration. + */ +#define RTSER_RTIOC_BREAK_CTL \ + _IOR(RTIOC_TYPE_SERIAL, 0x06, int) +/** @} */ + +/*! + * @anchor SERutils @name RT Serial example and utility programs + * @{ */ +/** @example cross-link.c */ +/** @} */ + +#endif /* !_RTDM_UAPI_SERIAL_H */ --- linux/include/xenomai/rtdm/uapi/rtdm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/rtdm.h 2022-03-21 12:58:32.254861024 +0100 @@ -0,0 +1,203 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, user API header. + * + * @note Copyright (C) 2005, 2006 Jan Kiszka + * @note Copyright (C) 2005 Joerg Langenberg + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * @ingroup rtdm_user_api + */ +#ifndef _RTDM_UAPI_RTDM_H +#define _RTDM_UAPI_RTDM_H + +/*! + * @addtogroup rtdm + * @{ + */ + +/*! + * @anchor rtdm_api_versioning @name API Versioning + * @{ */ +/** Common user and driver API version */ +#define RTDM_API_VER 9 + +/** Minimum API revision compatible with the current release */ +#define RTDM_API_MIN_COMPAT_VER 9 +/** @} API Versioning */ + +/** RTDM type for representing absolute dates. Its base type is a 64 bit + * unsigned integer. The unit is 1 nanosecond. */ +typedef uint64_t nanosecs_abs_t; + +/** RTDM type for representing relative intervals. Its base type is a 64 bit + * signed integer. The unit is 1 nanosecond. Relative intervals can also + * encode the special timeouts "infinite" and "non-blocking", see + * @ref RTDM_TIMEOUT_xxx. */ +typedef int64_t nanosecs_rel_t; + +/*! + * @anchor RTDM_TIMEOUT_xxx @name RTDM_TIMEOUT_xxx + * Special timeout values + * @{ */ +/** Block forever. */ +#define RTDM_TIMEOUT_INFINITE 0 + +/** Any negative timeout means non-blocking. */ +#define RTDM_TIMEOUT_NONE (-1) +/** @} RTDM_TIMEOUT_xxx */ +/** @} rtdm */ + +/*! + * @addtogroup rtdm_profiles + * @{ + */ + +/*! + * @anchor RTDM_CLASS_xxx @name RTDM_CLASS_xxx + * Device classes + * @{ */ +#define RTDM_CLASS_PARPORT 1 +#define RTDM_CLASS_SERIAL 2 +#define RTDM_CLASS_CAN 3 +#define RTDM_CLASS_NETWORK 4 +#define RTDM_CLASS_RTMAC 5 +#define RTDM_CLASS_TESTING 6 +#define RTDM_CLASS_RTIPC 7 +#define RTDM_CLASS_COBALT 8 +#define RTDM_CLASS_UDD 9 +#define RTDM_CLASS_MEMORY 10 +#define RTDM_CLASS_GPIO 11 +#define RTDM_CLASS_SPI 12 +#define RTDM_CLASS_PWM 13 + +#define RTDM_CLASS_MISC 223 +#define RTDM_CLASS_EXPERIMENTAL 224 +#define RTDM_CLASS_MAX 255 +/** @} RTDM_CLASS_xxx */ + +#define RTDM_SUBCLASS_GENERIC (-1) + +#define RTIOC_TYPE_COMMON 0 + +/*! + * @anchor device_naming @name Device Naming + * Maximum length of device names (excluding the final null character) + * @{ + */ +#define RTDM_MAX_DEVNAME_LEN 31 +/** @} Device Naming */ + +/** + * Device information + */ +typedef struct rtdm_device_info { + /** Device flags, see @ref dev_flags "Device Flags" for details */ + int device_flags; + + /** Device class ID, see @ref RTDM_CLASS_xxx */ + int device_class; + + /** Device sub-class, either RTDM_SUBCLASS_GENERIC or a + * RTDM_SUBCLASS_xxx definition of the related @ref rtdm_profiles + * "Device Profile" */ + int device_sub_class; + + /** Supported device profile version */ + int profile_version; +} rtdm_device_info_t; + +/*! + * @anchor RTDM_PURGE_xxx_BUFFER @name RTDM_PURGE_xxx_BUFFER + * Flags selecting buffers to be purged + * @{ */ +#define RTDM_PURGE_RX_BUFFER 0x0001 +#define RTDM_PURGE_TX_BUFFER 0x0002 +/** @} RTDM_PURGE_xxx_BUFFER*/ + +/*! + * @anchor common_IOCTLs @name Common IOCTLs + * The following IOCTLs are common to all device rtdm_profiles. + * @{ + */ + +/** + * Retrieve information about a device or socket. + * @param[out] arg Pointer to information buffer (struct rtdm_device_info) + */ +#define RTIOC_DEVICE_INFO \ + _IOR(RTIOC_TYPE_COMMON, 0x00, struct rtdm_device_info) + +/** + * Purge internal device or socket buffers. 
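+ *
+ * Illustrative only: flushing both directions of an assumed open RTDM
+ * file descriptor fd could be requested as
+ * @code
+ * ioctl(fd, RTIOC_PURGE, RTDM_PURGE_RX_BUFFER | RTDM_PURGE_TX_BUFFER);
+ * @endcode
+ *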
+ * @param[in] arg Purge mask, see @ref RTDM_PURGE_xxx_BUFFER + */ +#define RTIOC_PURGE _IOW(RTIOC_TYPE_COMMON, 0x10, int) +/** @} Common IOCTLs */ +/** @} rtdm */ + +/* Internally used for mapping socket functions on IOCTLs */ +struct _rtdm_getsockopt_args { + int level; + int optname; + void *optval; + socklen_t *optlen; +}; + +struct _rtdm_setsockopt_args { + int level; + int optname; + const void *optval; + socklen_t optlen; +}; + +struct _rtdm_getsockaddr_args { + struct sockaddr *addr; + socklen_t *addrlen; +}; + +struct _rtdm_setsockaddr_args { + const struct sockaddr *addr; + socklen_t addrlen; +}; + +#define _RTIOC_GETSOCKOPT _IOW(RTIOC_TYPE_COMMON, 0x20, \ + struct _rtdm_getsockopt_args) +#define _RTIOC_SETSOCKOPT _IOW(RTIOC_TYPE_COMMON, 0x21, \ + struct _rtdm_setsockopt_args) +#define _RTIOC_BIND _IOW(RTIOC_TYPE_COMMON, 0x22, \ + struct _rtdm_setsockaddr_args) +#define _RTIOC_CONNECT _IOW(RTIOC_TYPE_COMMON, 0x23, \ + struct _rtdm_setsockaddr_args) +#define _RTIOC_LISTEN _IOW(RTIOC_TYPE_COMMON, 0x24, \ + int) +#define _RTIOC_ACCEPT _IOW(RTIOC_TYPE_COMMON, 0x25, \ + struct _rtdm_getsockaddr_args) +#define _RTIOC_GETSOCKNAME _IOW(RTIOC_TYPE_COMMON, 0x26, \ + struct _rtdm_getsockaddr_args) +#define _RTIOC_GETPEERNAME _IOW(RTIOC_TYPE_COMMON, 0x27, \ + struct _rtdm_getsockaddr_args) +#define _RTIOC_SHUTDOWN _IOW(RTIOC_TYPE_COMMON, 0x28, \ + int) + +/* Internally used for mmap() */ +struct _rtdm_mmap_request { + __u64 offset; + size_t length; + int prot; + int flags; +}; + +#endif /* !_RTDM_UAPI_RTDM_H */ --- linux/include/xenomai/rtdm/uapi/gpiopwm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/gpiopwm.h 2022-03-21 12:58:32.247861092 +0100 @@ -0,0 +1,56 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, pwm header + * + * @note Copyright (C) 2015 Jorge Ramirez + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * @ingroup rttesting + */ +#ifndef _RTDM_UAPI_PWM_H +#define _RTDM_UAPI_PWM_H + +#include + +#define RTPWM_PROFILE_VER 1 + +struct gpiopwm { + unsigned int duty_cycle; + unsigned int range_min; + unsigned int range_max; + unsigned int period; + unsigned int gpio; +}; + +#define RTIOC_TYPE_PWM RTDM_CLASS_PWM + +#define GPIOPWM_RTIOC_SET_CONFIG \ + _IOW(RTIOC_TYPE_PWM, 0x00, struct gpiopwm) + +#define GPIOPWM_RTIOC_GET_CONFIG \ + _IOR(RTIOC_TYPE_PWM, 0x10, struct gpiopwm) + +#define GPIOPWM_RTIOC_START \ + _IO(RTIOC_TYPE_PWM, 0x20) + +#define GPIOPWM_RTIOC_STOP \ + _IO(RTIOC_TYPE_PWM, 0x30) + +#define GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE \ + _IOW(RTIOC_TYPE_PWM, 0x40, unsigned int) + + +#endif /* !_RTDM_UAPI_TESTING_H */ --- linux/include/xenomai/rtdm/uapi/can.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/can.h 2022-03-21 12:58:32.240861160 +0100 @@ -0,0 +1,905 @@ +/** + * @file + * Real-Time Driver Model for RT-Socket-CAN, CAN device profile header + * + * @note Copyright (C) 2006 Wolfgang Grandegger + * + * @note Copyright (C) 2005, 2006 Sebastian Smolorz + * + * + * This RTDM CAN device profile header is based on: + * + * include/linux/can.h, include/linux/socket.h, net/can/pf_can.h in + * linux-can.patch, a CAN socket framework for Linux + * + * Copyright (C) 2004, 2005, + * Robert Schwebel, Benedikt Spranger, Marc Kleine-Budde, Pengutronix + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _RTDM_UAPI_CAN_H +#define _RTDM_UAPI_CAN_H + +/** + * @addtogroup rtdm_can + * @{ + */ + +#define RTCAN_PROFILE_VER 2 + +#ifndef AF_CAN + +/** CAN address family */ +#define AF_CAN 29 + +/** CAN protocol family */ +#define PF_CAN AF_CAN + +#endif + +/** CAN socket levels + * + * Used for @ref Sockopts for the particular protocols. + */ +#define SOL_CAN_RAW 103 + +/** Type of CAN id (see @ref CAN_xxx_MASK and @ref CAN_xxx_FLAG) */ +typedef uint32_t can_id_t; +typedef uint32_t canid_t; + +/** Type of CAN error mask */ +typedef can_id_t can_err_mask_t; + +/*! + * @anchor CAN_xxx_MASK @name CAN ID masks + * Bit masks for masking CAN IDs + * @{ */ + +/** Bit mask for extended CAN IDs */ +#define CAN_EFF_MASK 0x1FFFFFFF + +/** Bit mask for standard CAN IDs */ +#define CAN_SFF_MASK 0x000007FF + +/** @} */ + +/*! + * @anchor CAN_xxx_FLAG @name CAN ID flags + * Flags within a CAN ID indicating special CAN frame attributes + * @{ */ +/** Extended frame */ +#define CAN_EFF_FLAG 0x80000000 +/** Remote transmission frame */ +#define CAN_RTR_FLAG 0x40000000 +/** Error frame (see @ref Errors), not valid in struct can_filter */ +#define CAN_ERR_FLAG 0x20000000 +/** Invert CAN filter definition, only valid in struct can_filter */ +#define CAN_INV_FILTER CAN_ERR_FLAG + +/** @} */ + +/*! 
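+ * Illustrative only (not part of the original profile text): a raw
+ * RT-Socket-CAN endpoint is typically obtained with
+ * @code
+ * s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+ * @endcode
+ * where s is an assumed int variable holding the socket descriptor.
+ *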
+ * @anchor CAN_PROTO @name Particular CAN protocols + * Possible protocols for the PF_CAN protocol family + * + * Currently only the RAW protocol is supported. + * @{ */ +/** Raw protocol of @c PF_CAN, applicable to socket type @c SOCK_RAW */ +#define CAN_RAW 1 +/** @} */ + +#define CAN_BAUDRATE_UNKNOWN ((uint32_t)-1) +#define CAN_BAUDRATE_UNCONFIGURED 0 + +/** + * Baudrate definition in bits per second + */ +typedef uint32_t can_baudrate_t; + +/** + * Supported CAN bit-time types + */ +enum CAN_BITTIME_TYPE { + /** Standard bit-time definition according to Bosch */ + CAN_BITTIME_STD, + /** Hardware-specific BTR bit-time definition */ + CAN_BITTIME_BTR +}; + +/** + * See @ref CAN_BITTIME_TYPE + */ +typedef enum CAN_BITTIME_TYPE can_bittime_type_t; + +/** + * Standard bit-time parameters according to Bosch + */ +struct can_bittime_std { + uint32_t brp; /**< Baud rate prescaler */ + uint8_t prop_seg; /**< from 1 to 8 */ + uint8_t phase_seg1; /**< from 1 to 8 */ + uint8_t phase_seg2; /**< from 1 to 8 */ + uint8_t sjw:7; /**< from 1 to 4 */ + uint8_t sam:1; /**< 1 - enable triple sampling */ +}; + +/** + * Hardware-specific BTR bit-times + */ +struct can_bittime_btr { + + uint8_t btr0; /**< Bus timing register 0 */ + uint8_t btr1; /**< Bus timing register 1 */ +}; + +/** + * Custom CAN bit-time definition + */ +struct can_bittime { + /** Type of bit-time definition */ + can_bittime_type_t type; + + union { + /** Standard bit-time */ + struct can_bittime_std std; + /** Hardware-spcific BTR bit-time */ + struct can_bittime_btr btr; + }; +}; + +/*! + * @anchor CAN_MODE @name CAN operation modes + * Modes into which CAN controllers can be set + * @{ */ +enum CAN_MODE { + /*! Set controller in Stop mode (no reception / transmission possible) */ + CAN_MODE_STOP = 0, + + /*! Set controller into normal operation. @n + * Coming from stopped mode or bus off, the controller begins with no + * errors in @ref CAN_STATE_ACTIVE. */ + CAN_MODE_START, + + /*! Set controller into Sleep mode. @n + * This is only possible if the controller is not stopped or bus-off. @n + * Notice that sleep mode will only be entered when there is no bus + * activity. If the controller detects bus activity while "sleeping" + * it will go into operating mode again. @n + * To actively leave sleep mode again trigger @c CAN_MODE_START. */ + CAN_MODE_SLEEP +}; +/** @} */ + +/** See @ref CAN_MODE */ +typedef enum CAN_MODE can_mode_t; + +/*! + * @anchor CAN_CTRLMODE @name CAN controller modes + * Special CAN controllers modes, which can be or'ed together. + * + * @note These modes are hardware-dependent. Please consult the hardware + * manual of the CAN controller for more detailed information. + * + * @{ */ + +/*! Listen-Only mode + * + * In this mode the CAN controller would give no acknowledge to the CAN-bus, + * even if a message is received successfully and messages would not be + * transmitted. This mode might be useful for bus-monitoring, hot-plugging + * or throughput analysis. */ +#define CAN_CTRLMODE_LISTENONLY 0x1 + +/*! Loopback mode + * + * In this mode the CAN controller does an internal loop-back, a message is + * transmitted and simultaneously received. That mode can be used for self + * test operation. */ +#define CAN_CTRLMODE_LOOPBACK 0x2 + +/*! Triple sampling mode + * + * In this mode the CAN controller uses Triple sampling. */ +#define CAN_CTRLMODE_3_SAMPLES 0x4 + +/** @} */ + +/** See @ref CAN_CTRLMODE */ +typedef int can_ctrlmode_t; + +/*! 
+ * @anchor CAN_STATE @name CAN controller states + * States a CAN controller can be in. + * @{ */ +enum CAN_STATE { + /** CAN controller is error active */ + CAN_STATE_ERROR_ACTIVE = 0, + /** CAN controller is active */ + CAN_STATE_ACTIVE = 0, + + /** CAN controller is error active, warning level is reached */ + CAN_STATE_ERROR_WARNING = 1, + /** CAN controller is error active, warning level is reached */ + CAN_STATE_BUS_WARNING = 1, + + /** CAN controller is error passive */ + CAN_STATE_ERROR_PASSIVE = 2, + /** CAN controller is error passive */ + CAN_STATE_BUS_PASSIVE = 2, + + /** CAN controller went into Bus Off */ + CAN_STATE_BUS_OFF, + + /** CAN controller is scanning to get the baudrate */ + CAN_STATE_SCANNING_BAUDRATE, + + /** CAN controller is in stopped mode */ + CAN_STATE_STOPPED, + + /** CAN controller is in Sleep mode */ + CAN_STATE_SLEEPING, +}; +/** @} */ + +/** See @ref CAN_STATE */ +typedef enum CAN_STATE can_state_t; + +#define CAN_STATE_OPERATING(state) ((state) < CAN_STATE_BUS_OFF) + +/** + * Filter for reception of CAN messages. + * + * This filter works as follows: + * A received CAN ID is AND'ed bitwise with @c can_mask and then compared to + * @c can_id. This also includes the @ref CAN_EFF_FLAG and @ref CAN_RTR_FLAG + * of @ref CAN_xxx_FLAG. If this comparison is true, the message will be + * received by the socket. The logic can be inverted with the @c can_id flag + * @ref CAN_INV_FILTER : + * + * @code + * if (can_id & CAN_INV_FILTER) { + * if ((received_can_id & can_mask) != (can_id & ~CAN_INV_FILTER)) + * accept-message; + * } else { + * if ((received_can_id & can_mask) == can_id) + * accept-message; + * } + * @endcode + * + * Multiple filters can be arranged in a filter list and set with + * @ref Sockopts. If one of these filters matches a CAN ID upon reception + * of a CAN frame, this frame is accepted. + * + */ +typedef struct can_filter { + /** CAN ID which must match with incoming IDs after passing the mask. + * The filter logic can be inverted with the flag @ref CAN_INV_FILTER. */ + uint32_t can_id; + + /** Mask which is applied to incoming IDs. See @ref CAN_xxx_MASK + * "CAN ID masks" if exactly one CAN ID should come through. */ + uint32_t can_mask; +} can_filter_t; + +/** + * Socket address structure for the CAN address family + */ +struct sockaddr_can { + /** CAN address family, must be @c AF_CAN */ + sa_family_t can_family; + + /** Interface index of CAN controller. See @ref SIOCGIFINDEX. */ + int can_ifindex; +}; + +/** + * Raw CAN frame + * + * Central structure for receiving and sending CAN frames. + */ +typedef struct can_frame { + /** CAN ID of the frame + * + * See @ref CAN_xxx_FLAG "CAN ID flags" for special bits. + */ + can_id_t can_id; + + /** Size of the payload in bytes */ + uint8_t can_dlc; + + /** Payload data bytes */ + uint8_t data[8] __attribute__ ((aligned(8))); +} can_frame_t; + +/** + * CAN interface request descriptor + * + * Parameter block for submitting CAN control requests. + */ +struct can_ifreq { + union { + char ifrn_name[IFNAMSIZ]; + } ifr_ifrn; + + union { + struct can_bittime bittime; + can_baudrate_t baudrate; + can_ctrlmode_t ctrlmode; + can_mode_t mode; + can_state_t state; + int ifru_ivalue; + } ifr_ifru; +}; + +/*! 
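+ * Illustrative only (not part of the original profile text): the
+ * structures defined above are typically combined as follows to bind a
+ * raw socket to a controller and send a single frame; s, "rtcan0" and
+ * the frame contents are assumptions:
+ *
+ * @code
+ * struct can_ifreq ifr;
+ * struct sockaddr_can addr = { .can_family = AF_CAN };
+ * struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
+ *			      .data = { 0xde, 0xad } };
+ *
+ * strncpy(ifr.ifr_ifrn.ifrn_name, "rtcan0", IFNAMSIZ);
+ * ioctl(s, SIOCGIFINDEX, &ifr);
+ * addr.can_ifindex = ifr.ifr_ifru.ifru_ivalue;
+ * bind(s, (struct sockaddr *)&addr, sizeof(addr));
+ * send(s, &frame, sizeof(frame), 0);
+ * @endcode
+ *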
+ * @anchor RTCAN_TIMESTAMPS @name Timestamp switches + * Arguments to pass to @ref RTCAN_RTIOC_TAKE_TIMESTAMP + * @{ */ +#define RTCAN_TAKE_NO_TIMESTAMPS 0 /**< Switch off taking timestamps */ +#define RTCAN_TAKE_TIMESTAMPS 1 /**< Do take timestamps */ +/** @} */ + +#define RTIOC_TYPE_CAN RTDM_CLASS_CAN + +/*! + * @anchor Rawsockopts @name RAW socket options + * Setting and getting CAN RAW socket options. + * @{ */ + +/** + * CAN filter definition + * + * A CAN raw filter list with elements of struct can_filter can be installed + * with @c setsockopt. This list is used upon reception of CAN frames to + * decide whether the bound socket will receive a frame. An empty filter list + * can also be defined using optlen = 0, which is recommanded for write-only + * sockets. + * @n + * If the socket was already bound with @ref Bind, the old filter list + * gets replaced with the new one. Be aware that already received, but + * not read out CAN frames may stay in the socket buffer. + * @n + * @n + * @param [in] level @b SOL_CAN_RAW + * + * @param [in] optname @b CAN_RAW_FILTER + * + * @param [in] optval Pointer to array of struct can_filter. + * + * @param [in] optlen Size of filter list: count * sizeof( struct can_filter). + * @n + * @coretags{task-unrestricted} + * @n + * Specific return values: + * - -EFAULT (It was not possible to access user space memory area at the + * specified address.) + * - -ENOMEM (Not enough memory to fulfill the operation) + * - -EINVAL (Invalid length "optlen") + * - -ENOSPC (No space to store filter list, check RT-Socket-CAN kernel + * parameters) + * . + */ +#define CAN_RAW_FILTER 0x1 + +/** + * CAN error mask + * + * A CAN error mask (see @ref Errors) can be set with @c setsockopt. This + * mask is then used to decide if error frames are delivered to this socket + * in case of error condidtions. The error frames are marked with the + * @ref CAN_ERR_FLAG of @ref CAN_xxx_FLAG and must be handled by the + * application properly. A detailed description of the errors can be + * found in the @c can_id and the @c data fields of struct can_frame + * (see @ref Errors for futher details). + * + * @n + * @param [in] level @b SOL_CAN_RAW + * + * @param [in] optname @b CAN_RAW_ERR_FILTER + * + * @param [in] optval Pointer to error mask of type can_err_mask_t. + * + * @param [in] optlen Size of error mask: sizeof(can_err_mask_t). + * + * @coretags{task-unrestricted} + * @n + * Specific return values: + * - -EFAULT (It was not possible to access user space memory area at the + * specified address.) + * - -EINVAL (Invalid length "optlen") + * . + */ +#define CAN_RAW_ERR_FILTER 0x2 + +/** + * CAN TX loopback + * + * The TX loopback to other local sockets can be selected with this + * @c setsockopt. + * + * @note The TX loopback feature must be enabled in the kernel and then + * the loopback to other local TX sockets is enabled by default. + * + * @n + * @param [in] level @b SOL_CAN_RAW + * + * @param [in] optname @b CAN_RAW_LOOPBACK + * + * @param [in] optval Pointer to integer value. + * + * @param [in] optlen Size of int: sizeof(int). + * + * @coretags{task-unrestricted} + * @n + * Specific return values: + * - -EFAULT (It was not possible to access user space memory area at the + * specified address.) + * - -EINVAL (Invalid length "optlen") + * - -EOPNOTSUPP (not supported, check RT-Socket-CAN kernel parameters). + */ +#define CAN_RAW_LOOPBACK 0x3 + +/** + * CAN receive own messages + * + * Not supported by RT-Socket-CAN, but defined for compatibility with + * Socket-CAN. 
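+ *
+ * As an illustration of the CAN_RAW_FILTER option documented above (purely
+ * a sketch, s being an assumed raw CAN socket descriptor), a single-entry
+ * filter list accepting only standard ID 0x123 could be installed with:
+ *
+ * @code
+ * struct can_filter flist[1] = {
+ *	{ .can_id = 0x123, .can_mask = CAN_SFF_MASK }
+ * };
+ * setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, flist, sizeof(flist));
+ * @endcode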
+ */ +#define CAN_RAW_RECV_OWN_MSGS 0x4 + +/** @} */ + +/*! + * @anchor CANIOCTLs @name IOCTLs + * CAN device IOCTLs + * + * @deprecated Passing \c struct \c ifreq as a request descriptor + * for CAN IOCTLs is still accepted for backward compatibility, + * however it is recommended to switch to \c struct \c can_ifreq at + * the first opportunity. + * + * @{ */ + +/** + * Get CAN interface index by name + * + * @param [in,out] arg Pointer to interface request structure buffer + * (struct can_ifreq). If + * ifr_name holds a valid CAN interface + * name ifr_ifindex will be filled with + * the corresponding interface index. + * + * @return 0 on success, otherwise: + * - -EFAULT: It was not possible to access user space memory area at the + * specified address. + * - -ENODEV: No device with specified name exists. + * + * @coretags{task-unrestricted} + */ +#ifdef DOXYGEN_CPP /* For Doxygen only, already defined by kernel headers */ +#define SIOCGIFINDEX defined_by_kernel_header_file +#endif + +/** + * Set baud rate + * + * The baudrate must be specified in bits per second. The driver will + * try to calculate resonable CAN bit-timing parameters. You can use + * @ref SIOCSCANCUSTOMBITTIME to set custom bit-timing. + * + * @param [in] arg Pointer to interface request structure buffer + * (struct can_ifreq). + * ifr_name must hold a valid CAN interface name, + * ifr_ifru must be filled with an instance of + * @ref can_baudrate_t. + * + * @return 0 on success, otherwise: + * - -EFAULT: It was not possible to access user space memory area at the + * specified address. + * - -ENODEV: No device with specified name exists. + * - -EINVAL: No valid baud rate, see @ref can_baudrate_t. + * - -EDOM : Baud rate not possible. + * - -EAGAIN: Request could not be successully fulfilled. Try again. + * + * @coretags{task-unrestricted, might-switch} + * + * @note Setting the baud rate is a configuration task. It should + * be done deliberately or otherwise CAN messages will likely be lost. + */ +#define SIOCSCANBAUDRATE _IOW(RTIOC_TYPE_CAN, 0x01, struct can_ifreq) + +/** + * Get baud rate + * + * @param [in,out] arg Pointer to interface request structure buffer + * (struct can_ifreq). + * ifr_name must hold a valid CAN interface name, + * ifr_ifru will be filled with an instance of + * @ref can_baudrate_t. + * + * @return 0 on success, otherwise: + * - -EFAULT: It was not possible to access user space memory area at the + * specified address. + * - -ENODEV: No device with specified name exists. + * - -EINVAL: No baud rate was set yet. + * + * @coretags{task-unrestricted} + */ +#define SIOCGCANBAUDRATE _IOWR(RTIOC_TYPE_CAN, 0x02, struct can_ifreq) + +/** + * Set custom bit time parameter + * + * Custem-bit time could be defined in various formats (see + * struct can_bittime). + * + * @param [in] arg Pointer to interface request structure buffer + * (struct can_ifreq). + * ifr_name must hold a valid CAN interface name, + * ifr_ifru must be filled with an instance of + * struct can_bittime. + * + * @return 0 on success, otherwise: + * - -EFAULT: It was not possible to access user space memory area at the + * specified address. + * - -ENODEV: No device with specified name exists. + * - -EINVAL: No valid baud rate, see @ref can_baudrate_t. + * - -EAGAIN: Request could not be successully fulfilled. Try again. + * + * @coretags{task-unrestricted, might-switch} + * + * @note Setting the bit-time is a configuration task. It should + * be done deliberately or otherwise CAN messages will likely be lost. 
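+ *
+ * Purely illustrative sketch (not part of the original text): passing
+ * hardware-specific BTR values, here arbitrary placeholders, for an
+ * assumed socket descriptor s:
+ *
+ * @code
+ * struct can_ifreq ifr;
+ *
+ * strncpy(ifr.ifr_ifrn.ifrn_name, "rtcan0", IFNAMSIZ);
+ * ifr.ifr_ifru.bittime.type = CAN_BITTIME_BTR;
+ * ifr.ifr_ifru.bittime.btr.btr0 = 0x00;	/* controller-specific */
+ * ifr.ifr_ifru.bittime.btr.btr1 = 0x14;	/* controller-specific */
+ * ioctl(s, SIOCSCANCUSTOMBITTIME, &ifr);
+ * @endcode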
+ */ +#define SIOCSCANCUSTOMBITTIME _IOW(RTIOC_TYPE_CAN, 0x03, struct can_ifreq) + +/** + * Get custom bit-time parameters + * + * @param [in,out] arg Pointer to interface request structure buffer + * (struct can_ifreq). + * ifr_name must hold a valid CAN interface name, + * ifr_ifru will be filled with an instance of + * struct can_bittime. + * + * @return 0 on success, otherwise: + * - -EFAULT: It was not possible to access user space memory area at the + * specified address. + * - -ENODEV: No device with specified name exists. + * - -EINVAL: No baud rate was set yet. + * + * @coretags{task-unrestricted} + */ +#define SIOCGCANCUSTOMBITTIME _IOWR(RTIOC_TYPE_CAN, 0x04, struct can_ifreq) + +/** + * Set operation mode of CAN controller + * + * See @ref CAN_MODE "CAN controller modes" for available modes. + * + * @param [in] arg Pointer to interface request structure buffer + * (struct can_ifreq). + * ifr_name must hold a valid CAN interface name, + * ifr_ifru must be filled with an instance of + * @ref can_mode_t. + * + * @return 0 on success, otherwise: + * - -EFAULT: It was not possible to access user space memory area at the + * specified address. + * - -ENODEV: No device with specified name exists. + * - -EAGAIN: (@ref CAN_MODE_START, @ref CAN_MODE_STOP) Could not successfully + * set mode, hardware is busy. Try again. + * - -EINVAL: (@ref CAN_MODE_START) Cannot start controller, + * set baud rate first. + * - -ENETDOWN: (@ref CAN_MODE_SLEEP) Cannot go into sleep mode because + controller is stopped or bus off. + * - -EOPNOTSUPP: unknown mode + * + * @coretags{task-unrestricted, might-switch} + * + * @note Setting a CAN controller into normal operation after a bus-off can + * take some time (128 occurrences of 11 consecutive recessive bits). + * In such a case, although this IOCTL will return immediately with success + * and @ref SIOCGCANSTATE will report @ref CAN_STATE_ACTIVE, + * bus-off recovery may still be in progress. @n + * If a controller is bus-off, setting it into stop mode will return no error + * but the controller remains bus-off. + */ +#define SIOCSCANMODE _IOW(RTIOC_TYPE_CAN, 0x05, struct can_ifreq) + +/** + * Get current state of CAN controller + * + * States are divided into main states and additional error indicators. A CAN + * controller is always in exactly one main state. CAN bus errors are + * registered by the CAN hardware and collected by the driver. There is one + * error indicator (bit) per error type. If this IOCTL is triggered the error + * types which occured since the last call of this IOCTL are reported and + * thereafter the error indicators are cleared. See also + * @ref CAN_STATE "CAN controller states". + * + * @param [in,out] arg Pointer to interface request structure buffer + * (struct can_ifreq). + * ifr_name must hold a valid CAN interface name, + * ifr_ifru will be filled with an instance of + * @ref can_mode_t. + * + * @return 0 on success, otherwise: + * - -EFAULT: It was not possible to access user space memory area at the + * specified address. + * - -ENODEV: No device with specified name exists. + * + * @coretags{task-unrestricted, might-switch} + */ +#define SIOCGCANSTATE _IOWR(RTIOC_TYPE_CAN, 0x06, struct can_ifreq) + +/** + * Set special controller modes + * + * Various special controller modes could be or'ed together (see + * @ref CAN_CTRLMODE for further information). + * + * @param [in] arg Pointer to interface request structure buffer + * (struct can_ifreq). 
+ * ifr_name must hold a valid CAN interface name,
+ * ifr_ifru must be filled with an instance of
+ * @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ * specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting special controller modes is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCTRLMODE _IOW(RTIOC_TYPE_CAN, 0x07, struct can_ifreq)
+
+/**
+ * Get special controller modes
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ * (struct can_ifreq).
+ * ifr_name must hold a valid CAN interface name,
+ * ifr_ifru will be filled with an instance of
+ * @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ * specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANCTRLMODE _IOWR(RTIOC_TYPE_CAN, 0x08, struct can_ifreq)
+
+/**
+ * Enable or disable storing a high precision timestamp upon reception of
+ * a CAN frame.
+ *
+ * A newly created socket takes no timestamps by default.
+ *
+ * @param [in] arg int variable, see @ref RTCAN_TIMESTAMPS "Timestamp switches"
+ *
+ * @return 0 on success.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Activating timestamps only affects CAN messages received from the
+ * bus after the change. Frames already queued in the socket buffer carry no
+ * timestamp if timestamping was disabled when they arrived. See
+ * @ref Recv "Receive" for more details.
+ */
+#define RTCAN_RTIOC_TAKE_TIMESTAMP _IOW(RTIOC_TYPE_CAN, 0x09, int)
+
+/**
+ * Specify a reception timeout for a socket
+ *
+ * Defines a timeout for all receive operations via a
+ * socket which will take effect when one of the @ref Recv "receive functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before receiving messages from the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. The value is
+ * interpreted as relative timeout in nanoseconds in case
+ * of a positive value.
+ * See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ * specified address.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_RCV_TIMEOUT _IOW(RTIOC_TYPE_CAN, 0x0A, nanosecs_rel_t)
+
+/**
+ * Specify a transmission timeout for a socket
+ *
+ * Defines a timeout for all send operations via a
+ * socket which will take effect when one of the @ref Send "send functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before sending messages to the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable.
The value is + * interpreted as relative timeout in nanoseconds in case + * of a positive value. + * See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts. + * + * @return 0 on success, otherwise: + * - -EFAULT: It was not possible to access user space memory area at the + * specified address. + * + * @coretags{task-unrestricted} + */ +#define RTCAN_RTIOC_SND_TIMEOUT _IOW(RTIOC_TYPE_CAN, 0x0B, nanosecs_rel_t) +/** @} */ + +#define CAN_ERR_DLC 8 /* dlc for error frames */ + +/*! + * @anchor Errors @name Error mask + * Error class (mask) in @c can_id field of struct can_frame to + * be used with @ref CAN_RAW_ERR_FILTER. + * + * @b Note: Error reporting is hardware dependent and most CAN controllers + * report less detailed error conditions than the SJA1000. + * + * @b Note: In case of a bus-off error condition (@ref CAN_ERR_BUSOFF), the + * CAN controller is @b not restarted automatically. It is the application's + * responsibility to react appropriately, e.g. calling @ref CAN_MODE_START. + * + * @b Note: Bus error interrupts (@ref CAN_ERR_BUSERROR) are enabled when an + * application is calling a @ref Recv function on a socket listening + * on bus errors (using @ref CAN_RAW_ERR_FILTER). After one bus error has + * occured, the interrupt will be disabled to allow the application time for + * error processing and to efficiently avoid bus error interrupt flooding. + * @{ */ + +/** TX timeout (netdevice driver) */ +#define CAN_ERR_TX_TIMEOUT 0x00000001U + +/** Lost arbitration (see @ref Error0 "data[0]") */ +#define CAN_ERR_LOSTARB 0x00000002U + +/** Controller problems (see @ref Error1 "data[1]") */ +#define CAN_ERR_CRTL 0x00000004U + +/** Protocol violations (see @ref Error2 "data[2]", + @ref Error3 "data[3]") */ +#define CAN_ERR_PROT 0x00000008U + +/** Transceiver status (see @ref Error4 "data[4]") */ +#define CAN_ERR_TRX 0x00000010U + +/** Received no ACK on transmission */ +#define CAN_ERR_ACK 0x00000020U + +/** Bus off */ +#define CAN_ERR_BUSOFF 0x00000040U + +/** Bus error (may flood!) */ +#define CAN_ERR_BUSERROR 0x00000080U + +/** Controller restarted */ +#define CAN_ERR_RESTARTED 0x00000100U + +/** Omit EFF, RTR, ERR flags */ +#define CAN_ERR_MASK 0x1FFFFFFFU + +/** @} */ + +/*! + * @anchor Error0 @name Arbitration lost error + * Error in the data[0] field of struct can_frame. + * @{ */ +/* arbitration lost in bit ... / data[0] */ +#define CAN_ERR_LOSTARB_UNSPEC 0x00 /**< unspecified */ + /**< else bit number in bitstream */ +/** @} */ + +/*! + * @anchor Error1 @name Controller problems + * Error in the data[1] field of struct can_frame. + * @{ */ +/* error status of CAN-controller / data[1] */ +#define CAN_ERR_CRTL_UNSPEC 0x00 /**< unspecified */ +#define CAN_ERR_CRTL_RX_OVERFLOW 0x01 /**< RX buffer overflow */ +#define CAN_ERR_CRTL_TX_OVERFLOW 0x02 /**< TX buffer overflow */ +#define CAN_ERR_CRTL_RX_WARNING 0x04 /**< reached warning level for RX errors */ +#define CAN_ERR_CRTL_TX_WARNING 0x08 /**< reached warning level for TX errors */ +#define CAN_ERR_CRTL_RX_PASSIVE 0x10 /**< reached passive level for RX errors */ +#define CAN_ERR_CRTL_TX_PASSIVE 0x20 /**< reached passive level for TX errors */ +/** @} */ + +/*! + * @anchor Error2 @name Protocol error type + * Error in the data[2] field of struct can_frame. 
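+ *
+ * As an illustration of how these indicators are typically evaluated,
+ * an application listening for error frames might proceed as follows
+ * (a sketch only; it assumes a suitable error mask was set beforehand
+ * via @ref CAN_RAW_ERR_FILTER, as described above):
+ *
+ * @code
+ * struct can_frame frame;
+ *
+ * recv(sock, &frame, sizeof(frame), 0);
+ * if (frame.can_id & CAN_ERR_BUSOFF) {
+ *         // controller is bus-off; restart it, e.g. via CAN_MODE_START
+ * }
+ * if (frame.can_id & CAN_ERR_PROT) {
+ *         if (frame.data[2] & CAN_ERR_PROT_STUFF) {
+ *                 // bit stuffing error; data[3] tells where it occurred
+ *         }
+ * }
+ * @endcode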
+ * @{ */ +/* error in CAN protocol (type) / data[2] */ +#define CAN_ERR_PROT_UNSPEC 0x00 /**< unspecified */ +#define CAN_ERR_PROT_BIT 0x01 /**< single bit error */ +#define CAN_ERR_PROT_FORM 0x02 /**< frame format error */ +#define CAN_ERR_PROT_STUFF 0x04 /**< bit stuffing error */ +#define CAN_ERR_PROT_BIT0 0x08 /**< unable to send dominant bit */ +#define CAN_ERR_PROT_BIT1 0x10 /**< unable to send recessive bit */ +#define CAN_ERR_PROT_OVERLOAD 0x20 /**< bus overload */ +#define CAN_ERR_PROT_ACTIVE 0x40 /**< active error announcement */ +#define CAN_ERR_PROT_TX 0x80 /**< error occured on transmission */ +/** @} */ + +/*! + * @anchor Error3 @name Protocol error location + * Error in the data[3] field of struct can_frame. + * @{ */ +/* error in CAN protocol (location) / data[3] */ +#define CAN_ERR_PROT_LOC_UNSPEC 0x00 /**< unspecified */ +#define CAN_ERR_PROT_LOC_SOF 0x03 /**< start of frame */ +#define CAN_ERR_PROT_LOC_ID28_21 0x02 /**< ID bits 28 - 21 (SFF: 10 - 3) */ +#define CAN_ERR_PROT_LOC_ID20_18 0x06 /**< ID bits 20 - 18 (SFF: 2 - 0 )*/ +#define CAN_ERR_PROT_LOC_SRTR 0x04 /**< substitute RTR (SFF: RTR) */ +#define CAN_ERR_PROT_LOC_IDE 0x05 /**< identifier extension */ +#define CAN_ERR_PROT_LOC_ID17_13 0x07 /**< ID bits 17-13 */ +#define CAN_ERR_PROT_LOC_ID12_05 0x0F /**< ID bits 12-5 */ +#define CAN_ERR_PROT_LOC_ID04_00 0x0E /**< ID bits 4-0 */ +#define CAN_ERR_PROT_LOC_RTR 0x0C /**< RTR */ +#define CAN_ERR_PROT_LOC_RES1 0x0D /**< reserved bit 1 */ +#define CAN_ERR_PROT_LOC_RES0 0x09 /**< reserved bit 0 */ +#define CAN_ERR_PROT_LOC_DLC 0x0B /**< data length code */ +#define CAN_ERR_PROT_LOC_DATA 0x0A /**< data section */ +#define CAN_ERR_PROT_LOC_CRC_SEQ 0x08 /**< CRC sequence */ +#define CAN_ERR_PROT_LOC_CRC_DEL 0x18 /**< CRC delimiter */ +#define CAN_ERR_PROT_LOC_ACK 0x19 /**< ACK slot */ +#define CAN_ERR_PROT_LOC_ACK_DEL 0x1B /**< ACK delimiter */ +#define CAN_ERR_PROT_LOC_EOF 0x1A /**< end of frame */ +#define CAN_ERR_PROT_LOC_INTERM 0x12 /**< intermission */ +/** @} */ + +/*! + * @anchor Error4 @name Protocol error location + * Error in the data[4] field of struct can_frame. + * @{ */ +/* error status of CAN-transceiver / data[4] */ +/* CANH CANL */ +#define CAN_ERR_TRX_UNSPEC 0x00 /**< 0000 0000 */ +#define CAN_ERR_TRX_CANH_NO_WIRE 0x04 /**< 0000 0100 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_BAT 0x05 /**< 0000 0101 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_VCC 0x06 /**< 0000 0110 */ +#define CAN_ERR_TRX_CANH_SHORT_TO_GND 0x07 /**< 0000 0111 */ +#define CAN_ERR_TRX_CANL_NO_WIRE 0x40 /**< 0100 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_BAT 0x50 /**< 0101 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_VCC 0x60 /**< 0110 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_GND 0x70 /**< 0111 0000 */ +#define CAN_ERR_TRX_CANL_SHORT_TO_CANH 0x80 /**< 1000 0000 */ +/** @} */ + +/** @} */ + +#endif /* !_RTDM_UAPI_CAN_H */ --- linux/include/xenomai/rtdm/uapi/net.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/net.h 2022-03-21 12:58:32.232861238 +0100 @@ -0,0 +1,75 @@ +/*** + * + * RTnet - real-time networking subsystem + * Copyright (C) 2005-2011 Jan Kiszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * As a special exception to the GNU General Public license, the RTnet + * project allows you to use this header file in unmodified form to produce + * application programs executing in user-space which use RTnet services by + * normal system calls. The resulting executable will not be covered by the + * GNU General Public License merely as a result of this header file use. + * Instead, this header file use will be considered normal use of RTnet and + * not a "derived work" in the sense of the GNU General Public License. + * + * This exception does not apply when the application code is built as a + * static or dynamically loadable portion of the Linux kernel nor does the + * exception override other reasons justifying application of the GNU General + * Public License. + * + * This exception applies only to the code released by the RTnet project + * under the name RTnet and bearing this exception notice. If you copy code + * from other sources into a copy of RTnet, the exception does not apply to + * the code that you add in this way. + * + */ + +#ifndef _RTDM_UAPI_NET_H +#define _RTDM_UAPI_NET_H + +/* sub-classes: RTDM_CLASS_NETWORK */ +#define RTDM_SUBCLASS_RTNET 0 + +#define RTIOC_TYPE_NETWORK RTDM_CLASS_NETWORK + +/* RTnet-specific IOCTLs */ +#define RTNET_RTIOC_XMITPARAMS _IOW(RTIOC_TYPE_NETWORK, 0x10, unsigned int) +#define RTNET_RTIOC_PRIORITY RTNET_RTIOC_XMITPARAMS /* legacy */ +#define RTNET_RTIOC_TIMEOUT _IOW(RTIOC_TYPE_NETWORK, 0x11, int64_t) +/* RTNET_RTIOC_CALLBACK _IOW(RTIOC_TYPE_NETWORK, 0x12, ... + * IOCTL only usable inside the kernel. */ +/* RTNET_RTIOC_NONBLOCK _IOW(RTIOC_TYPE_NETWORK, 0x13, unsigned int) + * This IOCTL is no longer supported (and it was buggy anyway). + * Use RTNET_RTIOC_TIMEOUT with any negative timeout value instead. */ +#define RTNET_RTIOC_EXTPOOL _IOW(RTIOC_TYPE_NETWORK, 0x14, unsigned int) +#define RTNET_RTIOC_SHRPOOL _IOW(RTIOC_TYPE_NETWORK, 0x15, unsigned int) + +/* socket transmission priorities */ +#define SOCK_MAX_PRIO 0 +#define SOCK_DEF_PRIO SOCK_MAX_PRIO + \ + (SOCK_MIN_PRIO-SOCK_MAX_PRIO+1)/2 +#define SOCK_MIN_PRIO SOCK_NRT_PRIO - 1 +#define SOCK_NRT_PRIO 31 + +/* socket transmission channels */ +#define SOCK_DEF_RT_CHANNEL 0 /* default rt xmit channel */ +#define SOCK_DEF_NRT_CHANNEL 1 /* default non-rt xmit channel */ +#define SOCK_USER_CHANNEL 2 /* first user-defined channel */ + +/* argument construction for RTNET_RTIOC_XMITPARAMS */ +#define SOCK_XMIT_PARAMS(priority, channel) ((priority) | ((channel) << 16)) + +#endif /* !_RTDM_UAPI_NET_H */ --- linux/include/xenomai/rtdm/uapi/spi.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/spi.h 2022-03-21 12:58:32.225861306 +0100 @@ -0,0 +1,42 @@ +/** + * @note Copyright (C) 2016 Philippe Gerum + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _RTDM_UAPI_SPI_H +#define _RTDM_UAPI_SPI_H + +#include + +struct rtdm_spi_config { + __u32 speed_hz; + __u16 mode; + __u8 bits_per_word; +}; + +struct rtdm_spi_iobufs { + __u32 io_len; + __u32 i_offset; + __u32 o_offset; + __u32 map_len; +}; + +#define SPI_RTIOC_SET_CONFIG _IOW(RTDM_CLASS_SPI, 0, struct rtdm_spi_config) +#define SPI_RTIOC_GET_CONFIG _IOR(RTDM_CLASS_SPI, 1, struct rtdm_spi_config) +#define SPI_RTIOC_SET_IOBUFS _IOR(RTDM_CLASS_SPI, 2, struct rtdm_spi_iobufs) +#define SPI_RTIOC_TRANSFER _IO(RTDM_CLASS_SPI, 3) +#define SPI_RTIOC_TRANSFER_N _IOR(RTDM_CLASS_SPI, 4, int) + +#endif /* !_RTDM_UAPI_SPI_H */ --- linux/include/xenomai/rtdm/uapi/autotune.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/uapi/autotune.h 2022-03-21 12:58:32.217861384 +0100 @@ -0,0 +1,40 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2014 Philippe Gerum + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _RTDM_UAPI_AUTOTUNE_H +#define _RTDM_UAPI_AUTOTUNE_H + +#include + +#define RTDM_CLASS_AUTOTUNE RTDM_CLASS_MISC +#define RTDM_SUBCLASS_AUTOTUNE 0 + +struct autotune_setup { + __u32 period; + __u32 quiet; +}; + +#define AUTOTUNE_RTIOC_IRQ _IOW(RTDM_CLASS_AUTOTUNE, 0, struct autotune_setup) +#define AUTOTUNE_RTIOC_KERN _IOW(RTDM_CLASS_AUTOTUNE, 1, struct autotune_setup) +#define AUTOTUNE_RTIOC_USER _IOW(RTDM_CLASS_AUTOTUNE, 2, struct autotune_setup) +#define AUTOTUNE_RTIOC_PULSE _IOW(RTDM_CLASS_AUTOTUNE, 3, __u64) +#define AUTOTUNE_RTIOC_RUN _IOR(RTDM_CLASS_AUTOTUNE, 4, __u32) +#define AUTOTUNE_RTIOC_RESET _IO(RTDM_CLASS_AUTOTUNE, 5) + +#endif /* !_RTDM_UAPI_AUTOTUNE_H */ --- linux/include/xenomai/rtdm/ipc.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/ipc.h 2022-03-21 12:58:31.927864212 +0100 @@ -0,0 +1,30 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2009 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _COBALT_RTDM_IPC_H +#define _COBALT_RTDM_IPC_H + +#include +#include +#include +#include +#include + +#endif /* !_COBALT_RTDM_IPC_H */ --- linux/include/xenomai/rtdm/udd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/udd.h 2022-03-21 12:58:31.920864280 +0100 @@ -0,0 +1,340 @@ +/** + * @file + * Copyright (C) 2014 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_UDD_H +#define _COBALT_RTDM_UDD_H + +#include +#include +#include + +/** + * @ingroup rtdm_profiles + * @defgroup rtdm_udd User-space driver core + * + * This profile includes all mini-drivers sitting on top of the + * User-space Device Driver framework (UDD). The generic UDD core + * driver enables interrupt control and I/O memory access interfaces + * to user-space device drivers, as defined by the mini-drivers when + * registering. + * + * A mini-driver supplements the UDD core with ancillary functions for + * dealing with @ref udd_memory_region "memory mappings" and @ref + * udd_irq_handler "interrupt control" for a particular I/O + * card/device. + * + * UDD-compliant mini-drivers only have to provide the basic support + * for dealing with the interrupt sources present in the device, so + * that most part of the device requests can be handled from a Xenomai + * application running in user-space. Typically, a mini-driver would + * handle the interrupt top-half, and the user-space application would + * handle the bottom-half. + * + * This profile is reminiscent of the UIO framework available with the + * Linux kernel, adapted to the dual kernel Cobalt environment. + * + * @{ + */ + +/** + * @anchor udd_irq_special + * Special IRQ values for udd_device.irq + * + * @{ + */ +/** + * No IRQ managed. Passing this code implicitly disables all + * interrupt-related services, including control (disable/enable) and + * notification. + */ +#define UDD_IRQ_NONE 0 +/** + * IRQ directly managed from the mini-driver on top of the UDD + * core. The mini-driver is in charge of attaching the handler(s) to + * the IRQ(s) it manages, notifying the Cobalt threads waiting for IRQ + * events by calling the udd_notify_event() service. + */ +#define UDD_IRQ_CUSTOM (-1) +/** @} */ + +/** + * @anchor udd_memory_types @name Memory types for mapping + * Types of memory for mapping + * + * The UDD core implements a default ->mmap() handler which first + * attempts to hand over the request to the corresponding handler + * defined by the mini-driver. 
If not present, the UDD core + * establishes the mapping automatically, depending on the memory + * type defined for the region. + * + * @{ + */ +/** + * No memory region. Use this type code to disable an entry in the + * array of memory mappings, i.e. udd_device.mem_regions[]. + */ +#define UDD_MEM_NONE 0 +/** + * Physical I/O memory region. By default, the UDD core maps such + * memory to a virtual user range by calling the rtdm_mmap_iomem() + * service. + */ +#define UDD_MEM_PHYS 1 +/** + * Kernel logical memory region (e.g. kmalloc()). By default, the UDD + * core maps such memory to a virtual user range by calling the + * rtdm_mmap_kmem() service. */ +#define UDD_MEM_LOGICAL 2 +/** + * Virtual memory region with no direct physical mapping + * (e.g. vmalloc()). By default, the UDD core maps such memory to a + * virtual user range by calling the rtdm_mmap_vmem() service. + */ +#define UDD_MEM_VIRTUAL 3 +/** @} */ + +#define UDD_NR_MAPS 5 + +/** + * @anchor udd_memory_region + * UDD memory region descriptor. + * + * This descriptor defines the characteristics of a memory region + * declared to the UDD core by the mini-driver. All valid regions + * should be declared in the udd_device.mem_regions[] array, + * invalid/unassigned ones should bear the UDD_MEM_NONE type. + * + * The UDD core exposes each region via the mmap(2) interface to the + * application. To this end, a companion mapper device is created + * automatically when registering the mini-driver. + * + * The mapper device creates special files in the RTDM namespace for + * reaching the individual regions, which the application can open + * then map to its address space via the mmap(2) system call. + * + * For instance, declaring a region of physical memory at index #2 of + * the memory region array could be done as follows: + * + * @code + * static struct udd_device udd; + * + * static int foocard_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) + * { + * udd.device_name = "foocard"; + * ... + * udd.mem_regions[2].name = "ADC"; + * udd.mem_regions[2].addr = pci_resource_start(dev, 1); + * udd.mem_regions[2].len = pci_resource_len(dev, 1); + * udd.mem_regions[2].type = UDD_MEM_PHYS; + * ... + * return udd_register_device(&udd); + * } + * @endcode + * + * This will make such region accessible via the mapper device using + * the following sequence of code (see note), via the default + * ->mmap() handler from the UDD core: + * + * @code + * int fd, fdm; + * void *p; + * + * fd = open("/dev/rtdm/foocard", O_RDWR); + * fdm = open("/dev/rtdm/foocard,mapper2", O_RDWR); + * p = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fdm, 0); + * @endcode + * + * if no valid region has been declared in the + * udd_device.mem_regions[] array, no mapper device is created. + * + * @note The example code assumes that @ref cobalt_api POSIX symbol + * wrapping is in effect, so that RTDM performs the memory mapping + * operation (not the regular kernel). + */ +struct udd_memregion { + /** Name of the region (informational but required) */ + const char *name; + /** + * Start address of the region. This may be a physical or + * virtual address, depending on the @ref udd_memory_types + * "memory type". + */ + unsigned long addr; + /** + * Length (in bytes) of the region. This value must be + * PAGE_SIZE aligned. + */ + size_t len; + /** + * Type of the region. See the discussion about @ref + * udd_memory_types "UDD memory types" for possible values. + */ + int type; +}; + +/** + * @anchor udd_device + * UDD device descriptor. 
+ * + * This descriptor defines the characteristics of a UDD-based + * mini-driver when registering via a call to udd_register_device(). + */ +struct udd_device { + /** + * Name of the device managed by the mini-driver, appears + * automatically in the /dev/rtdm namespace upon creation. + */ + const char *device_name; + /** + * Additional device flags (e.g. RTDM_EXCLUSIVE) + * RTDM_NAMED_DEVICE may be omitted). + */ + int device_flags; + /** + * Subclass code of the device managed by the mini-driver (see + * RTDM_SUBCLASS_xxx definition in the @ref rtdm_profiles + * "Device Profiles"). The main class code is pre-set to + * RTDM_CLASS_UDD. + */ + int device_subclass; + struct { + /** + * Ancillary open() handler, optional. See + * rtdm_open_handler(). + * + * @note This handler is called from secondary mode + * only. + */ + int (*open)(struct rtdm_fd *fd, int oflags); + /** + * Ancillary close() handler, optional. See + * rtdm_close_handler(). + * + * @note This handler is called from secondary mode + * only. + */ + void (*close)(struct rtdm_fd *fd); + /** + * Ancillary ioctl() handler, optional. See + * rtdm_ioctl_handler(). + * + * If this routine returns -ENOSYS, the default action + * implemented by the UDD core for the corresponding + * request will be applied, as if no ioctl handler had + * been defined. + * + * @note This handler is called from primary mode + * only. + */ + int (*ioctl)(struct rtdm_fd *fd, + unsigned int request, void *arg); + /** + * Ancillary mmap() handler for the mapper device, + * optional. See rtdm_mmap_handler(). The mapper + * device operates on a valid region defined in the @a + * mem_regions[] array. A pointer to the region + * can be obtained by a call to udd_get_region(). + * + * If this handler is NULL, the UDD core establishes + * the mapping automatically, depending on the memory + * type defined for the region. + * + * @note This handler is called from secondary mode + * only. + */ + int (*mmap)(struct rtdm_fd *fd, + struct vm_area_struct *vma); + /** + * @anchor udd_irq_handler + * + * Ancillary handler for receiving interrupts. This + * handler must be provided if the mini-driver hands + * over IRQ handling to the UDD core, by setting the + * @a irq field to a valid value, different from + * UDD_IRQ_CUSTOM and UDD_IRQ_NONE. + * + * The ->interrupt() handler shall return one of the + * following status codes: + * + * - RTDM_IRQ_HANDLED, if the mini-driver successfully + * handled the IRQ. This flag can be combined with + * RTDM_IRQ_DISABLE to prevent the Cobalt kernel from + * re-enabling the interrupt line upon return, + * otherwise it is re-enabled automatically. + * + * - RTDM_IRQ_NONE, if the interrupt does not match + * any IRQ the mini-driver can handle. + * + * Once the ->interrupt() handler has returned, the + * UDD core notifies user-space Cobalt threads waiting + * for IRQ events (if any). + * + * @note This handler is called from primary mode + * only. + */ + int (*interrupt)(struct udd_device *udd); + } ops; + /** + * IRQ number. If valid, the UDD core manages the + * corresponding interrupt line, installing a base handler. + * Otherwise, a special value can be passed for declaring + * @ref udd_irq_special "unmanaged IRQs". + */ + int irq; + /** + * Array of memory regions defined by the device. The array + * can be sparse, with some entries bearing the UDD_MEM_NONE + * type interleaved with valid ones. See the discussion about + * @ref udd_memory_region "UDD memory regions". 
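+ *
+ * Relating to the ->interrupt() handler described above, a minimal
+ * implementation might look like this (a sketch only; foocard_ack_irq()
+ * is a made-up helper standing for whatever device access is needed to
+ * check and acknowledge the interrupt):
+ *
+ * @code
+ * static int foocard_interrupt(struct udd_device *udd)
+ * {
+ *         if (!foocard_ack_irq())
+ *                 return RTDM_IRQ_NONE;  // not our interrupt
+ *
+ *         // the UDD core notifies waiting Cobalt threads on return
+ *         return RTDM_IRQ_HANDLED;
+ * }
+ * @endcode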
+ */ + struct udd_memregion mem_regions[UDD_NR_MAPS]; + /** Reserved to the UDD core. */ + struct udd_reserved { + rtdm_irq_t irqh; + u32 event_count; + struct udd_signotify signfy; + struct rtdm_event pulse; + struct rtdm_driver driver; + struct rtdm_device device; + struct rtdm_driver mapper_driver; + struct udd_mapper { + struct udd_device *udd; + struct rtdm_device dev; + } mapdev[UDD_NR_MAPS]; + char *mapper_name; + int nr_maps; + } __reserved; +}; + +int udd_register_device(struct udd_device *udd); + +int udd_unregister_device(struct udd_device *udd); + +struct udd_device *udd_get_device(struct rtdm_fd *fd); + +void udd_notify_event(struct udd_device *udd); + +void udd_enable_irq(struct udd_device *udd, + rtdm_event_t *done); + +void udd_disable_irq(struct udd_device *udd, + rtdm_event_t *done); + +/** @} */ + +#endif /* !_COBALT_RTDM_UDD_H */ --- linux/include/xenomai/rtdm/testing.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/testing.h 2022-03-21 12:58:31.912864359 +0100 @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2005 Jan Kiszka + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_TESTING_H +#define _COBALT_RTDM_TESTING_H + +#include +#include + +#ifdef CONFIG_XENO_ARCH_SYS3264 + +#include + +struct compat_rttst_overall_bench_res { + struct rttst_bench_res result; + compat_uptr_t histogram_avg; + compat_uptr_t histogram_min; + compat_uptr_t histogram_max; +}; + +struct compat_rttst_heap_stathdr { + int nrstats; + compat_uptr_t buf; +}; + +#define RTTST_RTIOC_TMBENCH_STOP_COMPAT \ + _IOWR(RTIOC_TYPE_TESTING, 0x11, struct compat_rttst_overall_bench_res) + +#endif /* CONFIG_XENO_ARCH_SYS3264 */ + +#endif /* !_COBALT_RTDM_TESTING_H */ --- linux/include/xenomai/rtdm/gpio.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/gpio.h 2022-03-21 12:58:31.905864427 +0100 @@ -0,0 +1,82 @@ +/** + * Copyright (C) 2016 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_RTDM_GPIO_H +#define _COBALT_RTDM_GPIO_H + +#include +#include +#include + +struct class; +struct device_node; +struct gpio_desc; + +struct rtdm_gpio_pin { + struct rtdm_device dev; + struct list_head next; + rtdm_irq_t irqh; + rtdm_event_t event; + char *name; + struct gpio_desc *desc; + nanosecs_abs_t timestamp; + bool monotonic_timestamp; +}; + +struct rtdm_gpio_chip { + struct gpio_chip *gc; + struct rtdm_driver driver; + struct class *devclass; + struct list_head next; + rtdm_lock_t lock; + struct rtdm_gpio_pin pins[0]; +}; + +int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc, + struct gpio_chip *gc, + int gpio_subclass); + +struct rtdm_gpio_chip * +rtdm_gpiochip_alloc(struct gpio_chip *gc, + int gpio_subclass); + +void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc); + +int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc, + const char *label, int gpio_subclass); + +int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc, + unsigned int offset); + +int rtdm_gpiochip_find(struct device_node *from, const char *label, int type); + +int rtdm_gpiochip_array_find(struct device_node *from, const char *label[], + int nentries, int type); + +#ifdef CONFIG_OF + +int rtdm_gpiochip_scan_of(struct device_node *from, + const char *compat, int type); + +int rtdm_gpiochip_scan_array_of(struct device_node *from, + const char *compat[], + int nentries, int type); +#endif + +void rtdm_gpiochip_remove_by_type(int type); + +#endif /* !_COBALT_RTDM_GPIO_H */ --- linux/include/xenomai/rtdm/compat.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/compat.h 2022-03-21 12:58:31.894864534 +0100 @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2014 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_RTDM_COMPAT_H +#define _COBALT_RTDM_COMPAT_H + +#ifdef CONFIG_XENO_ARCH_SYS3264 + +#include +#include + +struct compat_rtdm_getsockopt_args { + int level; + int optname; + compat_uptr_t optval; + compat_uptr_t optlen; +}; + +struct compat_rtdm_setsockopt_args { + int level; + int optname; + const compat_uptr_t optval; + socklen_t optlen; +}; + +struct compat_rtdm_getsockaddr_args { + compat_uptr_t addr; + compat_uptr_t addrlen; +}; + +struct compat_rtdm_setsockaddr_args { + const compat_uptr_t addr; + socklen_t addrlen; +}; + +#define _RTIOC_GETSOCKOPT_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x20, \ + struct compat_rtdm_getsockopt_args) +#define _RTIOC_SETSOCKOPT_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x21, \ + struct compat_rtdm_setsockopt_args) +#define _RTIOC_BIND_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x22, \ + struct compat_rtdm_setsockaddr_args) +#define _RTIOC_CONNECT_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x23, \ + struct compat_rtdm_setsockaddr_args) +#define _RTIOC_ACCEPT_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x25, \ + struct compat_rtdm_getsockaddr_args) +#define _RTIOC_GETSOCKNAME_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x26, \ + struct compat_rtdm_getsockaddr_args) +#define _RTIOC_GETPEERNAME_COMPAT _IOW(RTIOC_TYPE_COMMON, 0x27, \ + struct compat_rtdm_getsockaddr_args) + +#define __COMPAT_CASE(__op) : case __op + +#else /* !CONFIG_XENO_ARCH_SYS3264 */ + +#define __COMPAT_CASE(__op) + +#endif /* !CONFIG_XENO_ARCH_SYS3264 */ + +#define COMPAT_CASE(__op) case __op __COMPAT_CASE(__op ## _COMPAT) + +#endif /* !_COBALT_RTDM_COMPAT_H */ --- linux/include/xenomai/rtdm/serial.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/serial.h 2022-03-21 12:58:31.887864602 +0100 @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2005-2007 Jan Kiszka + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_SERIAL_H +#define _COBALT_RTDM_SERIAL_H + +#include +#include + +#endif /* !_COBALT_RTDM_SERIAL_H */ --- linux/include/xenomai/rtdm/driver.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/driver.h 2022-03-21 12:58:31.879864680 +0100 @@ -0,0 +1,1361 @@ +/** + * @file + * Real-Time Driver Model for Xenomai, driver API header + * + * Copyright (C) 2005-2007 Jan Kiszka + * Copyright (C) 2005 Joerg Langenberg + * Copyright (C) 2008 Gilles Chanteperdrix + * Copyright (C) 2014 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * @ingroup driverapi + */ +#ifndef _COBALT_RTDM_DRIVER_H +#define _COBALT_RTDM_DRIVER_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* debug support */ +#include +#include +#ifdef CONFIG_PCI +#include +#endif /* CONFIG_PCI */ +#include + +struct class; +typedef struct xnselector rtdm_selector_t; +enum rtdm_selecttype; + +/*! + * @addtogroup rtdm_device_register + * @{ + */ + +/*! + * @anchor dev_flags @name Device Flags + * Static flags describing a RTDM device + * @{ + */ +/** If set, only a single instance of the device can be requested by an + * application. */ +#define RTDM_EXCLUSIVE 0x0001 + +/** + * Use fixed minor provided in the rtdm_device description for + * registering. If this flag is absent, the RTDM core assigns minor + * numbers to devices managed by a driver in order of registration. + */ +#define RTDM_FIXED_MINOR 0x0002 + +/** If set, the device is addressed via a clear-text name. */ +#define RTDM_NAMED_DEVICE 0x0010 + +/** If set, the device is addressed via a combination of protocol ID and + * socket type. */ +#define RTDM_PROTOCOL_DEVICE 0x0020 + +/** Mask selecting the device type. */ +#define RTDM_DEVICE_TYPE_MASK 0x00F0 + +/** Flag indicating a secure variant of RTDM (not supported here) */ +#define RTDM_SECURE_DEVICE 0x80000000 +/** @} Device Flags */ + +/** Maximum number of named devices per driver. */ +#define RTDM_MAX_MINOR 4096 + +/** @} rtdm_device_register */ + +/*! + * @addtogroup rtdm_sync + * @{ + */ + +/*! + * @anchor RTDM_SELECTTYPE_xxx @name RTDM_SELECTTYPE_xxx + * Event types select can bind to + * @{ + */ +enum rtdm_selecttype { + /** Select input data availability events */ + RTDM_SELECTTYPE_READ = XNSELECT_READ, + + /** Select ouput buffer availability events */ + RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE, + + /** Select exceptional events */ + RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT +}; +/** @} RTDM_SELECTTYPE_xxx */ + +/** @} rtdm_sync */ + +/** + * @brief Device context + * + * A device context structure is associated with every open device instance. + * RTDM takes care of its creation and destruction and passes it to the + * operation handlers when being invoked. + * + * Drivers can attach arbitrary data immediately after the official + * structure. The size of this data is provided via + * rtdm_driver.context_size during device registration. + */ +struct rtdm_dev_context { + struct rtdm_fd fd; + + /** Set of active device operation handlers */ + /** Reference to owning device */ + struct rtdm_device *device; + + /** Begin of driver defined context data structure */ + char dev_private[0]; +}; + +static inline struct rtdm_dev_context *rtdm_fd_to_context(struct rtdm_fd *fd) +{ + return container_of(fd, struct rtdm_dev_context, fd); +} + +/** + * Locate the driver private area associated to a device context structure + * + * @param[in] fd File descriptor structure associated with opened + * device instance + * + * @return The address of the private driver area associated to @a + * file descriptor. 
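+ *
+ * For example, a driver which declared a per-connection context via
+ * rtdm_driver.context_size may retrieve it from any handler as follows
+ * (a sketch; struct foo_context and foo_ioctl() are made-up names):
+ *
+ * @code
+ * struct foo_context {
+ *         int some_state;
+ * };
+ *
+ * static int foo_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+ * {
+ *         struct foo_context *ctx = rtdm_fd_to_private(fd);
+ *
+ *         ctx->some_state++;  // the private area is zero-initialized at open time
+ *         return 0;
+ * }
+ * @endcode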
+ */ +static inline void *rtdm_fd_to_private(struct rtdm_fd *fd) +{ + return &rtdm_fd_to_context(fd)->dev_private[0]; +} + +/** + * Locate a device file descriptor structure from its driver private area + * + * @param[in] dev_private Address of a private context area + * + * @return The address of the file descriptor structure defining @a + * dev_private. + */ +static inline struct rtdm_fd *rtdm_private_to_fd(void *dev_private) +{ + struct rtdm_dev_context *ctx; + ctx = container_of(dev_private, struct rtdm_dev_context, dev_private); + return &ctx->fd; +} + +/** + * Tell whether the passed file descriptor belongs to an application. + * + * @param[in] fd File descriptor + * + * @return true if passed file descriptor belongs to an application, + * false otherwise. + */ +static inline bool rtdm_fd_is_user(struct rtdm_fd *fd) +{ + return rtdm_fd_owner(fd) != &cobalt_kernel_ppd; +} + +/** + * Locate a device structure from a file descriptor. + * + * @param[in] fd File descriptor + * + * @return The address of the device structure to which this file + * descriptor is attached. + */ +static inline struct rtdm_device *rtdm_fd_device(struct rtdm_fd *fd) +{ + return rtdm_fd_to_context(fd)->device; +} + +/** + * @brief RTDM profile information + * + * This descriptor details the profile information associated to a + * RTDM class of device managed by a driver. + * + * @anchor rtdm_profile_info + */ +struct rtdm_profile_info { + /** Device class name */ + const char *name; + /** Device class ID, see @ref RTDM_CLASS_xxx */ + int class_id; + /** Device sub-class, see RTDM_SUBCLASS_xxx definition in the + @ref rtdm_profiles "Device Profiles" */ + int subclass_id; + /** Supported device profile version */ + int version; + /** Reserved */ + unsigned int magic; + struct module *owner; + struct class *kdev_class; +}; + +struct rtdm_driver; + +/** + * @brief RTDM state management handlers + */ +struct rtdm_sm_ops { + /** Handler called upon transition to COBALT_STATE_WARMUP */ + int (*start)(struct rtdm_driver *drv); + /** Handler called upon transition to COBALT_STATE_TEARDOWN */ + int (*stop)(struct rtdm_driver *drv); +}; + +/** + * @brief RTDM driver + * + * This descriptor describes a RTDM device driver. The structure holds + * runtime data, therefore it must reside in writable memory. + */ +struct rtdm_driver { + /** + * Class profile information. The RTDM_PROFILE_INFO() macro @b + * must be used for filling up this field. + * @anchor rtdm_driver_profile + */ + struct rtdm_profile_info profile_info; + /** + * Device flags, see @ref dev_flags "Device Flags" for details + * @anchor rtdm_driver_flags + */ + int device_flags; + /** + * Size of the private memory area the core should + * automatically allocate for each open file descriptor, which + * is usable for storing the context data associated to each + * connection. The allocated memory is zero-initialized. The + * start of this area can be retrieved by a call to + * rtdm_fd_to_private(). + */ + size_t context_size; + /** Protocol device identification: protocol family (PF_xxx) */ + int protocol_family; + /** Protocol device identification: socket type (SOCK_xxx) */ + int socket_type; + /** I/O operation handlers */ + struct rtdm_fd_ops ops; + /** State management handlers */ + struct rtdm_sm_ops smops; + /** + * Count of devices this driver manages. This value is used to + * allocate a chrdev region for named devices. + */ + int device_count; + /** Base minor for named devices. 
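+ *
+ * For illustration, a driver descriptor combining the fields described
+ * above might be set up as follows (a sketch only; the foo_* handlers,
+ * struct foo_context and the class/version values are placeholders):
+ *
+ * @code
+ * static struct rtdm_driver foo_driver = {
+ *         .profile_info = RTDM_PROFILE_INFO(foo, RTDM_CLASS_EXPERIMENTAL, 0, 1),
+ *         .device_flags = RTDM_NAMED_DEVICE,
+ *         .device_count = 1,
+ *         .context_size = sizeof(struct foo_context),
+ *         .ops = {
+ *                 .open = foo_open,
+ *                 .close = foo_close,
+ *                 .ioctl_rt = foo_ioctl,
+ *         },
+ * };
+ * @endcode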
*/ + int base_minor; + /** Reserved area */ + struct { + union { + struct { + struct cdev cdev; + int major; + } named; + }; + atomic_t refcount; + struct notifier_block nb_statechange; + DECLARE_BITMAP(minor_map, RTDM_MAX_MINOR); + }; +}; + +#define RTDM_CLASS_MAGIC 0x8284636c + +/** + * @brief Initializer for class profile information. + * + * This macro must be used to fill in the @ref rtdm_profile_info + * "class profile information" field from a RTDM driver. + * + * @param __name Class name (unquoted). + * + * @param __id Class major identification number + * (profile_version.class_id). + * + * @param __subid Class minor identification number + * (profile_version.subclass_id). + * + * @param __version Profile version number. + * + * @note See @ref rtdm_profiles "Device Profiles". + */ +#define RTDM_PROFILE_INFO(__name, __id, __subid, __version) \ +{ \ + .name = ( # __name ), \ + .class_id = (__id), \ + .subclass_id = (__subid), \ + .version = (__version), \ + .magic = ~RTDM_CLASS_MAGIC, \ + .owner = THIS_MODULE, \ + .kdev_class = NULL, \ +} + +int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls); + +/** + * @brief RTDM device + * + * This descriptor describes a RTDM device instance. The structure + * holds runtime data, therefore it must reside in writable memory. + */ +struct rtdm_device { + /** Device driver. */ + struct rtdm_driver *driver; + /** Driver definable device data */ + void *device_data; + /** + * Device label template for composing the device name. A + * limited printf-like format string is assumed, with a + * provision for replacing the first %d/%i placeholder found + * in the string by the device minor number. It is up to the + * driver to actually mention this placeholder or not, + * depending on the naming convention for its devices. For + * named devices, the corresponding device node will + * automatically appear in the /dev/rtdm hierachy with + * hotplug-enabled device filesystems (DEVTMPFS). + */ + const char *label; + /** + * Minor number of the device. If RTDM_FIXED_MINOR is present + * in the driver flags, the value stored in this field is used + * verbatim by rtdm_dev_register(). Otherwise, the RTDM core + * automatically assigns minor numbers to all devices managed + * by the driver referred to by @a driver, in order of + * registration, storing the resulting values into this field. + * + * Device nodes created for named devices in the Linux /dev + * hierarchy are assigned this minor number. + * + * The minor number of the current device handling an I/O + * request can be retreived by a call to rtdm_fd_minor(). + */ + int minor; + /** Reserved area. 
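+ *
+ * For illustration, a device instance based on the fields above could
+ * be declared and registered as follows (a sketch; foo_driver refers to
+ * a driver descriptor such as the one sketched earlier):
+ *
+ * @code
+ * static struct rtdm_device foo_device = {
+ *         .driver = &foo_driver,
+ *         .label = "foo%d",
+ * };
+ *
+ * ret = rtdm_dev_register(&foo_device);
+ * @endcode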
*/ + struct { + unsigned int magic; + char *name; + union { + struct { + xnhandle_t handle; + } named; + struct { + struct xnid id; + } proto; + }; + dev_t rdev; + struct device *kdev; + struct class *kdev_class; + atomic_t refcount; + struct rtdm_fd_ops ops; + wait_queue_head_t putwq; + struct list_head openfd_list; + }; +}; + +/* --- device registration --- */ + +int rtdm_dev_register(struct rtdm_device *device); + +void rtdm_dev_unregister(struct rtdm_device *device); + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ + +static inline struct device *rtdm_dev_to_kdev(struct rtdm_device *device) +{ + return device->kdev; +} + +/* --- clock services --- */ +static inline nanosecs_abs_t rtdm_clock_read(void) +{ + return xnclock_read_realtime(&nkclock); +} + +static inline nanosecs_abs_t rtdm_clock_read_monotonic(void) +{ + return xnclock_read_monotonic(&nkclock); +} +#endif /* !DOXYGEN_CPP */ + +/* --- timeout sequences */ + +typedef nanosecs_abs_t rtdm_toseq_t; + +void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout); + +/*! + * @addtogroup rtdm_sync + * @{ + */ + +/*! + * @defgroup rtdm_sync_biglock Big dual kernel lock + * @{ + */ + +/** + * @brief Enter atomic section (dual kernel only) + * + * This call opens a fully atomic section, serializing execution with + * respect to all interrupt handlers (including for real-time IRQs) + * and Xenomai threads running on all CPUs. + * + * @param __context name of local variable to store the context + * in. This variable updated by the real-time core will hold the + * information required to leave the atomic section properly. + * + * @note Atomic sections may be nested. The caller is allowed to sleep + * on a blocking Xenomai service from primary mode within an atomic + * section delimited by cobalt_atomic_enter/cobalt_atomic_leave calls. + * On the contrary, sleeping on a regular Linux kernel service while + * holding such lock is NOT valid. + * + * @note Since the strongest lock is acquired by this service, it can + * be used to synchronize real-time and non-real-time contexts. + * + * @warning This service is not portable to the Mercury core, and + * should be restricted to Cobalt-specific use cases, mainly for the + * purpose of porting existing dual-kernel drivers which still depend + * on the obsolete RTDM_EXECUTE_ATOMICALLY() construct. + */ +#define cobalt_atomic_enter(__context) \ + do { \ + xnlock_get_irqsave(&nklock, (__context)); \ + xnsched_lock(); \ + } while (0) + +/** + * @brief Leave atomic section (dual kernel only) + * + * This call closes an atomic section previously opened by a call to + * cobalt_atomic_enter(), restoring the preemption and interrupt state + * which prevailed prior to entering the exited section. + * + * @param __context name of local variable which stored the context. + * + * @warning This service is not portable to the Mercury core, and + * should be restricted to Cobalt-specific use cases. + */ +#define cobalt_atomic_leave(__context) \ + do { \ + xnsched_unlock(); \ + xnlock_put_irqrestore(&nklock, (__context)); \ + } while (0) + +/** + * @brief Execute code block atomically (DEPRECATED) + * + * Generally, it is illegal to suspend the current task by calling + * rtdm_task_sleep(), rtdm_event_wait(), etc. while holding a spinlock. In + * contrast, this macro allows to combine several operations including + * a potentially rescheduling call to an atomic code block with respect to + * other RTDM_EXECUTE_ATOMICALLY() blocks. 
The macro is a light-weight + * alternative for protecting code blocks via mutexes, and it can even be used + * to synchronise real-time and non-real-time contexts. + * + * @param code_block Commands to be executed atomically + * + * @note It is not allowed to leave the code block explicitly by using + * @c break, @c return, @c goto, etc. This would leave the global lock held + * during the code block execution in an inconsistent state. Moreover, do not + * embed complex operations into the code bock. Consider that they will be + * executed under preemption lock with interrupts switched-off. Also note that + * invocation of rescheduling calls may break the atomicity until the task + * gains the CPU again. + * + * @coretags{unrestricted} + * + * @deprecated This construct will be phased out in Xenomai + * 3.0. Please use rtdm_waitqueue services instead. + * + * @see cobalt_atomic_enter(). + */ +#ifdef DOXYGEN_CPP /* Beautify doxygen output */ +#define RTDM_EXECUTE_ATOMICALLY(code_block) \ +{ \ + \ + code_block; \ + \ +} +#else /* This is how it really works */ +static inline __attribute__((deprecated)) void +rtdm_execute_atomically(void) { } + +#define RTDM_EXECUTE_ATOMICALLY(code_block) \ +{ \ + spl_t __rtdm_s; \ + \ + rtdm_execute_atomically(); \ + xnlock_get_irqsave(&nklock, __rtdm_s); \ + xnsched_lock(); \ + code_block; \ + xnsched_unlock(); \ + xnlock_put_irqrestore(&nklock, __rtdm_s); \ +} +#endif + +/** @} Big dual kernel lock */ + +/** + * @defgroup rtdm_sync_spinlock Spinlock with preemption deactivation + * @{ + */ + +/** + * Static lock initialisation + */ +#define RTDM_LOCK_UNLOCKED(__name) PIPELINE_SPIN_LOCK_UNLOCKED(__name) + +#define DEFINE_RTDM_LOCK(__name) \ + rtdm_lock_t __name = RTDM_LOCK_UNLOCKED(__name) + +/** Lock variable */ +typedef pipeline_spinlock_t rtdm_lock_t; + +/** Variable to save the context while holding a lock */ +typedef unsigned long rtdm_lockctx_t; + +/** + * Dynamic lock initialisation + * + * @param lock Address of lock variable + * + * @coretags{task-unrestricted} + */ +static inline void rtdm_lock_init(rtdm_lock_t *lock) +{ + raw_spin_lock_init(lock); +} + +/** + * Acquire lock from non-preemptible contexts + * + * @param lock Address of lock variable + * + * @coretags{unrestricted} + */ +static inline void rtdm_lock_get(rtdm_lock_t *lock) +{ + XENO_BUG_ON(COBALT, !spltest()); + raw_spin_lock(lock); + xnsched_lock(); +} + +/** + * Release lock without preemption restoration + * + * @param lock Address of lock variable + * + * @coretags{unrestricted, might-switch} + */ +static inline void rtdm_lock_put(rtdm_lock_t *lock) +{ + raw_spin_unlock(lock); + xnsched_unlock(); +} + +/** + * Acquire lock and disable preemption, by stalling the head domain. 
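+ *
+ * A typical usage pattern is (a sketch; foo_lock and the protected
+ * data are made-up names):
+ *
+ * @code
+ * static DEFINE_RTDM_LOCK(foo_lock);
+ *
+ * rtdm_lockctx_t ctx;
+ *
+ * rtdm_lock_get_irqsave(&foo_lock, ctx);
+ * // ... access data shared with the interrupt handler ...
+ * rtdm_lock_put_irqrestore(&foo_lock, ctx);
+ * @endcode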
+ * + * @param __lock Address of lock variable + * @param __context name of local variable to store the context in + * + * @coretags{unrestricted} + */ +#define rtdm_lock_get_irqsave(__lock, __context) \ + ((__context) = __rtdm_lock_get_irqsave(__lock)) + +static inline rtdm_lockctx_t __rtdm_lock_get_irqsave(rtdm_lock_t *lock) +{ + rtdm_lockctx_t context; + + splhigh(context); + raw_spin_lock(lock); + xnsched_lock(); + + return context; +} + +/** + * Release lock and restore preemption state + * + * @param lock Address of lock variable + * @param context name of local variable which stored the context + * + * @coretags{unrestricted} + */ +static inline +void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t context) +{ + raw_spin_unlock(lock); + xnsched_unlock(); + splexit(context); +} + +/** + * Disable preemption locally + * + * @param __context name of local variable to store the context in + * + * @coretags{unrestricted} + */ +#define rtdm_lock_irqsave(__context) \ + splhigh(__context) + +/** + * Restore preemption state + * + * @param __context name of local variable which stored the context + * + * @coretags{unrestricted} + */ +#define rtdm_lock_irqrestore(__context) \ + splexit(__context) + +/** @} Spinlock with Preemption Deactivation */ + +#ifndef DOXYGEN_CPP + +struct rtdm_waitqueue { + struct xnsynch wait; +}; +typedef struct rtdm_waitqueue rtdm_waitqueue_t; + +#define RTDM_WAITQUEUE_INITIALIZER(__name) { \ + .wait = XNSYNCH_WAITQUEUE_INITIALIZER((__name).wait), \ + } + +#define DEFINE_RTDM_WAITQUEUE(__name) \ + struct rtdm_waitqueue __name = RTDM_WAITQUEUE_INITIALIZER(__name) + +#define DEFINE_RTDM_WAITQUEUE_ONSTACK(__name) \ + DEFINE_RTDM_WAITQUEUE(__name) + +static inline void rtdm_waitqueue_init(struct rtdm_waitqueue *wq) +{ + *wq = (struct rtdm_waitqueue)RTDM_WAITQUEUE_INITIALIZER(*wq); +} + +static inline void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq) +{ + xnsynch_destroy(&wq->wait); +} + +static inline int __rtdm_dowait(struct rtdm_waitqueue *wq, + nanosecs_rel_t timeout, xntmode_t timeout_mode) +{ + int ret; + + ret = xnsynch_sleep_on(&wq->wait, timeout, timeout_mode); + if (ret & XNBREAK) + return -EINTR; + if (ret & XNTIMEO) + return -ETIMEDOUT; + if (ret & XNRMID) + return -EIDRM; + return 0; +} + +static inline int __rtdm_timedwait(struct rtdm_waitqueue *wq, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq) +{ + if (toseq && timeout > 0) + return __rtdm_dowait(wq, *toseq, XN_ABSOLUTE); + + return __rtdm_dowait(wq, timeout, XN_RELATIVE); +} + +#define rtdm_timedwait_condition_locked(__wq, __cond, __timeout, __toseq) \ + ({ \ + int __ret = 0; \ + while (__ret == 0 && !(__cond)) \ + __ret = __rtdm_timedwait(__wq, __timeout, __toseq); \ + __ret; \ + }) + +#define rtdm_wait_condition_locked(__wq, __cond) \ + ({ \ + int __ret = 0; \ + while (__ret == 0 && !(__cond)) \ + __ret = __rtdm_dowait(__wq, \ + XN_INFINITE, XN_RELATIVE); \ + __ret; \ + }) + +#define rtdm_timedwait_condition(__wq, __cond, __timeout, __toseq) \ + ({ \ + spl_t __s; \ + int __ret; \ + xnlock_get_irqsave(&nklock, __s); \ + __ret = rtdm_timedwait_condition_locked(__wq, __cond, \ + __timeout, __toseq); \ + xnlock_put_irqrestore(&nklock, __s); \ + __ret; \ + }) + +#define rtdm_timedwait(__wq, __timeout, __toseq) \ + __rtdm_timedwait(__wq, __timeout, __toseq) + +#define rtdm_timedwait_locked(__wq, __timeout, __toseq) \ + rtdm_timedwait(__wq, __timeout, __toseq) + +#define rtdm_wait_condition(__wq, __cond) \ + ({ \ + spl_t __s; \ + int __ret; \ + xnlock_get_irqsave(&nklock, __s); \ + __ret = 
rtdm_wait_condition_locked(__wq, __cond); \ + xnlock_put_irqrestore(&nklock, __s); \ + __ret; \ + }) + +#define rtdm_wait(__wq) \ + __rtdm_dowait(__wq, XN_INFINITE, XN_RELATIVE) + +#define rtdm_wait_locked(__wq) rtdm_wait(__wq) + +#define rtdm_waitqueue_lock(__wq, __context) cobalt_atomic_enter(__context) + +#define rtdm_waitqueue_unlock(__wq, __context) cobalt_atomic_leave(__context) + +#define rtdm_waitqueue_signal(__wq) \ + ({ \ + struct xnthread *__waiter; \ + __waiter = xnsynch_wakeup_one_sleeper(&(__wq)->wait); \ + xnsched_run(); \ + __waiter != NULL; \ + }) + +#define __rtdm_waitqueue_flush(__wq, __reason) \ + ({ \ + int __ret; \ + __ret = xnsynch_flush(&(__wq)->wait, __reason); \ + xnsched_run(); \ + __ret == XNSYNCH_RESCHED; \ + }) + +#define rtdm_waitqueue_broadcast(__wq) \ + __rtdm_waitqueue_flush(__wq, 0) + +#define rtdm_waitqueue_flush(__wq) \ + __rtdm_waitqueue_flush(__wq, XNBREAK) + +#define rtdm_waitqueue_wakeup(__wq, __waiter) \ + do { \ + xnsynch_wakeup_this_sleeper(&(__wq)->wait, __waiter); \ + xnsched_run(); \ + } while (0) + +#define rtdm_for_each_waiter(__pos, __wq) \ + xnsynch_for_each_sleeper(__pos, &(__wq)->wait) + +#define rtdm_for_each_waiter_safe(__pos, __tmp, __wq) \ + xnsynch_for_each_sleeper_safe(__pos, __tmp, &(__wq)->wait) + +#endif /* !DOXYGEN_CPP */ + +/** @} rtdm_sync */ + +/* --- Interrupt management services --- */ +/*! + * @addtogroup rtdm_irq + * @{ + */ + +typedef struct xnintr rtdm_irq_t; + +/*! + * @anchor RTDM_IRQTYPE_xxx @name RTDM_IRQTYPE_xxx + * Interrupt registrations flags + * @{ + */ +/** Enable IRQ-sharing with other real-time drivers */ +#define RTDM_IRQTYPE_SHARED XN_IRQTYPE_SHARED +/** Mark IRQ as edge-triggered, relevant for correct handling of shared + * edge-triggered IRQs */ +#define RTDM_IRQTYPE_EDGE XN_IRQTYPE_EDGE +/** @} RTDM_IRQTYPE_xxx */ + +/** + * Interrupt handler + * + * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request() + * + * @return 0 or a combination of @ref RTDM_IRQ_xxx flags + */ +typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle); + +/*! + * @anchor RTDM_IRQ_xxx @name RTDM_IRQ_xxx + * Return flags of interrupt handlers + * @{ + */ +/** Unhandled interrupt */ +#define RTDM_IRQ_NONE XN_IRQ_NONE +/** Denote handled interrupt */ +#define RTDM_IRQ_HANDLED XN_IRQ_HANDLED +/** Request interrupt disabling on exit */ +#define RTDM_IRQ_DISABLE XN_IRQ_DISABLE +/** @} RTDM_IRQ_xxx */ + +/** + * Retrieve IRQ handler argument + * + * @param irq_handle IRQ handle + * @param type Type of the pointer to return + * + * @return The argument pointer registered on rtdm_irq_request() is returned, + * type-casted to the specified @a type. 
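+ *
+ * For example, an interrupt handler registered with a device structure
+ * as its argument would typically start like this (a sketch; struct
+ * foo_dev and foo_isr() are made-up names):
+ *
+ * @code
+ * static int foo_isr(rtdm_irq_t *irq_handle)
+ * {
+ *         struct foo_dev *dev = rtdm_irq_get_arg(irq_handle, struct foo_dev);
+ *
+ *         // ... acknowledge and handle the device interrupt ...
+ *         return RTDM_IRQ_HANDLED;
+ * }
+ * @endcode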
+ * + * @coretags{unrestricted} + */ +#define rtdm_irq_get_arg(irq_handle, type) ((type *)irq_handle->cookie) +/** @} rtdm_irq */ + +int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no, + rtdm_irq_handler_t handler, unsigned long flags, + const char *device_name, void *arg); + +int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no, + rtdm_irq_handler_t handler, unsigned long flags, + const char *device_name, void *arg, + const cpumask_t *cpumask); + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +static inline int rtdm_irq_free(rtdm_irq_t *irq_handle) +{ + if (!XENO_ASSERT(COBALT, xnsched_root_p())) + return -EPERM; + xnintr_destroy(irq_handle); + return 0; +} + +static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle) +{ + xnintr_enable(irq_handle); + return 0; +} + +static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle) +{ + xnintr_disable(irq_handle); + return 0; +} + +static inline int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle, + const cpumask_t *cpumask) +{ + return xnintr_affinity(irq_handle, cpumask); +} +#endif /* !DOXYGEN_CPP */ + +/* --- non-real-time signalling services --- */ + +/*! + * @addtogroup rtdm_nrtsignal + * @{ + */ + +typedef struct rtdm_nrtsig rtdm_nrtsig_t; +/** + * Non-real-time signal handler + * + * @param[in] nrt_sig Signal handle pointer as passed to rtdm_nrtsig_init() + * @param[in] arg Argument as passed to rtdm_nrtsig_init() + * + * @note The signal handler will run in soft-IRQ context of the non-real-time + * subsystem. Note the implications of this context, e.g. no invocation of + * blocking operations. + */ +typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t *nrt_sig, void *arg); + +struct rtdm_nrtsig { + struct pipeline_inband_work inband_work; /* Must be first */ + rtdm_nrtsig_handler_t handler; + void *arg; +}; + +void rtdm_schedule_nrt_work(struct work_struct *lostage_work); +/** @} rtdm_nrtsignal */ + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work); + +static inline void rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig, + rtdm_nrtsig_handler_t handler, void *arg) +{ + nrt_sig->inband_work = (struct pipeline_inband_work) + PIPELINE_INBAND_WORK_INITIALIZER(*nrt_sig, + __rtdm_nrtsig_execute); + nrt_sig->handler = handler; + nrt_sig->arg = arg; +} + +static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig) +{ + nrt_sig->handler = NULL; + nrt_sig->arg = NULL; +} + +void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig); +#endif /* !DOXYGEN_CPP */ + +/* --- timer services --- */ + +/*! + * @addtogroup rtdm_timer + * @{ + */ + +typedef struct xntimer rtdm_timer_t; + +/** + * Timer handler + * + * @param[in] timer Timer handle as returned by rtdm_timer_init() + */ +typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer); + +/*! 
+ * @anchor RTDM_TIMERMODE_xxx @name RTDM_TIMERMODE_xxx + * Timer operation modes + * @{ + */ +enum rtdm_timer_mode { + /** Monotonic timer with relative timeout */ + RTDM_TIMERMODE_RELATIVE = XN_RELATIVE, + + /** Monotonic timer with absolute timeout */ + RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE, + + /** Adjustable timer with absolute timeout */ + RTDM_TIMERMODE_REALTIME = XN_REALTIME +}; +/** @} RTDM_TIMERMODE_xxx */ + +/** @} rtdm_timer */ + +int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler, + const char *name); + +void rtdm_timer_destroy(rtdm_timer_t *timer); + +int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry, + nanosecs_rel_t interval, enum rtdm_timer_mode mode); + +void rtdm_timer_stop(rtdm_timer_t *timer); + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer, + nanosecs_abs_t expiry, + nanosecs_rel_t interval, + enum rtdm_timer_mode mode) +{ + return xntimer_start(timer, expiry, interval, (xntmode_t)mode); +} + +static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer) +{ + xntimer_stop(timer); +} +#endif /* !DOXYGEN_CPP */ + +/* --- task services --- */ +/*! + * @addtogroup rtdm_task + * @{ + */ + +typedef struct xnthread rtdm_task_t; + +/** + * Real-time task procedure + * + * @param[in,out] arg argument as passed to rtdm_task_init() + */ +typedef void (*rtdm_task_proc_t)(void *arg); + +/** + * @anchor rtdmtaskprio @name Task Priority Range + * Maximum and minimum task priorities + * @{ */ +#define RTDM_TASK_LOWEST_PRIORITY 0 +#define RTDM_TASK_HIGHEST_PRIORITY 99 +/** @} Task Priority Range */ + +/** + * @anchor rtdmchangetaskprio @name Task Priority Modification + * Raise or lower task priorities by one level + * @{ */ +#define RTDM_TASK_RAISE_PRIORITY (+1) +#define RTDM_TASK_LOWER_PRIORITY (-1) +/** @} Task Priority Modification */ + +/** @} rtdm_task */ + +int rtdm_task_init(rtdm_task_t *task, const char *name, + rtdm_task_proc_t task_proc, void *arg, + int priority, nanosecs_rel_t period); +int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode); +void rtdm_task_busy_sleep(nanosecs_rel_t delay); + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +static inline void rtdm_task_destroy(rtdm_task_t *task) +{ + xnthread_cancel(task); + xnthread_join(task, true); +} + +static inline int rtdm_task_should_stop(void) +{ + return xnthread_test_info(xnthread_current(), XNCANCELD); +} + +void rtdm_task_join(rtdm_task_t *task); + +static inline void __deprecated rtdm_task_join_nrt(rtdm_task_t *task, + unsigned int poll_delay) +{ + rtdm_task_join(task); +} + +static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority) +{ + union xnsched_policy_param param = { .rt = { .prio = priority } }; + spl_t s; + + splhigh(s); + xnthread_set_schedparam(task, &xnsched_class_rt, ¶m); + xnsched_run(); + splexit(s); +} + +static inline int rtdm_task_set_period(rtdm_task_t *task, + nanosecs_abs_t start_date, + nanosecs_rel_t period) +{ + if (period < 0) + period = 0; + if (start_date == 0) + start_date = XN_INFINITE; + + return xnthread_set_periodic(task, start_date, XN_ABSOLUTE, period); +} + +static inline int rtdm_task_unblock(rtdm_task_t *task) +{ + spl_t s; + int res; + + splhigh(s); + res = xnthread_unblock(task); + xnsched_run(); + splexit(s); + + return res; +} + +static inline rtdm_task_t *rtdm_task_current(void) +{ + return xnthread_current(); +} + +static inline int rtdm_task_wait_period(unsigned long *overruns_r) +{ + if 
(!XENO_ASSERT(COBALT, !xnsched_unblockable_p())) + return -EPERM; + return xnthread_wait_period(overruns_r); +} + +static inline int rtdm_task_sleep(nanosecs_rel_t delay) +{ + return __rtdm_task_sleep(delay, XN_RELATIVE); +} + +static inline int +rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode) +{ + /* For the sake of a consistent API usage... */ + if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME) + return -EINVAL; + return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode); +} + +/* rtdm_task_sleep_abs shall be used instead */ +static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time) +{ + return __rtdm_task_sleep(wakeup_time, XN_REALTIME); +} + +#define rtdm_task_busy_wait(__condition, __spin_ns, __sleep_ns) \ + ({ \ + __label__ done; \ + nanosecs_abs_t __end; \ + int __ret = 0; \ + for (;;) { \ + __end = rtdm_clock_read_monotonic() + __spin_ns; \ + for (;;) { \ + if (__condition) \ + goto done; \ + if (rtdm_clock_read_monotonic() >= __end) \ + break; \ + } \ + __ret = rtdm_task_sleep(__sleep_ns); \ + if (__ret) \ + break; \ + } \ + done: \ + __ret; \ + }) + +#define rtdm_wait_context xnthread_wait_context + +static inline +void rtdm_wait_complete(struct rtdm_wait_context *wc) +{ + xnthread_complete_wait(wc); +} + +static inline +int rtdm_wait_is_completed(struct rtdm_wait_context *wc) +{ + return xnthread_wait_complete_p(wc); +} + +static inline void rtdm_wait_prepare(struct rtdm_wait_context *wc) +{ + xnthread_prepare_wait(wc); +} + +static inline +struct rtdm_wait_context *rtdm_wait_get_context(rtdm_task_t *task) +{ + return xnthread_get_wait_context(task); +} + +#endif /* !DOXYGEN_CPP */ + +/* --- event services --- */ + +typedef struct rtdm_event { + struct xnsynch synch_base; + DECLARE_XNSELECT(select_block); +} rtdm_event_t; + +#define RTDM_EVENT_PENDING XNSYNCH_SPARE1 + +void rtdm_event_init(rtdm_event_t *event, unsigned long pending); +int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned fd_index); +int rtdm_event_wait(rtdm_event_t *event); +int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq); +void rtdm_event_signal(rtdm_event_t *event); + +void rtdm_event_clear(rtdm_event_t *event); + +void rtdm_event_pulse(rtdm_event_t *event); + +void rtdm_event_destroy(rtdm_event_t *event); + +/* --- semaphore services --- */ + +typedef struct rtdm_sem { + unsigned long value; + struct xnsynch synch_base; + DECLARE_XNSELECT(select_block); +} rtdm_sem_t; + +void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value); +int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned fd_index); +int rtdm_sem_down(rtdm_sem_t *sem); +int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq); +void rtdm_sem_up(rtdm_sem_t *sem); + +void rtdm_sem_destroy(rtdm_sem_t *sem); + +/* --- mutex services --- */ + +typedef struct rtdm_mutex { + struct xnsynch synch_base; + atomic_t fastlock; +} rtdm_mutex_t; + +void rtdm_mutex_init(rtdm_mutex_t *mutex); +int rtdm_mutex_lock(rtdm_mutex_t *mutex); +int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq); +void rtdm_mutex_unlock(rtdm_mutex_t *mutex); +void rtdm_mutex_destroy(rtdm_mutex_t *mutex); + +/* --- utility functions --- */ + +#define rtdm_printk(format, ...) printk(format, ##__VA_ARGS__) + +#define rtdm_printk_ratelimited(fmt, ...) 
do { \ + if (xnclock_ratelimit()) \ + printk(fmt, ##__VA_ARGS__); \ +} while (0) + +#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */ +static inline void *rtdm_malloc(size_t size) +{ + return xnmalloc(size); +} + +static inline void rtdm_free(void *ptr) +{ + xnfree(ptr); +} + +int rtdm_mmap_to_user(struct rtdm_fd *fd, + void *src_addr, size_t len, + int prot, void **pptr, + struct vm_operations_struct *vm_ops, + void *vm_private_data); + +int rtdm_iomap_to_user(struct rtdm_fd *fd, + phys_addr_t src_addr, size_t len, + int prot, void **pptr, + struct vm_operations_struct *vm_ops, + void *vm_private_data); + +int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va); + +int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va); + +int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa); + +int rtdm_munmap(void *ptr, size_t len); + +static inline int rtdm_read_user_ok(struct rtdm_fd *fd, + const void __user *ptr, size_t size) +{ + return access_rok(ptr, size); +} + +static inline int rtdm_rw_user_ok(struct rtdm_fd *fd, + const void __user *ptr, size_t size) +{ + return access_wok(ptr, size); +} + +static inline int rtdm_copy_from_user(struct rtdm_fd *fd, + void *dst, const void __user *src, + size_t size) +{ + return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0; +} + +static inline int rtdm_safe_copy_from_user(struct rtdm_fd *fd, + void *dst, const void __user *src, + size_t size) +{ + return cobalt_copy_from_user(dst, src, size); +} + +static inline int rtdm_copy_to_user(struct rtdm_fd *fd, + void __user *dst, const void *src, + size_t size) +{ + return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0; +} + +static inline int rtdm_safe_copy_to_user(struct rtdm_fd *fd, + void __user *dst, const void *src, + size_t size) +{ + return cobalt_copy_to_user(dst, src, size); +} + +static inline int rtdm_strncpy_from_user(struct rtdm_fd *fd, + char *dst, + const char __user *src, size_t count) +{ + return cobalt_strncpy_from_user(dst, src, count); +} + +static inline bool rtdm_available(void) +{ + return realtime_core_enabled(); +} + +static inline int rtdm_rt_capable(struct rtdm_fd *fd) +{ + if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p())) + return 0; + + if (!rtdm_fd_is_user(fd)) + return !xnsched_root_p(); + + return xnthread_current() != NULL; +} + +static inline int rtdm_in_rt_context(void) +{ + return is_primary_domain(); +} + +#define RTDM_IOV_FASTMAX 16 + +int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iov, + const struct user_msghdr *msg, + struct iovec *iov_fast); + +int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov, + const struct user_msghdr *msg, + struct iovec *iov_fast); + +static inline +void rtdm_drop_iovec(struct iovec *iov, struct iovec *iov_fast) +{ + if (iov != iov_fast) + xnfree(iov); +} + +ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen); + +#endif /* !DOXYGEN_CPP */ + +#endif /* _COBALT_RTDM_DRIVER_H */ --- linux/include/xenomai/rtdm/rtdm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/rtdm.h 2022-03-21 12:58:31.872864749 +0100 @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2005, 2006 Jan Kiszka + * Copyright (C) 2005 Joerg Langenberg + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_RTDM_H +#define _COBALT_RTDM_RTDM_H + +#include +#include +#include +#include +#include +#include +#include + +typedef __u32 socklen_t; + +#include + +int __rtdm_dev_open(const char *path, int oflag); + +int __rtdm_dev_socket(int protocol_family, + int socket_type, int protocol); + +static inline int rtdm_open(const char *path, int oflag, ...) +{ + return __rtdm_dev_open(path, oflag); +} + +static inline int rtdm_socket(int protocol_family, + int socket_type, int protocol) +{ + return __rtdm_dev_socket(protocol_family, socket_type, protocol); +} + +static inline int rtdm_close(int fd) +{ + return rtdm_fd_close(fd, RTDM_FD_MAGIC); +} + +#define rtdm_fcntl(__fd, __cmd, __args...) \ + rtdm_fd_fcntl(__fd, __cmd, ##__args) + +#define rtdm_ioctl(__fd, __request, __args...) \ + rtdm_fd_ioctl(__fd, __request, ##__args) + +static inline ssize_t rtdm_read(int fd, void *buf, size_t count) +{ + return rtdm_fd_read(fd, buf, count); +} + +static inline ssize_t rtdm_write(int fd, const void *buf, size_t count) +{ + return rtdm_fd_write(fd, buf, count); +} + +static inline ssize_t rtdm_recvmsg(int s, struct user_msghdr *msg, int flags) +{ + return rtdm_fd_recvmsg(s, msg, flags); +} + +static inline ssize_t rtdm_sendmsg(int s, const struct user_msghdr *msg, int flags) +{ + return rtdm_fd_sendmsg(s, msg, flags); +} + +static inline +ssize_t rtdm_recvfrom(int s, void *buf, size_t len, int flags, + struct sockaddr *from, + socklen_t *fromlen) +{ + struct user_msghdr msg; + struct iovec iov; + ssize_t ret; + + iov.iov_base = buf; + iov.iov_len = len; + msg.msg_name = from; + msg.msg_namelen = from ? 
*fromlen : 0; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + + ret = rtdm_recvmsg(s, &msg, flags); + if (ret < 0) + return ret; + + if (from) + *fromlen = msg.msg_namelen; + + return ret; +} + +static inline ssize_t rtdm_recv(int s, void *buf, size_t len, int flags) +{ + return rtdm_recvfrom(s, buf, len, flags, NULL, NULL); +} + +static inline ssize_t rtdm_sendto(int s, const void *buf, size_t len, + int flags, const struct sockaddr *to, + socklen_t tolen) +{ + struct user_msghdr msg; + struct iovec iov; + + iov.iov_base = (void *)buf; + iov.iov_len = len; + msg.msg_name = (struct sockaddr *)to; + msg.msg_namelen = tolen; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = NULL; + msg.msg_controllen = 0; + + return rtdm_sendmsg(s, &msg, flags); +} + +static inline ssize_t rtdm_send(int s, const void *buf, size_t len, int flags) +{ + return rtdm_sendto(s, buf, len, flags, NULL, 0); +} + +static inline int rtdm_getsockopt(int s, int level, int optname, + void *optval, socklen_t *optlen) +{ + struct _rtdm_getsockopt_args args = { + level, optname, optval, optlen + }; + + return rtdm_ioctl(s, _RTIOC_GETSOCKOPT, &args); +} + +static inline int rtdm_setsockopt(int s, int level, int optname, + const void *optval, socklen_t optlen) +{ + struct _rtdm_setsockopt_args args = { + level, optname, (void *)optval, optlen + }; + + return rtdm_ioctl(s, _RTIOC_SETSOCKOPT, &args); +} + +static inline int rtdm_bind(int s, const struct sockaddr *my_addr, + socklen_t addrlen) +{ + struct _rtdm_setsockaddr_args args = { + my_addr, addrlen + }; + + return rtdm_ioctl(s, _RTIOC_BIND, &args); +} + +static inline int rtdm_connect(int s, const struct sockaddr *serv_addr, + socklen_t addrlen) +{ + struct _rtdm_setsockaddr_args args = { + serv_addr, addrlen + }; + + return rtdm_ioctl(s, _RTIOC_CONNECT, &args); +} + +static inline int rtdm_listen(int s, int backlog) +{ + return rtdm_ioctl(s, _RTIOC_LISTEN, backlog); +} + +static inline int rtdm_accept(int s, struct sockaddr *addr, + socklen_t *addrlen) +{ + struct _rtdm_getsockaddr_args args = { + addr, addrlen + }; + + return rtdm_ioctl(s, _RTIOC_ACCEPT, &args); +} + +static inline int rtdm_getsockname(int s, struct sockaddr *name, + socklen_t *namelen) +{ + struct _rtdm_getsockaddr_args args = { + name, namelen + }; + + return rtdm_ioctl(s, _RTIOC_GETSOCKNAME, &args); +} + +static inline int rtdm_getpeername(int s, struct sockaddr *name, + socklen_t *namelen) +{ + struct _rtdm_getsockaddr_args args = { + name, namelen + }; + + return rtdm_ioctl(s, _RTIOC_GETPEERNAME, &args); +} + +static inline int rtdm_shutdown(int s, int how) +{ + return rtdm_ioctl(s, _RTIOC_SHUTDOWN, how); +} + +#endif /* _COBALT_RTDM_RTDM_H */ --- linux/include/xenomai/rtdm/gpiopwm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/gpiopwm.h 2022-03-21 12:58:31.864864827 +0100 @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2015 Jorge Ramirez + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_PWM_H +#define _COBALT_RTDM_PWM_H + +#include +#include + +#endif /* !_COBALT_RTDM_PWM_H */ --- linux/include/xenomai/rtdm/fd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/fd.h 2022-03-21 12:58:31.857864895 +0100 @@ -0,0 +1,415 @@ +/* + * Copyright (C) 2005-2007 Jan Kiszka + * Copyright (C) 2005 Joerg Langenberg + * Copyright (C) 2008,2013,2014 Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_FD_H +#define _COBALT_KERNEL_FD_H + +#include +#include +#include +#include +#include + +struct vm_area_struct; +struct rtdm_fd; +struct _rtdm_mmap_request; +struct xnselector; +struct cobalt_ppd; +struct rtdm_device; + +/** + * @file + * @anchor File operation handlers + * @addtogroup rtdm_device_register + * @{ + */ + +/** + * Open handler for named devices + * + * @param[in] fd File descriptor associated with opened device instance + * @param[in] oflags Open flags as passed by the user + * + * The file descriptor carries a device minor information which can be + * retrieved by a call to rtdm_fd_minor(fd). The minor number can be + * used for distinguishing devices managed by a driver. + * + * @return 0 on success. On failure, a negative error code is returned. + * + * @see @c open() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +int rtdm_open_handler(struct rtdm_fd *fd, int oflags); + +/** + * Socket creation handler for protocol devices + * + * @param[in] fd File descriptor associated with opened device instance + * @param[in] protocol Protocol number as passed by the user + * + * @return 0 on success. On failure, a negative error code is returned. + * + * @see @c socket() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +int rtdm_socket_handler(struct rtdm_fd *fd, int protocol); + +/** + * Close handler + * + * @param[in] fd File descriptor associated with opened + * device instance. + * + * @see @c close() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +void rtdm_close_handler(struct rtdm_fd *fd); + +/** + * IOCTL handler + * + * @param[in] fd File descriptor + * @param[in] request Request number as passed by the user + * @param[in,out] arg Request argument as passed by the user + * + * @return A positive value or 0 on success. On failure return either + * -ENOSYS, to request that the function be called again from the opposite + * realtime/non-realtime context, or another negative error code. 
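+ *
+ * A minimal sketch of an ioctl_rt handler, where MY_RTIOC_START and
+ * my_start() are hypothetical driver-defined names:
+ * @code
+ * static int my_ioctl_rt(struct rtdm_fd *fd, unsigned int request,
+ *			  void __user *arg)
+ * {
+ *	switch (request) {
+ *	case MY_RTIOC_START:
+ *		return my_start(fd);
+ *	default:
+ *		// cannot be served from the real-time context;
+ *		// -ENOSYS makes the core retry via the ioctl_nrt handler
+ *		return -ENOSYS;
+ *	}
+ * }
+ * @endcode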
+ * + * @see @c ioctl() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +int rtdm_ioctl_handler(struct rtdm_fd *fd, unsigned int request, void __user *arg); + +/** + * Read handler + * + * @param[in] fd File descriptor + * @param[out] buf Input buffer as passed by the user + * @param[in] size Number of bytes the user requests to read + * + * @return On success, the number of bytes read. On failure return either + * -ENOSYS, to request that this handler be called again from the opposite + * realtime/non-realtime context, or another negative error code. + * + * @see @c read() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +ssize_t rtdm_read_handler(struct rtdm_fd *fd, void __user *buf, size_t size); + +/** + * Write handler + * + * @param[in] fd File descriptor + * @param[in] buf Output buffer as passed by the user + * @param[in] size Number of bytes the user requests to write + * + * @return On success, the number of bytes written. On failure return + * either -ENOSYS, to request that this handler be called again from the + * opposite realtime/non-realtime context, or another negative error code. + * + * @see @c write() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +ssize_t rtdm_write_handler(struct rtdm_fd *fd, const void __user *buf, size_t size); + +/** + * Receive message handler + * + * @param[in] fd File descriptor + * @param[in,out] msg Message descriptor as passed by the user, automatically + * mirrored to safe kernel memory in case of user mode call + * @param[in] flags Message flags as passed by the user + * + * @return On success, the number of bytes received. On failure return + * either -ENOSYS, to request that this handler be called again from the + * opposite realtime/non-realtime context, or another negative error code. + * + * @see @c recvmsg() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +ssize_t rtdm_recvmsg_handler(struct rtdm_fd *fd, struct user_msghdr *msg, int flags); + +/** + * Transmit message handler + * + * @param[in] fd File descriptor + * @param[in] msg Message descriptor as passed by the user, automatically + * mirrored to safe kernel memory in case of user mode call + * @param[in] flags Message flags as passed by the user + * + * @return On success, the number of bytes transmitted. On failure return + * either -ENOSYS, to request that this handler be called again from the + * opposite realtime/non-realtime context, or another negative error code. + * + * @see @c sendmsg() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + */ +ssize_t rtdm_sendmsg_handler(struct rtdm_fd *fd, const struct user_msghdr *msg, int flags); + +/** + * Select handler + * + * @param[in] fd File descriptor + * @param selector Pointer to the selector structure + * @param type Type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a + * XNSELECT_EXCEPT) + * @param index Index of the file descriptor + * + * @return 0 on success. On failure, a negative error code is + * returned. + * + * @see @c select() in POSIX.1-2001, + * http://pubs.opengroup.org/onlinepubs/007908799/xsh/select.html + */ +int rtdm_select_handler(struct rtdm_fd *fd, struct xnselector *selector, + unsigned int type, unsigned int index); + +/** + * Memory mapping handler + * + * @param[in] fd File descriptor + * @param[in] vma Virtual memory area descriptor + * + * @return 0 on success. On failure, a negative error code is + * returned. 
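+ *
+ * A minimal sketch mapping a driver-owned buffer into the calling
+ * process; my_buf is a hypothetical logical (kmalloc-based) kernel
+ * buffer, remapped here with the rtdm_mmap_kmem() helper:
+ * @code
+ * static int my_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+ * {
+ *	return rtdm_mmap_kmem(vma, my_buf);
+ * }
+ * @endcode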
+ * + * @see @c mmap() in POSIX.1-2001, + * http://pubs.opengroup.org/onlinepubs/7908799/xsh/mmap.html + * + * @note The address hint passed to the mmap() request is deliberately + * ignored by RTDM. + */ +int rtdm_mmap_handler(struct rtdm_fd *fd, struct vm_area_struct *vma); + +/** + * Allocate mapping region in address space + * + * When present, this optional handler should return the start address + * of a free region in the process's address space, large enough to + * cover the ongoing mmap() operation. If unspecified, the default + * architecture-defined handler is invoked. + * + * Most drivers can omit this handler, except on MMU-less platforms + * (see second note). + * + * @param[in] fd File descriptor + * @param[in] len Length of the requested region + * @param[in] pgoff Page frame number to map to (see second note). + * @param[in] flags Requested mapping flags + * + * @return The start address of the mapping region on success. On + * failure, a negative error code should be returned, with -ENOSYS + * meaning that the driver does not want to provide such information, + * in which case the ongoing mmap() operation will fail. + * + * @note The address hint passed to the mmap() request is deliberately + * ignored by RTDM, and therefore not passed to this handler. + * + * @note On MMU-less platforms, this handler is required because RTDM + * issues mapping requests over a shareable character device + * internally. In such context, the RTDM core may pass a null @a pgoff + * argument to the handler, for probing for the logical start address + * of the memory region to map to. Otherwise, when @a pgoff is + * non-zero, pgoff << PAGE_SHIFT is usually returned. + */ +unsigned long +rtdm_get_unmapped_area_handler(struct rtdm_fd *fd, + unsigned long len, unsigned long pgoff, + unsigned long flags); +/** + * @anchor rtdm_fd_ops + * @brief RTDM file operation descriptor. + * + * This structure describes the operations available with a RTDM + * device, defining handlers for submitting I/O requests. Those + * handlers are implemented by RTDM device drivers. + */ +struct rtdm_fd_ops { + /** See rtdm_open_handler(). */ + int (*open)(struct rtdm_fd *fd, int oflags); + /** See rtdm_socket_handler(). */ + int (*socket)(struct rtdm_fd *fd, int protocol); + /** See rtdm_close_handler(). */ + void (*close)(struct rtdm_fd *fd); + /** See rtdm_ioctl_handler(). */ + int (*ioctl_rt)(struct rtdm_fd *fd, + unsigned int request, void __user *arg); + /** See rtdm_ioctl_handler(). */ + int (*ioctl_nrt)(struct rtdm_fd *fd, + unsigned int request, void __user *arg); + /** See rtdm_read_handler(). */ + ssize_t (*read_rt)(struct rtdm_fd *fd, + void __user *buf, size_t size); + /** See rtdm_read_handler(). */ + ssize_t (*read_nrt)(struct rtdm_fd *fd, + void __user *buf, size_t size); + /** See rtdm_write_handler(). */ + ssize_t (*write_rt)(struct rtdm_fd *fd, + const void __user *buf, size_t size); + /** See rtdm_write_handler(). */ + ssize_t (*write_nrt)(struct rtdm_fd *fd, + const void __user *buf, size_t size); + /** See rtdm_recvmsg_handler(). */ + ssize_t (*recvmsg_rt)(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags); + /** See rtdm_recvmsg_handler(). */ + ssize_t (*recvmsg_nrt)(struct rtdm_fd *fd, + struct user_msghdr *msg, int flags); + /** See rtdm_sendmsg_handler(). */ + ssize_t (*sendmsg_rt)(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags); + /** See rtdm_sendmsg_handler(). 
*/ + ssize_t (*sendmsg_nrt)(struct rtdm_fd *fd, + const struct user_msghdr *msg, int flags); + /** See rtdm_select_handler(). */ + int (*select)(struct rtdm_fd *fd, + struct xnselector *selector, + unsigned int type, unsigned int index); + /** See rtdm_mmap_handler(). */ + int (*mmap)(struct rtdm_fd *fd, + struct vm_area_struct *vma); + /** See rtdm_get_unmapped_area_handler(). */ + unsigned long (*get_unmapped_area)(struct rtdm_fd *fd, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +}; + +/** @} File operation handlers */ + +struct rtdm_fd { + unsigned int magic; + struct rtdm_fd_ops *ops; + struct cobalt_ppd *owner; + unsigned int refs; + int ufd; + int minor; + int oflags; +#ifdef CONFIG_XENO_ARCH_SYS3264 + int compat; +#endif + bool stale; + struct list_head cleanup; + struct list_head next; /* in dev->openfd_list */ +}; + +#define RTDM_FD_MAGIC 0x52544446 + +#define RTDM_FD_COMPAT __COBALT_COMPAT_BIT +#define RTDM_FD_COMPATX __COBALT_COMPATX_BIT + +int __rtdm_anon_getfd(const char *name, int flags); + +void __rtdm_anon_putfd(int ufd); + +static inline struct cobalt_ppd *rtdm_fd_owner(const struct rtdm_fd *fd) +{ + return fd->owner; +} + +static inline int rtdm_fd_ufd(const struct rtdm_fd *fd) +{ + return fd->ufd; +} + +static inline int rtdm_fd_minor(const struct rtdm_fd *fd) +{ + return fd->minor; +} + +static inline int rtdm_fd_flags(const struct rtdm_fd *fd) +{ + return fd->oflags; +} + +#ifdef CONFIG_XENO_ARCH_SYS3264 +static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd) +{ + return fd->compat; +} +#else +static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd) +{ + return 0; +} +#endif + +int rtdm_fd_enter(struct rtdm_fd *rtdm_fd, int ufd, + unsigned int magic, struct rtdm_fd_ops *ops); + +int rtdm_fd_register(struct rtdm_fd *fd, int ufd); + +struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic); + +int rtdm_fd_lock(struct rtdm_fd *fd); + +void rtdm_fd_put(struct rtdm_fd *fd); + +void rtdm_fd_unlock(struct rtdm_fd *fd); + +int rtdm_fd_fcntl(int ufd, int cmd, ...); + +int rtdm_fd_ioctl(int ufd, unsigned int request, ...); + +ssize_t rtdm_fd_read(int ufd, void __user *buf, size_t size); + +ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size); + +int rtdm_fd_close(int ufd, unsigned int magic); + +ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags); + +int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, void __user *u_timeout, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg), + int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts)); + +int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, void __user *u_timeout, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg)); + +ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg, + int flags); + +int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg)); + +int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma, + void **u_addrp); + +int rtdm_fd_valid_p(int ufd); + +int rtdm_fd_select(int ufd, struct xnselector *selector, + unsigned int type); + +int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd, + struct rtdm_device *dev); + +void 
rtdm_device_flush_fds(struct rtdm_device *dev); + +void rtdm_fd_cleanup(struct cobalt_ppd *p); + +void rtdm_fd_init(void); + +#endif /* _COBALT_KERNEL_FD_H */ --- linux/include/xenomai/rtdm/cobalt.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/cobalt.h 2022-03-21 12:58:31.849864973 +0100 @@ -0,0 +1,33 @@ +/* + * This file is part of the Xenomai project. + * + * Copyright (C) 2013 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_COBALT_H +#define _COBALT_RTDM_COBALT_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#endif /* !_COBALT_RTDM_COBALT_H */ --- linux/include/xenomai/rtdm/analogy/rtdm_helpers.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/rtdm_helpers.h 2022-03-21 12:58:31.842865041 +0100 @@ -0,0 +1,143 @@ +/* + * Analogy for Linux, Operation system facilities + * + * Copyright (C) 1997-2000 David A. Schleef + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H +#define _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* --- Trace section --- */ +#define A4L_PROMPT "Analogy: " + +#define RTDM_SUBCLASS_ANALOGY 0 + +#define __a4l_err(fmt, args...) rtdm_printk(KERN_ERR A4L_PROMPT fmt, ##args) +#define __a4l_warn(fmt, args...) rtdm_printk(KERN_WARNING A4L_PROMPT fmt, ##args) + +#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE +#define __a4l_info(fmt, args...) trace_printk(fmt, ##args) +#else +#define __a4l_info(fmt, args...) \ + rtdm_printk(KERN_INFO A4L_PROMPT "%s: " fmt, __FUNCTION__, ##args) +#endif + +#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG +#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE +#define __a4l_dbg(level, debug, fmt, args...) \ + do { \ + if ((debug) >= (level)) \ + trace_printk(fmt, ##args); \ + } while (0) +#else +#define __a4l_dbg(level, debug, fmt, args...) 
\ + do { \ + if ((debug) >= (level)) \ + rtdm_printk(KERN_DEBUG A4L_PROMPT "%s: " fmt, __FUNCTION__ , ##args); \ + } while (0) +#endif + +#define core_dbg CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_LEVEL +#define drv_dbg CONFIG_XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL + +#else /* !CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */ + +#define __a4l_dbg(level, debug, fmt, args...) + +#endif /* CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */ + +#define __a4l_dev_name(dev) \ + (dev->driver == NULL) ? "unattached dev" : dev->driver->board_name + +#define a4l_err(dev, fmt, args...) \ + __a4l_err("%s: " fmt, __a4l_dev_name(dev), ##args) + +#define a4l_warn(dev, fmt, args...) \ + __a4l_warn("%s: " fmt, __a4l_dev_name(dev), ##args) + +#define a4l_info(dev, fmt, args...) \ + __a4l_info("%s: " fmt, __a4l_dev_name(dev), ##args) + +#define a4l_dbg(level, debug, dev, fmt, args...) \ + __a4l_dbg(level, debug, "%s: " fmt, __a4l_dev_name(dev), ##args) + + +/* --- Time section --- */ +static inline void a4l_udelay(unsigned int us) +{ + rtdm_task_busy_sleep(((nanosecs_rel_t) us) * 1000); +} + +/* Function which gives absolute time */ +nanosecs_abs_t a4l_get_time(void); + +/* Function for setting up the absolute time recovery */ +void a4l_init_time(void); + +/* --- IRQ section --- */ +#define A4L_IRQ_DISABLED 0 + +typedef int (*a4l_irq_hdlr_t) (unsigned int irq, void *d); + +struct a4l_irq_descriptor { + /* These fields are useful to launch the IRQ trampoline; + that is the reason why a structure has been defined */ + a4l_irq_hdlr_t handler; + unsigned int irq; + void *cookie; + rtdm_irq_t rtdm_desc; +}; + +int __a4l_request_irq(struct a4l_irq_descriptor * dsc, + unsigned int irq, + a4l_irq_hdlr_t handler, + unsigned long flags, void *cookie); +int __a4l_free_irq(struct a4l_irq_descriptor * dsc); + +/* --- Synchronization section --- */ +#define __NRT_WAITER 1 +#define __RT_WAITER 2 +#define __EVT_PDING 3 + +struct a4l_sync { + unsigned long status; + rtdm_event_t rtdm_evt; + rtdm_nrtsig_t nrt_sig; + wait_queue_head_t wq; +}; + +#define a4l_select_sync(snc, slr, type, fd) \ + rtdm_event_select(&((snc)->rtdm_evt), slr, type, fd) + +int a4l_init_sync(struct a4l_sync * snc); +void a4l_cleanup_sync(struct a4l_sync * snc); +void a4l_flush_sync(struct a4l_sync * snc); +int a4l_wait_sync(struct a4l_sync * snc, int rt); +int a4l_timedwait_sync(struct a4l_sync * snc, + int rt, unsigned long long ns_timeout); +void a4l_signal_sync(struct a4l_sync * snc); + +#endif /* !_COBALT_RTDM_ANALOGY_RTDM_HELPERS_H */ --- linux/include/xenomai/rtdm/analogy/subdevice.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/subdevice.h 2022-03-21 12:58:31.835865109 +0100 @@ -0,0 +1,118 @@ +/** + * @file + * Analogy for Linux, subdevice related features + * + * Copyright (C) 1997-2000 David A. Schleef + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_SUBDEVICE_H +#define _COBALT_RTDM_ANALOGY_SUBDEVICE_H + +#include +#include +#include +#include + +/* --- Subdevice descriptor structure --- */ + +struct a4l_device; +struct a4l_buffer; + +/*! + * @brief Structure describing the subdevice + * @see a4l_add_subd() + */ + +struct a4l_subdevice { + + struct list_head list; + /**< List stuff */ + + struct a4l_device *dev; + /**< Containing device */ + + unsigned int idx; + /**< Subdevice index */ + + struct a4l_buffer *buf; + /**< Linked buffer */ + + /* Subdevice's status (busy, linked?) */ + unsigned long status; + /**< Subdevice's status */ + + /* Descriptors stuff */ + unsigned long flags; + /**< Type flags */ + struct a4l_channels_desc *chan_desc; + /**< Tab of channels descriptors pointers */ + struct a4l_rngdesc *rng_desc; + /**< Tab of ranges descriptors pointers */ + struct a4l_cmd_desc *cmd_mask; + /**< Command capabilities mask */ + + /* Functions stuff */ + int (*insn_read) (struct a4l_subdevice *, struct a4l_kernel_instruction *); + /**< Callback for the instruction "read" */ + int (*insn_write) (struct a4l_subdevice *, struct a4l_kernel_instruction *); + /**< Callback for the instruction "write" */ + int (*insn_bits) (struct a4l_subdevice *, struct a4l_kernel_instruction *); + /**< Callback for the instruction "bits" */ + int (*insn_config) (struct a4l_subdevice *, struct a4l_kernel_instruction *); + /**< Callback for the configuration instruction */ + int (*do_cmd) (struct a4l_subdevice *, struct a4l_cmd_desc *); + /**< Callback for command handling */ + int (*do_cmdtest) (struct a4l_subdevice *, struct a4l_cmd_desc *); + /**< Callback for command checking */ + void (*cancel) (struct a4l_subdevice *); + /**< Callback for asynchronous transfer cancellation */ + void (*munge) (struct a4l_subdevice *, void *, unsigned long); + /**< Callback for munge operation */ + int (*trigger) (struct a4l_subdevice *, lsampl_t); + /**< Callback for trigger operation */ + + char priv[0]; + /**< Private data */ +}; + +/* --- Subdevice related functions and macros --- */ + +struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice * sb, int idx); +struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice * sb, int chidx, int rngidx); +int a4l_check_chanlist(struct a4l_subdevice * subd, + unsigned char nb_chan, unsigned int *chans); + +#define a4l_subd_is_input(x) ((A4L_SUBD_MASK_READ & (x)->flags) != 0) +/* The following macro considers that a DIO subdevice is firstly an + output subdevice */ +#define a4l_subd_is_output(x) \ + ((A4L_SUBD_MASK_WRITE & (x)->flags) != 0 || \ + (A4L_SUBD_DIO & (x)->flags) != 0) + +/* --- Upper layer functions --- */ + +struct a4l_subdevice * a4l_get_subd(struct a4l_device *dev, int idx); +struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv, + void (*setup)(struct a4l_subdevice *)); +int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice * subd); +int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg); + +#endif /* !_COBALT_RTDM_ANALOGY_SUBDEVICE_H */ --- 
linux/include/xenomai/rtdm/analogy/context.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/context.h 2022-03-21 12:58:31.827865187 +0100 @@ -0,0 +1,48 @@ +/* + * Analogy for Linux, context structure / macros declarations + * + * Copyright (C) 1997-2000 David A. Schleef + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_CONTEXT_H +#define _COBALT_RTDM_ANALOGY_CONTEXT_H + +#include + +struct a4l_device; +struct a4l_buffer; + +struct a4l_device_context { + /* The adequate device pointer + (retrieved thanks to minor at open time) */ + struct a4l_device *dev; + + /* The buffer structure contains everything to transfer data + from asynchronous acquisition operations on a specific + subdevice */ + struct a4l_buffer *buffer; +}; + +static inline int a4l_get_minor(struct a4l_device_context *cxt) +{ + /* Get a pointer on the container structure */ + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + /* Get the minor index */ + return rtdm_fd_minor(fd); +} + +#endif /* !_COBALT_RTDM_ANALOGY_CONTEXT_H */ --- linux/include/xenomai/rtdm/analogy/instruction.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/instruction.h 2022-03-21 12:58:31.820865256 +0100 @@ -0,0 +1,45 @@ +/* + * Analogy for Linux, instruction related features + * + * Copyright (C) 1997-2000 David A. Schleef + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_RTDM_ANALOGY_INSTRUCTION_H +#define _COBALT_RTDM_ANALOGY_INSTRUCTION_H + +struct a4l_kernel_instruction { + unsigned int type; + unsigned int idx_subd; + unsigned int chan_desc; + unsigned int data_size; + void *data; + void *__udata; +}; + +struct a4l_kernel_instruction_list { + unsigned int count; + struct a4l_kernel_instruction *insns; + a4l_insn_t *__uinsns; +}; + +/* Instruction related functions */ + +/* Upper layer functions */ +int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg); + +#endif /* !_COBALT_RTDM_ANALOGY_INSTRUCTION_H */ --- linux/include/xenomai/rtdm/analogy/channel_range.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/channel_range.h 2022-03-21 12:58:31.812865334 +0100 @@ -0,0 +1,272 @@ +/** + * @file + * Analogy for Linux, channel, range related features + * + * Copyright (C) 1997-2000 David A. Schleef + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H +#define _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H + +#include + +/** + * @ingroup analogy_driver_facilities + * @defgroup analogy_channel_range Channels and ranges + * + * Channels + * + * According to the Analogy nomenclature, the channel is the elementary + * acquisition entity. One channel is supposed to acquire one data item at + * a time. A channel can be: + * - an analog input or an analog output; + * - a digital input or a digital output; + * + * Channels are defined by their type and by some other + * characteristics like: + * - their resolutions for analog channels (which usually range from + 8 to 32 bits); + * - their references; + * + * Such parameters must be declared for each channel composing a + * subdevice. The structure a4l_channel (struct a4l_channel) is used to + * define one channel. + * + * Another structure named a4l_channels_desc (struct a4l_channels_desc) + * gathers all channels for a specific subdevice. This latter + * structure also stores: + * - the channels count; + * - the channels declaration mode (A4L_CHAN_GLOBAL_CHANDESC or + A4L_CHAN_PERCHAN_CHANDESC): if all the channels composing a + subdevice are identical, there is no need to declare the + parameters for each channel; the global declaration mode eases + the structure composition.
+ * + * Usually the channels descriptor looks like this: + * @verbatim +struct a4l_channels_desc example_chan = { + mode: A4L_CHAN_GLOBAL_CHANDESC, -> Global declaration + mode is set + length: 8, -> 8 channels + chans: { + {A4L_CHAN_AREF_GROUND, 16}, -> Each channel is 16 bits + wide with the ground as + reference + }, +}; +@endverbatim + * + * Ranges + * + * So as to perform conversion from logical values acquired by the + * device to physical units, some range structure(s) must be declared + * on the driver side. + * + * Such structures contain: + * - the physical unit type (Volt, Ampere, none); + * - the minimal and maximal values; + * + * These range structures must be associated with the channels at + * subdevice registration time as a channel can work with many + * ranges. At configuration time (thanks to an Analogy command), one + * range will be selected for each enabled channel. + * + * Consequently, for each channel, the developer must declare all the + * possible ranges in a structure called struct a4l_rngtab. Here is an + * example: + * @verbatim +struct a4l_rngtab example_tab = { + length: 2, + rngs: { + RANGE_V(-5,5), + RANGE_V(-10,10), + }, +}; +@endverbatim + * + * For each subdevice, a specific structure is designed to gather all + * the ranges tabs of all the channels. In this structure, called + * struct a4l_rngdesc, three fields must be filled: + * - the declaration mode (A4L_RNG_GLOBAL_RNGDESC or + * A4L_RNG_PERCHAN_RNGDESC); + * - the number of ranges tabs; + * - the tab of ranges tabs pointers; + * + * Most of the time, the channels which belong to the same subdevice + * use the same set of ranges. So, there is no need to declare the + * same ranges for each channel. A macro is defined to prevent + * redundant declarations: RNG_GLOBAL(). + * + * Here is an example: + * @verbatim +struct a4l_rngdesc example_rng = RNG_GLOBAL(example_tab); +@endverbatim + * + * @{ + */ + + +/* --- Channel section --- */ + +/*! + * @anchor A4L_CHAN_AREF_xxx @name Channel reference + * @brief Flags to define the channel's reference + * @{ + */ + +/** + * Ground reference + */ +#define A4L_CHAN_AREF_GROUND 0x1 +/** + * Common reference + */ +#define A4L_CHAN_AREF_COMMON 0x2 +/** + * Differential reference + */ +#define A4L_CHAN_AREF_DIFF 0x4 +/** + * Misc reference + */ +#define A4L_CHAN_AREF_OTHER 0x8 + + /*! @} A4L_CHAN_AREF_xxx */ + +/** + * Internal use flag (must not be used by driver developer) + */ +#define A4L_CHAN_GLOBAL 0x10 + +/*! + * @brief Structure describing some channel's characteristics + */ + +struct a4l_channel { + unsigned long flags; /*!< Channel flags to define the reference. */ + unsigned long nb_bits; /*!< Channel resolution. */ +}; + +/*! + * @anchor A4L_CHAN_xxx @name Channels declaration mode + * @brief Constant to define whether the channels in a descriptor are + * identical + * @{ + */ + +/** + * Global declaration, the set contains channels with similar + * characteristics + */ +#define A4L_CHAN_GLOBAL_CHANDESC 0 +/** + * Per channel declaration, the descriptor gathers different channels + */ +#define A4L_CHAN_PERCHAN_CHANDESC 1 + + /*! @} A4L_CHAN_xxx */ + +/*! + * @brief Structure describing a channels set + */ + +struct a4l_channels_desc { + unsigned long mode; /*!< Declaration mode (global or per channel) */ + unsigned long length; /*!< Channels count */ + struct a4l_channel chans[]; /*!< Channels tab */ +}; + +/** + * Internal use flag (must not be used by driver developer) + */ +#define A4L_RNG_GLOBAL 0x8 + +/*!
+ * @brief Structure describing a (unique) range + */ + +struct a4l_range { + long min; /*!< Minimal value */ + long max; /*!< Maximal value */ + unsigned long flags; /*!< Range flags (unit, etc.) */ +}; + +/** + * Macro to declare a (unique) range with no unit defined + */ +#define RANGE(x,y) {(x * A4L_RNG_FACTOR), (y * A4L_RNG_FACTOR), \ + A4L_RNG_NO_UNIT} +/** + * Macro to declare a (unique) range in Volt + */ +#define RANGE_V(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \ + A4L_RNG_VOLT_UNIT} +/** + * Macro to declare a (unique) range in milliAmpere + */ +#define RANGE_mA(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \ + A4L_RNG_MAMP_UNIT} +/** + * Macro to declare a (unique) range in some external reference + */ +#define RANGE_ext(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \ + A4L_RNG_EXT_UNIT} + + +/* Ranges tab descriptor */ +struct a4l_rngtab { + unsigned char length; + struct a4l_range rngs[]; +}; + +/** + * Constant to define a ranges descriptor as global (inter-channel) + */ +#define A4L_RNG_GLOBAL_RNGDESC 0 +/** + * Constant to define a ranges descriptor as specific for a channel + */ +#define A4L_RNG_PERCHAN_RNGDESC 1 + +/* Global ranges descriptor */ +struct a4l_rngdesc { + unsigned char mode; + unsigned char length; + struct a4l_rngtab *rngtabs[]; +}; + +/** + * Macro to declare a ranges global descriptor in one line + */ +#define RNG_GLOBAL(x) { \ + .mode = A4L_RNG_GLOBAL_RNGDESC, \ + .length = 1, \ + .rngtabs = {&(x)}, \ +} + +extern struct a4l_rngdesc a4l_range_bipolar10; +extern struct a4l_rngdesc a4l_range_bipolar5; +extern struct a4l_rngdesc a4l_range_unipolar10; +extern struct a4l_rngdesc a4l_range_unipolar5; +extern struct a4l_rngdesc a4l_range_unknown; +extern struct a4l_rngdesc a4l_range_fake; + +#define range_digital a4l_range_unipolar5 + +/*! @} channelrange */ + +#endif /* !_COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H */ --- linux/include/xenomai/rtdm/analogy/driver.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/driver.h 2022-03-21 12:58:31.805865402 +0100 @@ -0,0 +1,74 @@ +/** + * @file + * Analogy for Linux, driver facilities + * + * Copyright (C) 1997-2000 David A. Schleef + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_DRIVER_H +#define _COBALT_RTDM_ANALOGY_DRIVER_H + +#include +#include +#include +#include + +struct seq_file; +struct a4l_link_desc; +struct a4l_device; + +/** Structure containing driver declaration data.
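+ *
+ * A minimal sketch of such a declaration; the names, the attach/detach
+ * callbacks and struct my_priv are hypothetical:
+ * @code
+ * static struct a4l_driver my_a4l_drv = {
+ *	.owner = THIS_MODULE,
+ *	.board_name = "my_board",
+ *	.driver_name = "my_a4l_driver",
+ *	.privdata_size = sizeof(struct my_priv),
+ *	.attach = my_attach,
+ *	.detach = my_detach,
+ * };
+ * @endcode
+ * Such a descriptor would typically be registered with
+ * a4l_register_drv() at module init time and removed with
+ * a4l_unregister_drv() on exit.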
+ * + * @see rt_task_inquire() + */ +/* Analogy driver descriptor */ +struct a4l_driver { + + /* List stuff */ + struct list_head list; + /**< List stuff */ + + /* Visible description stuff */ + struct module *owner; + /**< Pointer to module containing the code */ + unsigned int flags; + /**< Type / status driver's flags */ + char *board_name; + /**< Board name */ + char *driver_name; + /**< driver name */ + int privdata_size; + /**< Size of the driver's private data */ + + /* Init/destroy procedures */ + int (*attach) (struct a4l_device *, struct a4l_link_desc *); + /**< Attach procedure */ + int (*detach) (struct a4l_device *); + /**< Detach procedure */ + +}; + +/* Driver list related functions */ + +int a4l_register_drv(struct a4l_driver * drv); +int a4l_unregister_drv(struct a4l_driver * drv); +int a4l_lct_drv(char *pin, struct a4l_driver ** pio); +#ifdef CONFIG_PROC_FS +int a4l_rdproc_drvs(struct seq_file *p, void *data); +#endif /* CONFIG_PROC_FS */ + +#endif /* !_COBALT_RTDM_ANALOGY_DRIVER_H */ --- linux/include/xenomai/rtdm/analogy/device.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/device.h 2022-03-21 12:58:31.797865480 +0100 @@ -0,0 +1,67 @@ +/* + * Analogy for Linux, device related features + * + * Copyright (C) 1997-2000 David A. Schleef + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_DEVICE_H +#define _COBALT_RTDM_ANALOGY_DEVICE_H + +#include +#include +#include + +#define A4L_NB_DEVICES 10 + +#define A4L_DEV_ATTACHED_NR 0 + +struct a4l_device { + + /* Spinlock for global device use */ + rtdm_lock_t lock; + + /* Device specific flags */ + unsigned long flags; + + /* Driver assigned to this device thanks to attaching + procedure */ + struct a4l_driver *driver; + + /* Hidden description stuff */ + struct list_head subdvsq; + + /* Context-dependent stuff */ + struct a4l_transfer transfer; + + /* Private data useful for drivers functioning */ + void *priv; +}; + +/* --- Devices tab related functions --- */ +void a4l_init_devs(void); +int a4l_check_cleanup_devs(void); +int a4l_rdproc_devs(struct seq_file *p, void *data); + +/* --- Context related function / macro --- */ +void a4l_set_dev(struct a4l_device_context *cxt); +#define a4l_get_dev(x) ((x)->dev) + +/* --- Upper layer functions --- */ +int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg); + +#endif /* !_COBALT_RTDM_ANALOGY_DEVICE_H */ --- linux/include/xenomai/rtdm/analogy/buffer.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/buffer.h 2022-03-21 12:58:31.790865548 +0100 @@ -0,0 +1,461 @@ +/* + * Analogy for Linux, buffer related features + * + * Copyright (C) 1997-2000 David A. 
Schleef + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_BUFFER_H +#define _COBALT_RTDM_ANALOGY_BUFFER_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/* --- Events bits / flags --- */ + +#define A4L_BUF_EOBUF_NR 0 +#define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR) + +#define A4L_BUF_ERROR_NR 1 +#define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR) + +#define A4L_BUF_EOA_NR 2 +#define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR) + +/* --- Status bits / flags --- */ + +#define A4L_BUF_BULK_NR 8 +#define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR) + +#define A4L_BUF_MAP_NR 9 +#define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR) + + +/* Buffer descriptor structure */ +struct a4l_buffer { + + /* Added by the structure update */ + struct a4l_subdevice *subd; + + /* Buffer's first virtual page pointer */ + void *buf; + + /* Buffer's global size */ + unsigned long size; + /* Table containing the buffer's page pointers */ + unsigned long *pg_list; + + /* RT/NRT synchronization element */ + struct a4l_sync sync; + + /* Counters needed for transfer */ + unsigned long end_count; + unsigned long prd_count; + unsigned long cns_count; + unsigned long tmp_count; + + /* Status + events occurring during transfer */ + unsigned long flags; + + /* Command in progress */ + struct a4l_cmd_desc *cur_cmd; + + /* Munge counter */ + unsigned long mng_count; + + /* Threshold below which the user process should not be + awakened */ + unsigned long wake_count; +}; + +static inline void __dump_buffer_counters(struct a4l_buffer *buf) +{ + __a4l_dbg(1, core_dbg, "a4l_buffer=0x%p, p=0x%p \n", buf, buf->buf); + __a4l_dbg(1, core_dbg, "end=%06ld, prd=%06ld, cns=%06ld, tmp=%06ld \n", + buf->end_count, buf->prd_count, buf->cns_count, buf->tmp_count); +} + +/* --- Static inline functions related to + user<->kernel data transfers --- */ + +/* The function __produce is an inline function which copies data into + the asynchronous buffer and takes care of the non-contiguous issue + when looping. This function is used in read and write operations */ +static inline int __produce(struct a4l_device_context *cxt, + struct a4l_buffer *buf, void *pin, unsigned long count) +{ + unsigned long start_ptr = (buf->prd_count % buf->size); + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + unsigned long tmp_cnt = count; + int ret = 0; + + while (ret == 0 && tmp_cnt != 0) { + /* Check the data copy can be performed contiguously */ + unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+ buf->size - start_ptr : tmp_cnt; + + /* Perform the copy */ + if (cxt == NULL) + memcpy(buf->buf + start_ptr, pin, blk_size); + else + ret = rtdm_safe_copy_from_user(fd, + buf->buf + start_ptr, + pin, blk_size); + + /* Update pointers/counts */ + pin += blk_size; + tmp_cnt -= blk_size; + start_ptr = 0; + } + + return ret; +} + +/* The function __consume is an inline function which copies data from + the asynchronous buffer and takes care of the non-contiguous issue + when looping. This function is used in read and write operations */ +static inline int __consume(struct a4l_device_context *cxt, + struct a4l_buffer *buf, void *pout, unsigned long count) +{ + unsigned long start_ptr = (buf->cns_count % buf->size); + struct rtdm_fd *fd = rtdm_private_to_fd(cxt); + unsigned long tmp_cnt = count; + int ret = 0; + + while (ret == 0 && tmp_cnt != 0) { + /* Check the data copy can be performed contiguously */ + unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ? + buf->size - start_ptr : tmp_cnt; + + /* Perform the copy */ + if (cxt == NULL) + memcpy(pout, buf->buf + start_ptr, blk_size); + else + ret = rtdm_safe_copy_to_user(fd, + pout, + buf->buf + start_ptr, + blk_size); + + /* Update pointers/counts */ + pout += blk_size; + tmp_cnt -= blk_size; + start_ptr = 0; + } + + return ret; +} + +/* The function __munge is an inline function which calls the + subdevice specific munge callback on contiguous windows within the + whole buffer. This function is used in read and write operations */ +static inline void __munge(struct a4l_subdevice * subd, + void (*munge) (struct a4l_subdevice *, + void *, unsigned long), + struct a4l_buffer * buf, unsigned long count) +{ + unsigned long start_ptr = (buf->mng_count % buf->size); + unsigned long tmp_cnt = count; + + while (tmp_cnt != 0) { + /* Check the data copy can be performed contiguously */ + unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ? + buf->size - start_ptr : tmp_cnt; + + /* Perform the munge operation */ + munge(subd, buf->buf + start_ptr, blk_size); + + /* Update the start pointer and the count */ + tmp_cnt -= blk_size; + start_ptr = 0; + } +} + +/* The function __handle_event can only be called from process context + (not interrupt service routine). It allows the client process to + retrieve the buffer status which has been updated by the driver */ +static inline int __handle_event(struct a4l_buffer * buf) +{ + int ret = 0; + + /* The event "End of acquisition" must not be cleaned + before the complete flush of the buffer */ + if (test_bit(A4L_BUF_EOA_NR, &buf->flags)) + ret = -ENOENT; + + if (test_bit(A4L_BUF_ERROR_NR, &buf->flags)) + ret = -EPIPE; + + return ret; +} + +/* --- Counters management functions --- */ + +/* Here, we may wonder why we need more than two counters / pointers. + + Theoretically, we only need two counters (or two pointers): + - one which tells where the reader should be within the buffer + - one which tells where the writer should be within the buffer + + With these two counters (or pointers), we just have to check that + the writer does not overtake the reader inside the ring buffer + BEFORE any read / write operations. + + However, if one element is a DMA controller, we have to be more + careful. Generally a DMA transfer occurs like this: + DMA shot + |-> then DMA interrupt + |-> then DMA soft handler which checks the counter + + So, the checkings occur AFTER the write operations. + + Let's take an example: the reader is a software task and the writer + is a DMA controller. 
At the end of the DMA shot, the write counter + is higher than the read counter. Unfortunately, if a read operation + occurs between the DMA shot and the DMA interrupt, the handler + will not notice that an overflow occurred. + + That is why tmp_count comes into play: tmp_count records the + current read/consumer counter before the next DMA shot and once the + next DMA shot is done, we check that the updated writer/producer + counter is not higher than tmp_count. Thus we are sure that the DMA + writer has not overtaken the reader because it was not able to + overtake the n-1 value. */ + +static inline int __pre_abs_put(struct a4l_buffer * buf, unsigned long count) +{ + if (count - buf->tmp_count > buf->size) { + set_bit(A4L_BUF_ERROR_NR, &buf->flags); + return -EPIPE; + } + + buf->tmp_count = buf->cns_count; + + return 0; +} + +static inline int __pre_put(struct a4l_buffer * buf, unsigned long count) +{ + return __pre_abs_put(buf, buf->tmp_count + count); +} + +static inline int __pre_abs_get(struct a4l_buffer * buf, unsigned long count) +{ + /* The first time, we expect the buffer to be properly filled + before the trigger occurrence; by the way, we need tmp_count to + have been initialized and tmp_count is updated right here */ + if (buf->tmp_count == 0 || buf->cns_count == 0) + goto out; + + /* At the end of the acquisition, the user application has + written the defined amount of data into the buffer; so the + last time, the DMA channel can easily overtake the tmp + frontier because no more data were sent from user space; + therefore no useless alarm should be sent */ + if (buf->end_count != 0 && (long)(count - buf->end_count) > 0) + goto out; + + /* Once these exceptions are passed, we check that the DMA + transfer has not overtaken the last record of the production + count (tmp_count was updated with prd_count the last time + __pre_abs_get was called).
We must understand that we cannot + compare the current DMA count with the current production + count because even if, right now, the production count is + higher than the DMA count, it does not mean that the DMA count + was not greater a few cycles before; in such case, the DMA + channel would have retrieved the wrong data */ + if ((long)(count - buf->tmp_count) > 0) { + set_bit(A4L_BUF_ERROR_NR, &buf->flags); + return -EPIPE; + } + +out: + buf->tmp_count = buf->prd_count; + + return 0; +} + +static inline int __pre_get(struct a4l_buffer * buf, unsigned long count) +{ + return __pre_abs_get(buf, buf->tmp_count + count); +} + +static inline int __abs_put(struct a4l_buffer * buf, unsigned long count) +{ + unsigned long old = buf->prd_count; + + if ((long)(buf->prd_count - count) >= 0) + return -EINVAL; + + buf->prd_count = count; + + if ((old / buf->size) != (count / buf->size)) + set_bit(A4L_BUF_EOBUF_NR, &buf->flags); + + if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0) + set_bit(A4L_BUF_EOA_NR, &buf->flags); + + return 0; +} + +static inline int __put(struct a4l_buffer * buf, unsigned long count) +{ + return __abs_put(buf, buf->prd_count + count); +} + +static inline int __abs_get(struct a4l_buffer * buf, unsigned long count) +{ + unsigned long old = buf->cns_count; + + if ((long)(buf->cns_count - count) >= 0) + return -EINVAL; + + buf->cns_count = count; + + if ((old / buf->size) != count / buf->size) + set_bit(A4L_BUF_EOBUF_NR, &buf->flags); + + if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0) + set_bit(A4L_BUF_EOA_NR, &buf->flags); + + return 0; +} + +static inline int __get(struct a4l_buffer * buf, unsigned long count) +{ + return __abs_get(buf, buf->cns_count + count); +} + +static inline unsigned long __count_to_put(struct a4l_buffer * buf) +{ + unsigned long ret; + + if ((long) (buf->size + buf->cns_count - buf->prd_count) > 0) + ret = buf->size + buf->cns_count - buf->prd_count; + else + ret = 0; + + return ret; +} + +static inline unsigned long __count_to_get(struct a4l_buffer * buf) +{ + unsigned long ret; + + /* If the acquisition is unlimited (end_count == 0), we must + not take into account end_count */ + if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0) + ret = buf->prd_count; + else + ret = buf->end_count; + + if ((long)(ret - buf->cns_count) > 0) + ret -= buf->cns_count; + else + ret = 0; + + return ret; +} + +static inline unsigned long __count_to_end(struct a4l_buffer * buf) +{ + unsigned long ret = buf->end_count - buf->cns_count; + + if (buf->end_count == 0) + return ULONG_MAX; + + return ((long)ret) < 0 ? 
0 : ret; +} + +/* --- Buffer internal functions --- */ + +int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size); + +void a4l_free_buffer(struct a4l_buffer *buf_desc); + +void a4l_init_buffer(struct a4l_buffer * buf_desc); + +void a4l_cleanup_buffer(struct a4l_buffer * buf_desc); + +int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd); + +void a4l_cancel_buffer(struct a4l_device_context *cxt); + +int a4l_buf_prepare_absput(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_commit_absput(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_prepare_put(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_commit_put(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_put(struct a4l_subdevice *subd, + void *bufdata, unsigned long count); + +int a4l_buf_prepare_absget(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_commit_absget(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_prepare_get(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_commit_get(struct a4l_subdevice *subd, + unsigned long count); + +int a4l_buf_get(struct a4l_subdevice *subd, + void *bufdata, unsigned long count); + +int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts); + +unsigned long a4l_buf_count(struct a4l_subdevice *subd); + +/* --- Current Command management function --- */ + +static inline struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice *subd) +{ + return (subd->buf) ? subd->buf->cur_cmd : NULL; +} + +/* --- Munge related function --- */ + +int a4l_get_chan(struct a4l_subdevice *subd); + +/* --- IOCTL / FOPS functions --- */ + +int a4l_ioctl_mmap(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg); +int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg); +ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes); +ssize_t a4l_write_buffer(struct a4l_device_context * cxt, const void *bufdata, size_t nbytes); +int a4l_select(struct a4l_device_context *cxt, + rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned fd_index); + +#endif /* !_COBALT_RTDM_ANALOGY_BUFFER_H */ --- linux/include/xenomai/rtdm/analogy/command.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/command.h 2022-03-21 12:58:31.783865616 +0100 @@ -0,0 +1,35 @@ +/** + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
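For the buffer interface declared above, the usual producer-side pattern in an acquisition driver is short enough to sketch: push the freshly converted block, then notify sleeping readers (or flag an error). The subdevice pointer and sample block below are hypothetical, and error handling is reduced to raising A4L_BUF_ERROR; this is an illustration, not code from the patch.

static void ai_push_block(struct a4l_subdevice *subd,
			  void *samples, unsigned long nbytes)
{
	if (a4l_buf_put(subd, samples, nbytes) < 0)
		a4l_buf_evt(subd, A4L_BUF_ERROR);	/* report the failure to readers */
	else
		a4l_buf_evt(subd, 0);			/* just wake up readers */
}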
+ */ +#ifndef _COBALT_RTDM_ANALOGY_COMMAND_H +#define _COBALT_RTDM_ANALOGY_COMMAND_H + +#include +#include + +#define CR_CHAN(a) CHAN(a) +#define CR_RNG(a) (((a)>>16)&0xff) +#define CR_AREF(a) (((a)>>24)&0xf) + +/* --- Command related function --- */ +void a4l_free_cmddesc(struct a4l_cmd_desc * desc); + +/* --- Upper layer functions --- */ +int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc); +int a4l_ioctl_cmd(struct a4l_device_context * cxt, void *arg); + +#endif /* !_COBALT_RTDM_ANALOGY_COMMAND_H */ --- linux/include/xenomai/rtdm/analogy/transfer.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/analogy/transfer.h 2022-03-21 12:58:31.775865694 +0100 @@ -0,0 +1,78 @@ +/* + * Analogy for Linux, transfer related features + * + * Copyright (C) 1997-2000 David A. Schleef + * Copyright (C) 2008 Alexis Berlemont + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_ANALOGY_TRANSFER_H +#define _COBALT_RTDM_ANALOGY_TRANSFER_H + +#include + +/* IRQ types */ +#define A4L_IRQ_DISABLED 0 + +/* Fields init values */ +#define A4L_IRQ_UNUSED (unsigned int)((unsigned short)(~0)) +#define A4L_IDX_UNUSED (unsigned int)(~0) + +/* TODO: IRQ handling must leave transfer for os_facilities */ + +struct a4l_device; +/* Analogy transfer descriptor */ +struct a4l_transfer { + + /* Subdevices desc */ + unsigned int nb_subd; + struct a4l_subdevice **subds; + + /* Buffer stuff: the default size */ + unsigned int default_bufsize; + + /* IRQ in use */ + /* TODO: irq_desc should vanish */ + struct a4l_irq_descriptor irq_desc; +}; + +/* --- Proc function --- */ + +int a4l_rdproc_transfer(struct seq_file *p, void *data); + +/* --- Upper layer functions --- */ + +void a4l_presetup_transfer(struct a4l_device_context * cxt); +int a4l_setup_transfer(struct a4l_device_context * cxt); +int a4l_precleanup_transfer(struct a4l_device_context * cxt); +int a4l_cleanup_transfer(struct a4l_device_context * cxt); +int a4l_reserve_transfer(struct a4l_device_context * cxt, int idx_subd); +int a4l_init_transfer(struct a4l_device_context * cxt, struct a4l_cmd_desc * cmd); +int a4l_cancel_transfer(struct a4l_device_context * cxt, int idx_subd); +int a4l_cancel_transfers(struct a4l_device_context * cxt); + +ssize_t a4l_put(struct a4l_device_context * cxt, void *buf, size_t nbytes); +ssize_t a4l_get(struct a4l_device_context * cxt, void *buf, size_t nbytes); + +int a4l_request_irq(struct a4l_device *dev, + unsigned int irq, + a4l_irq_hdlr_t handler, + unsigned long flags, void *cookie); +int a4l_free_irq(struct a4l_device *dev, unsigned int irq); +unsigned int a4l_get_irq(struct a4l_device *dev); + +int a4l_ioctl_cancel(struct a4l_device_context * cxt, void *arg); + +#endif /* !_COBALT_RTDM_ANALOGY_TRANSFER_H */ --- linux/include/xenomai/rtdm/can.h 1970-01-01 01:00:00.000000000 +0100 +++ 
linux-patched/include/xenomai/rtdm/can.h 2022-03-21 12:58:31.768865763 +0100 @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2006 Wolfgang Grandegger + * + * Copyright (C) 2005, 2006 Sebastian Smolorz + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_RTDM_CAN_H +#define _COBALT_RTDM_CAN_H + +#include +#include +#include +#include +#include + +#endif /* _COBALT_RTDM_CAN_H */ --- linux/include/xenomai/rtdm/net.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/net.h 2022-03-21 12:58:31.761865831 +0100 @@ -0,0 +1,45 @@ +/* + * RTnet - real-time networking subsystem + * Copyright (C) 2005-2011 Jan Kiszka + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _COBALT_RTDM_NET_H +#define _COBALT_RTDM_NET_H + +#include +#include +#include + +struct rtnet_callback { + void (*func)(struct rtdm_fd *, void *); + void *arg; +}; + +#define RTNET_RTIOC_CALLBACK _IOW(RTIOC_TYPE_NETWORK, 0x12, \ + struct rtnet_callback) + +/* utility functions */ + +/* provided by rt_ipv4 */ +unsigned long rt_inet_aton(const char *ip); + +/* provided by rt_packet */ +int rt_eth_aton(unsigned char *addr_buf, const char *mac); + +#define RTNET_RTDM_VER 914 + +#endif /* _COBALT_RTDM_NET_H */ --- linux/include/xenomai/rtdm/autotune.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/rtdm/autotune.h 2022-03-21 12:58:31.753865909 +0100 @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2014 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_RTDM_AUTOTUNE_H +#define _COBALT_RTDM_AUTOTUNE_H + +#include +#include + +#endif /* !_COBALT_RTDM_AUTOTUNE_H */ --- linux/include/xenomai/cobalt/uapi/sched.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/sched.h 2022-03-21 12:58:32.206861492 +0100 @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2005 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_SCHED_H +#define _COBALT_UAPI_SCHED_H + +#include + +#define SCHED_COBALT 42 +#define SCHED_WEAK 43 + +#ifndef SCHED_SPORADIC +#define SCHED_SPORADIC 10 +#define sched_ss_low_priority sched_u.ss.__sched_low_priority +#define sched_ss_repl_period sched_u.ss.__sched_repl_period +#define sched_ss_init_budget sched_u.ss.__sched_init_budget +#define sched_ss_max_repl sched_u.ss.__sched_max_repl +#endif /* !SCHED_SPORADIC */ + +struct __sched_ss_param { + int __sched_low_priority; + struct __user_old_timespec __sched_repl_period; + struct __user_old_timespec __sched_init_budget; + int __sched_max_repl; +}; + +#define sched_rr_quantum sched_u.rr.__sched_rr_quantum + +struct __sched_rr_param { + struct __user_old_timespec __sched_rr_quantum; +}; + +#ifndef SCHED_TP +#define SCHED_TP 11 +#define sched_tp_partition sched_u.tp.__sched_partition +#endif /* !SCHED_TP */ + +struct __sched_tp_param { + int __sched_partition; +}; + +struct sched_tp_window { + struct __user_old_timespec offset; + struct __user_old_timespec duration; + int ptid; +}; + +enum { + sched_tp_install, + sched_tp_uninstall, + sched_tp_start, + sched_tp_stop, +}; + +struct __sched_config_tp { + int op; + int nr_windows; + struct sched_tp_window windows[0]; +}; + +#define sched_tp_confsz(nr_win) \ + (sizeof(struct __sched_config_tp) + nr_win * sizeof(struct sched_tp_window)) + +#ifndef SCHED_QUOTA +#define SCHED_QUOTA 12 +#define sched_quota_group sched_u.quota.__sched_group +#endif /* !SCHED_QUOTA */ + +struct __sched_quota_param { + int __sched_group; +}; + +enum { + sched_quota_add, + sched_quota_remove, + sched_quota_force_remove, + sched_quota_set, + sched_quota_get, +}; + +struct __sched_config_quota { + int op; + union { + struct { + int pshared; + } add; + struct { + int tgid; + } remove; + struct { + int tgid; + int quota; + int quota_peak; + } set; + struct { + int tgid; + } get; + }; + struct __sched_quota_info { + int tgid; + int quota; + int quota_peak; + int quota_sum; + } info; +}; + +#define sched_quota_confsz() sizeof(struct __sched_config_quota) + +struct sched_param_ex { + int sched_priority; + union { + struct __sched_ss_param ss; + struct __sched_rr_param rr; + struct __sched_tp_param tp; + struct __sched_quota_param quota; + } sched_u; +}; + +union sched_config { + struct __sched_config_tp tp; + struct __sched_config_quota quota; +}; + +#endif /* !_COBALT_UAPI_SCHED_H */ --- 
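The time-partitioning (SCHED_TP) configuration just defined above is easiest to read with a usage sketch: user code allocates a union sched_config sized with sched_tp_confsz(), fills the window array, and hands it to the scheduler. The two 100 ms windows and partition ids are invented; sched_setconfig_np() is the libcobalt call that normally consumes such a buffer (an assumption here, it is not part of this patch).

#include <stdlib.h>
#include <string.h>

static union sched_config *build_tp_plan(size_t *len)
{
	int nrwin = 2;
	union sched_config *cf;

	*len = sched_tp_confsz(nrwin);
	cf = malloc(*len);
	if (cf == NULL)
		return NULL;

	memset(cf, 0, *len);
	cf->tp.op = sched_tp_install;
	cf->tp.nr_windows = nrwin;
	cf->tp.windows[0].duration.tv_nsec = 100000000;	/* 100 ms for partition 0 */
	cf->tp.windows[0].ptid = 0;
	cf->tp.windows[1].offset.tv_nsec = 100000000;
	cf->tp.windows[1].duration.tv_nsec = 100000000;	/* 100 ms for partition 1 */
	cf->tp.windows[1].ptid = 1;

	return cf;	/* e.g. sched_setconfig_np(cpu, SCHED_TP, cf, *len) */
}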
linux/include/xenomai/cobalt/uapi/mutex.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/mutex.h 2022-03-21 12:58:32.199861560 +0100 @@ -0,0 +1,44 @@ +/* + * Written by Gilles Chanteperdrix . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_MUTEX_H +#define _COBALT_UAPI_MUTEX_H + +#include + +#define COBALT_MUTEX_MAGIC 0x86860303 + +struct cobalt_mutex_state { + atomic_t owner; + __u32 flags; +#define COBALT_MUTEX_COND_SIGNAL 0x00000001 +#define COBALT_MUTEX_ERRORCHECK 0x00000002 + __u32 ceiling; +}; + +union cobalt_mutex_union { + pthread_mutex_t native_mutex; + struct cobalt_mutex_shadow { + __u32 magic; + __u32 lockcnt; + __u32 state_offset; + xnhandle_t handle; + struct cobalt_mutexattr attr; + } shadow_mutex; +}; + +#endif /* !_COBALT_UAPI_MUTEX_H */ --- linux/include/xenomai/cobalt/uapi/kernel/synch.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/kernel/synch.h 2022-03-21 12:58:32.192861628 +0100 @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2001-2013 Philippe Gerum . + * Copyright (C) 2008, 2009 Jan Kiszka . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_SYNCH_H +#define _COBALT_UAPI_KERNEL_SYNCH_H + +#include + +/* Creation flags */ +#define XNSYNCH_FIFO 0x0 +#define XNSYNCH_PRIO 0x1 +#define XNSYNCH_PI 0x2 +#define XNSYNCH_DREORD 0x4 +#define XNSYNCH_OWNER 0x8 +#define XNSYNCH_PP 0x10 + +/* Fast lock API */ +static inline int xnsynch_fast_is_claimed(xnhandle_t handle) +{ + return (handle & XNSYNCH_FLCLAIM) != 0; +} + +static inline xnhandle_t xnsynch_fast_claimed(xnhandle_t handle) +{ + return handle | XNSYNCH_FLCLAIM; +} + +static inline xnhandle_t xnsynch_fast_ceiling(xnhandle_t handle) +{ + return handle | XNSYNCH_FLCEIL; +} + +static inline int +xnsynch_fast_owner_check(atomic_t *fastlock, xnhandle_t ownerh) +{ + return (xnhandle_get_id(atomic_read(fastlock)) == ownerh) ? 
+ 0 : -EPERM; +} + +static inline +int xnsynch_fast_acquire(atomic_t *fastlock, xnhandle_t new_ownerh) +{ + xnhandle_t h; + + h = atomic_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh); + if (h != XN_NO_HANDLE) { + if (xnhandle_get_id(h) == new_ownerh) + return -EBUSY; + + return -EAGAIN; + } + + return 0; +} + +static inline +int xnsynch_fast_release(atomic_t *fastlock, xnhandle_t cur_ownerh) +{ + return (xnhandle_t)atomic_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE) + == cur_ownerh; +} + +/* Local/shared property */ +static inline int xnsynch_is_shared(xnhandle_t handle) +{ + return (handle & XNSYNCH_PSHARED) != 0; +} + +#endif /* !_COBALT_UAPI_KERNEL_SYNCH_H */ --- linux/include/xenomai/cobalt/uapi/kernel/limits.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/kernel/limits.h 2022-03-21 12:58:32.184861706 +0100 @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2014 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_LIMITS_H +#define _COBALT_UAPI_KERNEL_LIMITS_H + +#define XNOBJECT_NAME_LEN 32 + +#endif /* !_COBALT_UAPI_KERNEL_LIMITS_H */ --- linux/include/xenomai/cobalt/uapi/kernel/types.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/kernel/types.h 2022-03-21 12:58:32.177861774 +0100 @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_TYPES_H +#define _COBALT_UAPI_KERNEL_TYPES_H + +#include +#include + +typedef __u64 xnticks_t; + +typedef __s64 xnsticks_t; + +typedef __u32 xnhandle_t; + +#define XN_NO_HANDLE ((xnhandle_t)0) +#define XN_HANDLE_INDEX_MASK ((xnhandle_t)0xf0000000) + +/* Fixed bits (part of the identifier) */ +#define XNSYNCH_PSHARED ((xnhandle_t)0x40000000) + +/* Transient bits (expressing a status) */ +#define XNSYNCH_FLCLAIM ((xnhandle_t)0x80000000) /* Contended. */ +#define XNSYNCH_FLCEIL ((xnhandle_t)0x20000000) /* Ceiling active. 
*/ + +#define XN_HANDLE_TRANSIENT_MASK (XNSYNCH_FLCLAIM|XNSYNCH_FLCEIL) + +/* + * Strip all special bits from the handle, only retaining the object + * index value in the registry. + */ +static inline xnhandle_t xnhandle_get_index(xnhandle_t handle) +{ + return handle & ~XN_HANDLE_INDEX_MASK; +} + +/* + * Strip the transient bits from the handle, only retaining the fixed + * part making the identifier. + */ +static inline xnhandle_t xnhandle_get_id(xnhandle_t handle) +{ + return handle & ~XN_HANDLE_TRANSIENT_MASK; +} + +/* + * Our representation of time specs at the kernel<->user interface + * boundary at the moment, until we have fully transitioned to a + * y2038-safe implementation in libcobalt. Once done, those legacy + * types will be removed. + */ +struct __user_old_timespec { + long tv_sec; + long tv_nsec; +}; + +struct __user_old_itimerspec { + struct __user_old_timespec it_interval; + struct __user_old_timespec it_value; +}; + +struct __user_old_timeval { + long tv_sec; + long tv_usec; +}; + +/* Lifted from include/uapi/linux/timex.h. */ +struct __user_old_timex { + unsigned int modes; /* mode selector */ + __kernel_long_t offset; /* time offset (usec) */ + __kernel_long_t freq; /* frequency offset (scaled ppm) */ + __kernel_long_t maxerror;/* maximum error (usec) */ + __kernel_long_t esterror;/* estimated error (usec) */ + int status; /* clock command/status */ + __kernel_long_t constant;/* pll time constant */ + __kernel_long_t precision;/* clock precision (usec) (read only) */ + __kernel_long_t tolerance;/* clock frequency tolerance (ppm) + * (read only) + */ + struct __user_old_timeval time; /* (read only, except for ADJ_SETOFFSET) */ + __kernel_long_t tick; /* (modified) usecs between clock ticks */ + + __kernel_long_t ppsfreq;/* pps frequency (scaled ppm) (ro) */ + __kernel_long_t jitter; /* pps jitter (us) (ro) */ + int shift; /* interval duration (s) (shift) (ro) */ + __kernel_long_t stabil; /* pps stability (scaled ppm) (ro) */ + __kernel_long_t jitcnt; /* jitter limit exceeded (ro) */ + __kernel_long_t calcnt; /* calibration intervals (ro) */ + __kernel_long_t errcnt; /* calibration errors (ro) */ + __kernel_long_t stbcnt; /* stability limit exceeded (ro) */ + + int tai; /* TAI offset (ro) */ + + int :32; int :32; int :32; int :32; + int :32; int :32; int :32; int :32; + int :32; int :32; int :32; +}; + +#endif /* !_COBALT_UAPI_KERNEL_TYPES_H */ --- linux/include/xenomai/cobalt/uapi/kernel/urw.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/kernel/urw.h 2022-03-21 12:58:32.170861843 +0100 @@ -0,0 +1,113 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
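The split between fixed and transient handle bits above is what the fast-lock helpers in synch.h rely on. The sketch below spells out the release test implied by xnsynch_fast_release(): a release can stay on the user-space fast path only while the lock word is exactly the caller's handle, i.e. no claim or ceiling bit is set. "cur" stands for the caller's own handle; this is an editor's illustration, not code from the patch.

static inline int fast_release_possible(atomic_t *fastlock, xnhandle_t cur)
{
	xnhandle_t word = (xnhandle_t)atomic_read(fastlock);

	/* Any transient bit forces the slow (syscall) path */
	return xnhandle_get_id(word) == cur &&
		(word & XN_HANDLE_TRANSIENT_MASK) == 0;
}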
+ */ +#ifndef _COBALT_UAPI_KERNEL_URW_H +#define _COBALT_UAPI_KERNEL_URW_H + +#include + +/* + * A restricted version of the kernel seqlocks with a slightly + * different interface, allowing for unsynced reads with concurrent + * write detection, without serializing writers. Caller should + * provide for proper locking to deal with concurrent updates. + * + * urw_t lock = URW_INITIALIZER; + * urwstate_t tmp; + * + * unsynced_read_block(&tmp, &lock) { + * (will redo until clean read)... + * } + * + * unsynced_write_block(&tmp, &lock) { + * ... + * } + * + * This code was inspired by Wolfgang Mauerer's linux/seqlock.h + * adaptation for Xenomai 2.6 to support the VDSO feature. + */ + +typedef struct { + __u32 sequence; +} urw_t; + +typedef struct { + __u32 token; + __u32 dirty; +} urwstate_t; + +#define URW_INITIALIZER { 0 } +#define DEFINE_URW(__name) urw_t __name = URW_INITIALIZER + +#ifndef READ_ONCE +#define READ_ONCE ACCESS_ONCE +#endif + +static inline void __try_read_start(const urw_t *urw, urwstate_t *tmp) +{ + __u32 token; +repeat: + token = READ_ONCE(urw->sequence); + smp_rmb(); + if (token & 1) { + cpu_relax(); + goto repeat; + } + + tmp->token = token; + tmp->dirty = 1; +} + +static inline void __try_read_end(const urw_t *urw, urwstate_t *tmp) +{ + smp_rmb(); + if (urw->sequence != tmp->token) { + __try_read_start(urw, tmp); + return; + } + + tmp->dirty = 0; +} + +static inline void __do_write_start(urw_t *urw, urwstate_t *tmp) +{ + urw->sequence++; + tmp->dirty = 1; + smp_wmb(); +} + +static inline void __do_write_end(urw_t *urw, urwstate_t *tmp) +{ + smp_wmb(); + tmp->dirty = 0; + urw->sequence++; +} + +static inline void unsynced_rw_init(urw_t *urw) +{ + urw->sequence = 0; +} + +#define unsynced_read_block(__tmp, __urw) \ + for (__try_read_start(__urw, __tmp); \ + (__tmp)->dirty; __try_read_end(__urw, __tmp)) + +#define unsynced_write_block(__tmp, __urw) \ + for (__do_write_start(__urw, __tmp); \ + (__tmp)->dirty; __do_write_end(__urw, __tmp)) + +#endif /* !_COBALT_UAPI_KERNEL_URW_H */ --- linux/include/xenomai/cobalt/uapi/kernel/vdso.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/kernel/vdso.h 2022-03-21 12:58:32.162861921 +0100 @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2009 Wolfgang Mauerer . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_VDSO_H +#define _COBALT_UAPI_KERNEL_VDSO_H + +#include + +/* + * I-pipe only. Dovetail enables the common vDSO for getting + * CLOCK_REALTIME timestamps from the out-of-band stage + * (XNVDSO_FEAT_HOST_REALTIME is cleared in this case). 
+ */ +struct xnvdso_hostrt_data { + __u64 wall_sec; + __u64 wtom_sec; + __u64 cycle_last; + __u64 mask; + __u32 wall_nsec; + __u32 wtom_nsec; + __u32 mult; + __u32 shift; + __u32 live; + urw_t lock; +}; + +/* + * Data shared between the Cobalt kernel and applications, which lives + * in the shared memory heap (COBALT_MEMDEV_SHARED). + * xnvdso_hostrt_data.features tells which data is present. Notice + * that struct xnvdso may only grow, but never shrink. + */ +struct xnvdso { + __u64 features; + /* XNVDSO_FEAT_HOST_REALTIME */ + struct xnvdso_hostrt_data hostrt_data; + /* XNVDSO_FEAT_WALLCLOCK_OFFSET */ + __u64 wallclock_offset; +}; + +/* For each shared feature, add a flag below. */ + +#define XNVDSO_FEAT_HOST_REALTIME 0x0000000000000001ULL +#define XNVDSO_FEAT_WALLCLOCK_OFFSET 0x0000000000000002ULL + +static inline int xnvdso_test_feature(struct xnvdso *vdso, + __u64 feature) +{ + return (vdso->features & feature) != 0; +} + +#endif /* !_COBALT_UAPI_KERNEL_VDSO_H */ --- linux/include/xenomai/cobalt/uapi/kernel/pipe.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/kernel/pipe.h 2022-03-21 12:58:32.155861989 +0100 @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2014 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_PIPE_H +#define _COBALT_UAPI_KERNEL_PIPE_H + +#define XNPIPE_IOCTL_BASE 'p' + +#define XNPIPEIOC_GET_NRDEV _IOW(XNPIPE_IOCTL_BASE, 0, int) +#define XNPIPEIOC_IFLUSH _IO(XNPIPE_IOCTL_BASE, 1) +#define XNPIPEIOC_OFLUSH _IO(XNPIPE_IOCTL_BASE, 2) +#define XNPIPEIOC_FLUSH XNPIPEIOC_OFLUSH +#define XNPIPEIOC_SETSIG _IO(XNPIPE_IOCTL_BASE, 3) + +#define XNPIPE_NORMAL 0x0 +#define XNPIPE_URGENT 0x1 + +#define XNPIPE_IFLUSH 0x1 +#define XNPIPE_OFLUSH 0x2 + +#define XNPIPE_MINOR_AUTO (-1) + +#endif /* !_COBALT_UAPI_KERNEL_PIPE_H */ --- linux/include/xenomai/cobalt/uapi/kernel/thread.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/kernel/thread.h 2022-03-21 12:58:32.147862067 +0100 @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
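The host-realtime block above is meant to be sampled with the unsynced_read_block() helper from urw.h, retrying until no concurrent update is detected. A sketch, assuming vdso already points at the shared xnvdso area and that only the seconds/nanoseconds fields are wanted:

static int read_wallclock(struct xnvdso *vdso, __u64 *sec, __u32 *nsec)
{
	struct xnvdso_hostrt_data *hostrt = &vdso->hostrt_data;
	urwstate_t tmp;

	if (!xnvdso_test_feature(vdso, XNVDSO_FEAT_HOST_REALTIME) ||
	    !hostrt->live)
		return -1;

	/* Redone automatically until a clean snapshot is read */
	unsynced_read_block(&tmp, &hostrt->lock) {
		*sec = hostrt->wall_sec;
		*nsec = hostrt->wall_nsec;
	}

	return 0;
}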
+ */ +#ifndef _COBALT_UAPI_KERNEL_THREAD_H +#define _COBALT_UAPI_KERNEL_THREAD_H + +#include + +/** + * @ingroup cobalt_core_thread + * @defgroup cobalt_core_thread_states Thread state flags + * @brief Bits reporting permanent or transient states of threads + * @{ + */ + +/* State flags (shared) */ + +#define XNSUSP 0x00000001 /**< Suspended. */ +#define XNPEND 0x00000002 /**< Sleep-wait for a resource. */ +#define XNDELAY 0x00000004 /**< Delayed */ +#define XNREADY 0x00000008 /**< Linked to the ready queue. */ +#define XNDORMANT 0x00000010 /**< Not started yet */ +#define XNZOMBIE 0x00000020 /**< Zombie thread in deletion process */ +#define XNMAPPED 0x00000040 /**< Thread is mapped to a linux task */ +#define XNRELAX 0x00000080 /**< Relaxed shadow thread (blocking bit) */ +#define XNHELD 0x00000200 /**< Thread is held to process emergency. */ +#define XNBOOST 0x00000400 /**< PI/PP boost undergoing */ +#define XNSSTEP 0x00000800 /**< Single-stepped by debugger */ +#define XNLOCK 0x00001000 /**< Scheduler lock control (pseudo-bit, not in ->state) */ +#define XNRRB 0x00002000 /**< Undergoes a round-robin scheduling */ +#define XNWARN 0x00004000 /**< Issue SIGDEBUG on error detection */ +#define XNFPU 0x00008000 /**< Thread uses FPU */ +#define XNROOT 0x00010000 /**< Root thread (that is, Linux/IDLE) */ +#define XNWEAK 0x00020000 /**< Non real-time shadow (from the WEAK class) */ +#define XNUSER 0x00040000 /**< Shadow thread running in userland */ +#define XNJOINED 0x00080000 /**< Another thread waits for joining this thread */ +#define XNTRAPLB 0x00100000 /**< Trap lock break (i.e. may not sleep with sched lock) */ +#define XNDEBUG 0x00200000 /**< User-level debugging enabled */ +#define XNDBGSTOP 0x00400000 /**< Stopped for synchronous debugging */ + +/** @} */ + +/** + * @ingroup cobalt_core_thread + * @defgroup cobalt_core_thread_info Thread information flags + * @brief Bits reporting events notified to threads + * @{ + */ + +/* Information flags (shared) */ + +#define XNTIMEO 0x00000001 /**< Woken up due to a timeout condition */ +#define XNRMID 0x00000002 /**< Pending on a removed resource */ +#define XNBREAK 0x00000004 /**< Forcibly awaken from a wait state */ +#define XNKICKED 0x00000008 /**< Forced out of primary mode */ +#define XNWAKEN 0x00000010 /**< Thread waken up upon resource availability */ +#define XNROBBED 0x00000020 /**< Robbed from resource ownership */ +#define XNCANCELD 0x00000040 /**< Cancellation request is pending */ +#define XNPIALERT 0x00000080 /**< Priority inversion alert (SIGDEBUG sent) */ +#define XNSCHEDP 0x00000100 /**< schedparam propagation is pending */ +#define XNCONTHI 0x00000200 /**< Continue in primary mode after debugging */ + +/* Local information flags (private to current thread) */ + +#define XNMOVED 0x00000001 /**< CPU migration in primary mode occurred */ +#define XNLBALERT 0x00000002 /**< Scheduler lock break alert (SIGDEBUG sent) */ +#define XNDESCENT 0x00000004 /**< Adaptive transitioning to secondary mode */ +#define XNSYSRST 0x00000008 /**< Thread awaiting syscall restart after signal */ +#define XNHICCUP 0x00000010 /**< Just left from ptracing */ + +/** @} */ + +/* + * Must follow strictly the declaration order of the state flags + * defined above. Status symbols are defined as follows: + * + * 'S' -> Forcibly suspended. + * 'w'/'W' -> Waiting for a resource, with or without timeout. + * 'D' -> Delayed (without any other wait condition). + * 'R' -> Runnable. + * 'U' -> Unstarted or dormant. + * 'X' -> Relaxed shadow. 
+ * 'H' -> Held in emergency. + * 'b' -> Priority boost undergoing. + * 'T' -> Ptraced and stopped. + * 'l' -> Locks scheduler. + * 'r' -> Undergoes round-robin. + * 't' -> Runtime mode errors notified. + * 'L' -> Lock breaks trapped. + * 's' -> Ptraced, stopped synchronously. + */ +#define XNTHREAD_STATE_LABELS "SWDRU..X.HbTlrt.....L.s" + +struct xnthread_user_window { + __u32 state; + __u32 info; + __u32 grant_value; + __u32 pp_pending; +}; + +#endif /* !_COBALT_UAPI_KERNEL_THREAD_H */ --- linux/include/xenomai/cobalt/uapi/kernel/heap.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/kernel/heap.h 2022-03-21 12:58:32.140862135 +0100 @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_KERNEL_HEAP_H +#define _COBALT_UAPI_KERNEL_HEAP_H + +#include + +#define COBALT_MEMDEV_PRIVATE "memdev-private" +#define COBALT_MEMDEV_SHARED "memdev-shared" +#define COBALT_MEMDEV_SYS "memdev-sys" + +struct cobalt_memdev_stat { + __u32 size; + __u32 free; +}; + +#define MEMDEV_RTIOC_STAT _IOR(RTDM_CLASS_MEMORY, 0, struct cobalt_memdev_stat) + +#endif /* !_COBALT_UAPI_KERNEL_HEAP_H */ --- linux/include/xenomai/cobalt/uapi/kernel/trace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/kernel/trace.h 2022-03-21 12:58:32.133862204 +0100 @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2006 Jan Kiszka . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
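The MEMDEV_RTIOC_STAT request above gives applications a direct view of the Cobalt heaps. A user-space sketch, assuming the usual /dev/rtdm/ prefix for RTDM named devices (an assumption, not stated in this patch):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

static void print_private_heap_stat(void)
{
	struct cobalt_memdev_stat st;
	int fd = open("/dev/rtdm/" COBALT_MEMDEV_PRIVATE, O_RDONLY);

	if (fd < 0)
		return;

	if (ioctl(fd, MEMDEV_RTIOC_STAT, &st) == 0)
		printf("private heap: %u bytes total, %u free\n",
		       st.size, st.free);

	close(fd);
}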
+ */ +#ifndef _COBALT_UAPI_KERNEL_TRACE_H +#define _COBALT_UAPI_KERNEL_TRACE_H + +#define __xntrace_op_max_begin 0 +#define __xntrace_op_max_end 1 +#define __xntrace_op_max_reset 2 +#define __xntrace_op_user_start 3 +#define __xntrace_op_user_stop 4 +#define __xntrace_op_user_freeze 5 +#define __xntrace_op_special 6 +#define __xntrace_op_special_u64 7 +#define __xntrace_op_latpeak_freeze 8 + +#endif /* !_COBALT_UAPI_KERNEL_TRACE_H */ --- linux/include/xenomai/cobalt/uapi/signal.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/signal.h 2022-03-21 12:58:32.125862281 +0100 @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2006 Gilles Chanteperdrix . + * Copyright (C) 2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_SIGNAL_H +#define _COBALT_UAPI_SIGNAL_H + +/* + * Those are pseudo-signals only available with pthread_kill() to + * suspend/resume/unblock threads synchronously, force them out of + * primary mode or even demote them to the SCHED_OTHER class via the + * low-level nucleus interface. Can't block those signals, queue them, + * or even set them in a sigset. Those are nasty, strictly anti-POSIX + * things; we do provide them nevertheless only because we are mean + * people doing harmful code for no valid reason. Can't go against + * your nature, right? Nah... (this said, don't blame us for POSIX, + * we are not _that_ mean). + */ +#define SIGSUSP (SIGRTMAX + 1) +#define SIGRESM (SIGRTMAX + 2) +#define SIGRELS (SIGRTMAX + 3) +#define SIGKICK (SIGRTMAX + 4) +#define SIGDEMT (SIGRTMAX + 5) + +/* + * Regular POSIX signals with specific handling by Xenomai. + */ +#define SIGSHADOW SIGWINCH +#define sigshadow_action(code) ((code) & 0xff) +#define sigshadow_arg(code) (((code) >> 8) & 0xff) +#define sigshadow_int(action, arg) ((action) | ((arg) << 8)) + +/* SIGSHADOW action codes. */ +#define SIGSHADOW_ACTION_HARDEN 1 +#define SIGSHADOW_ACTION_BACKTRACE 2 +#define SIGSHADOW_ACTION_HOME 3 +#define SIGSHADOW_BACKTRACE_DEPTH 16 + +#define SIGDEBUG SIGXCPU +#define sigdebug_code(si) ((si)->si_value.sival_int) +#define sigdebug_reason(si) (sigdebug_code(si) & 0xff) +#define sigdebug_marker 0xfccf0000 +#define sigdebug_marked(si) \ + ((sigdebug_code(si) & 0xffff0000) == sigdebug_marker) + +/* Possible values of sigdebug_reason() */ +#define SIGDEBUG_UNDEFINED 0 +#define SIGDEBUG_MIGRATE_SIGNAL 1 +#define SIGDEBUG_MIGRATE_SYSCALL 2 +#define SIGDEBUG_MIGRATE_FAULT 3 +#define SIGDEBUG_MIGRATE_PRIOINV 4 +#define SIGDEBUG_NOMLOCK 5 +#define SIGDEBUG_WATCHDOG 6 +#define SIGDEBUG_RESCNT_IMBALANCE 7 +#define SIGDEBUG_LOCK_BREAK 8 +#define SIGDEBUG_MUTEX_SLEEP 9 + +#define COBALT_DELAYMAX 2147483647U + +/* + * Internal accessors to extra siginfo/sigevent fields, extending some + * existing base field. 
The extra data should be grouped in a + * dedicated struct type. The extra space is taken from the padding + * area available from the original structure definitions. + * + * e.g. getting the address of the following extension to + * _sifields._rt from siginfo_t, + * + * struct bar { + * int foo; + * }; + * + * would be noted as: + * + * siginfo_t si; + * struct bar *p = __cobalt_si_extra(&si, _rt, struct bar); + * + * This code is shared between kernel and user space. Proper + * definitions of siginfo_t and sigevent_t should have been read prior + * to including this file. + * + * CAUTION: this macro does not handle alignment issues for the extra + * data. The extra type definition should take care of this. + */ +#ifdef __OPTIMIZE__ +extern void *__siginfo_overflow(void); +static inline +const void *__check_si_overflow(size_t fldsz, size_t extrasz, const void *p) +{ + siginfo_t *si __attribute__((unused)); + + if (fldsz + extrasz <= sizeof(si->_sifields)) + return p; + + return __siginfo_overflow(); +} +#define __cobalt_si_extra(__si, __basefield, __type) \ + ((__type *)__check_si_overflow(sizeof(__si->_sifields.__basefield), \ + sizeof(__type), &(__si->_sifields.__basefield) + 1)) +#else +#define __cobalt_si_extra(__si, __basefield, __type) \ + ((__type *)((&__si->_sifields.__basefield) + 1)) +#endif + +/* Same approach, this time for extending sigevent_t. */ + +#ifdef __OPTIMIZE__ +extern void *__sigevent_overflow(void); +static inline +const void *__check_sev_overflow(size_t fldsz, size_t extrasz, const void *p) +{ + sigevent_t *sev __attribute__((unused)); + + if (fldsz + extrasz <= sizeof(sev->_sigev_un)) + return p; + + return __sigevent_overflow(); +} +#define __cobalt_sev_extra(__sev, __basefield, __type) \ + ((__type *)__check_sev_overflow(sizeof(__sev->_sigev_un.__basefield), \ + sizeof(__type), &(__sev->_sigev_un.__basefield) + 1)) +#else +#define __cobalt_sev_extra(__sev, __basefield, __type) \ + ((__type *)((&__sev->_sigev_un.__basefield) + 1)) +#endif + +#endif /* !_COBALT_UAPI_SIGNAL_H */ --- linux/include/xenomai/cobalt/uapi/sem.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/sem.h 2022-03-21 12:58:32.118862350 +0100 @@ -0,0 +1,56 @@ +/* + * Written by Gilles Chanteperdrix . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
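The SIGDEBUG definitions above are what applications typically hook to spot unwanted relaxes to secondary mode. A minimal, hypothetical handler follows; arming the notification itself (usually pthread_setmode_np() with PTHREAD_WARNSW, assumed here rather than shown in this patch) and installing the handler with sigaction(SIGDEBUG, ...) and SA_SIGINFO set are left out.

#include <signal.h>
#include <unistd.h>

static void sigdebug_handler(int sig, siginfo_t *si, void *ctx)
{
	static const char msg[] = "SIGDEBUG: relaxed by a syscall\n";

	if (sigdebug_marked(si) &&
	    sigdebug_reason(si) == SIGDEBUG_MIGRATE_SYSCALL)
		write(STDERR_FILENO, msg, sizeof(msg) - 1);	/* async-signal-safe */
}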
+ */ +#ifndef _COBALT_UAPI_SEM_H +#define _COBALT_UAPI_SEM_H + +#include + +#define COBALT_SEM_MAGIC (0x86860707) +#define COBALT_NAMED_SEM_MAGIC (0x86860D0D) + +struct cobalt_sem; + +struct cobalt_sem_state { + atomic_t value; + __u32 flags; +}; + +union cobalt_sem_union { + sem_t native_sem; + struct cobalt_sem_shadow { + __u32 magic; + __s32 state_offset; + xnhandle_t handle; + } shadow_sem; +}; + +struct cobalt_sem_info { + unsigned int value; + int flags; + int nrwait; +}; + +#define SEM_FIFO 0x1 +#define SEM_PULSE 0x2 +#define SEM_PSHARED 0x4 +#define SEM_REPORT 0x8 +#define SEM_WARNDEL 0x10 +#define SEM_RAWCLOCK 0x20 +#define SEM_NOBUSYDEL 0x40 + +#endif /* !_COBALT_UAPI_SEM_H */ --- linux/include/xenomai/cobalt/uapi/corectl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/corectl.h 2022-03-21 12:58:32.110862428 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2015 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_CORECTL_H +#define _COBALT_UAPI_CORECTL_H + +#define _CC_COBALT_GET_VERSION 0 +#define _CC_COBALT_GET_NR_PIPES 1 +#define _CC_COBALT_GET_NR_TIMERS 2 + +#define _CC_COBALT_GET_DEBUG 3 +# define _CC_COBALT_DEBUG_ASSERT 1 +# define _CC_COBALT_DEBUG_CONTEXT 2 +# define _CC_COBALT_DEBUG_LOCKING 4 +# define _CC_COBALT_DEBUG_USER 8 +# define _CC_COBALT_DEBUG_MUTEX_RELAXED 16 +# define _CC_COBALT_DEBUG_MUTEX_SLEEP 32 +/* bit 6 (64) formerly used for DEBUG_POSIX_SYNCHRO */ +# define _CC_COBALT_DEBUG_LEGACY 128 +# define _CC_COBALT_DEBUG_TRACE_RELAX 256 +# define _CC_COBALT_DEBUG_NET 512 + +#define _CC_COBALT_GET_POLICIES 4 +# define _CC_COBALT_SCHED_FIFO 1 +# define _CC_COBALT_SCHED_RR 2 +# define _CC_COBALT_SCHED_WEAK 4 +# define _CC_COBALT_SCHED_SPORADIC 8 +# define _CC_COBALT_SCHED_QUOTA 16 +# define _CC_COBALT_SCHED_TP 32 + +#define _CC_COBALT_GET_WATCHDOG 5 +#define _CC_COBALT_GET_CORE_STATUS 6 +#define _CC_COBALT_START_CORE 7 +#define _CC_COBALT_STOP_CORE 8 + +#define _CC_COBALT_GET_NET_CONFIG 9 +# define _CC_COBALT_NET 0x00000001 +# define _CC_COBALT_NET_ETH_P_ALL 0x00000002 +# define _CC_COBALT_NET_IPV4 0x00000004 +# define _CC_COBALT_NET_ICMP 0x00000008 +# define _CC_COBALT_NET_NETROUTING 0x00000010 +# define _CC_COBALT_NET_ROUTER 0x00000020 +# define _CC_COBALT_NET_UDP 0x00000040 +# define _CC_COBALT_NET_AF_PACKET 0x00000080 +# define _CC_COBALT_NET_TDMA 0x00000100 +# define _CC_COBALT_NET_NOMAC 0x00000200 +# define _CC_COBALT_NET_CFG 0x00000400 +# define _CC_COBALT_NET_CAP 0x00000800 +# define _CC_COBALT_NET_PROXY 0x00001000 + + +enum cobalt_run_states { + COBALT_STATE_DISABLED, + COBALT_STATE_RUNNING, + COBALT_STATE_STOPPED, + COBALT_STATE_TEARDOWN, + COBALT_STATE_WARMUP, +}; + +#endif /* !_COBALT_UAPI_CORECTL_H */ --- linux/include/xenomai/cobalt/uapi/syscall.h 1970-01-01 01:00:00.000000000 +0100 +++ 
linux-patched/include/xenomai/cobalt/uapi/syscall.h 2022-03-21 12:58:32.103862496 +0100 @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2005 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_SYSCALL_H +#define _COBALT_UAPI_SYSCALL_H + +#include + +#define sc_cobalt_bind 0 +#define sc_cobalt_thread_create 1 +#define sc_cobalt_thread_getpid 2 +#define sc_cobalt_thread_setmode 3 +#define sc_cobalt_thread_setname 4 +#define sc_cobalt_thread_join 5 +#define sc_cobalt_thread_kill 6 +#define sc_cobalt_thread_setschedparam_ex 7 +#define sc_cobalt_thread_getschedparam_ex 8 +#define sc_cobalt_thread_getstat 9 +#define sc_cobalt_sem_init 10 +#define sc_cobalt_sem_destroy 11 +#define sc_cobalt_sem_post 12 +#define sc_cobalt_sem_wait 13 +#define sc_cobalt_sem_trywait 14 +#define sc_cobalt_sem_getvalue 15 +#define sc_cobalt_sem_open 16 +#define sc_cobalt_sem_close 17 +#define sc_cobalt_sem_unlink 18 +#define sc_cobalt_sem_timedwait 19 +#define sc_cobalt_sem_inquire 20 +#define sc_cobalt_sem_broadcast_np 21 +#define sc_cobalt_clock_getres 22 +#define sc_cobalt_clock_gettime 23 +#define sc_cobalt_clock_settime 24 +#define sc_cobalt_clock_nanosleep 25 +#define sc_cobalt_mutex_init 26 +#define sc_cobalt_mutex_check_init 27 +#define sc_cobalt_mutex_destroy 28 +#define sc_cobalt_mutex_lock 29 +#define sc_cobalt_mutex_timedlock 30 +#define sc_cobalt_mutex_trylock 31 +#define sc_cobalt_mutex_unlock 32 +#define sc_cobalt_cond_init 33 +#define sc_cobalt_cond_destroy 34 +#define sc_cobalt_cond_wait_prologue 35 +#define sc_cobalt_cond_wait_epilogue 36 +#define sc_cobalt_mq_open 37 +#define sc_cobalt_mq_close 38 +#define sc_cobalt_mq_unlink 39 +#define sc_cobalt_mq_getattr 40 +#define sc_cobalt_mq_timedsend 41 +#define sc_cobalt_mq_timedreceive 42 +#define sc_cobalt_mq_notify 43 +#define sc_cobalt_sched_minprio 44 +#define sc_cobalt_sched_maxprio 45 +#define sc_cobalt_sched_weightprio 46 +#define sc_cobalt_sched_yield 47 +#define sc_cobalt_sched_setscheduler_ex 48 +#define sc_cobalt_sched_getscheduler_ex 49 +#define sc_cobalt_sched_setconfig_np 50 +#define sc_cobalt_sched_getconfig_np 51 +#define sc_cobalt_timer_create 52 +#define sc_cobalt_timer_delete 53 +#define sc_cobalt_timer_settime 54 +#define sc_cobalt_timer_gettime 55 +#define sc_cobalt_timer_getoverrun 56 +#define sc_cobalt_timerfd_create 57 +#define sc_cobalt_timerfd_settime 58 +#define sc_cobalt_timerfd_gettime 59 +#define sc_cobalt_sigwait 60 +#define sc_cobalt_sigwaitinfo 61 +#define sc_cobalt_sigtimedwait 62 +#define sc_cobalt_sigpending 63 +#define sc_cobalt_kill 64 +#define sc_cobalt_sigqueue 65 +#define sc_cobalt_monitor_init 66 +#define sc_cobalt_monitor_destroy 67 +#define sc_cobalt_monitor_enter 68 +#define sc_cobalt_monitor_wait 69 +#define sc_cobalt_monitor_sync 70 +#define sc_cobalt_monitor_exit 71 +#define 
sc_cobalt_event_init 72 +#define sc_cobalt_event_wait 73 +#define sc_cobalt_event_sync 74 +#define sc_cobalt_event_destroy 75 +#define sc_cobalt_event_inquire 76 +#define sc_cobalt_open 77 +#define sc_cobalt_socket 78 +#define sc_cobalt_close 79 +#define sc_cobalt_ioctl 80 +#define sc_cobalt_read 81 +#define sc_cobalt_write 82 +#define sc_cobalt_recvmsg 83 +#define sc_cobalt_sendmsg 84 +#define sc_cobalt_mmap 85 +#define sc_cobalt_select 86 +#define sc_cobalt_fcntl 87 +#define sc_cobalt_migrate 88 +#define sc_cobalt_archcall 89 +#define sc_cobalt_trace 90 +#define sc_cobalt_corectl 91 +#define sc_cobalt_get_current 92 +/* 93: formerly mayday */ +#define sc_cobalt_backtrace 94 +#define sc_cobalt_serialdbg 95 +#define sc_cobalt_extend 96 +#define sc_cobalt_ftrace_puts 97 +#define sc_cobalt_recvmmsg 98 +#define sc_cobalt_sendmmsg 99 +#define sc_cobalt_clock_adjtime 100 +#define sc_cobalt_thread_setschedprio 101 +#define sc_cobalt_sem_timedwait64 102 +#define sc_cobalt_clock_gettime64 103 +#define sc_cobalt_clock_settime64 104 +#define sc_cobalt_clock_nanosleep64 105 +#define sc_cobalt_clock_getres64 106 +#define sc_cobalt_clock_adjtime64 107 +#define sc_cobalt_mutex_timedlock64 108 +#define sc_cobalt_mq_timedsend64 109 +#define sc_cobalt_mq_timedreceive64 110 +#define sc_cobalt_sigtimedwait64 111 +#define sc_cobalt_monitor_wait64 112 +#define sc_cobalt_event_wait64 113 +#define sc_cobalt_recvmmsg64 114 + +#define __NR_COBALT_SYSCALLS 128 /* Power of 2 */ + +#endif /* !_COBALT_UAPI_SYSCALL_H */ --- linux/include/xenomai/cobalt/uapi/time.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/time.h 2022-03-21 12:58:32.096862564 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_TIME_H +#define _COBALT_UAPI_TIME_H + +#ifndef CLOCK_MONOTONIC_RAW +#define CLOCK_MONOTONIC_RAW 4 +#endif + +/* + * Additional clock ids we manage are supposed not to collide with any + * of the POSIX and Linux kernel definitions so that no ambiguities + * arise when porting applications in both directions. + * + * 0 .. 31 regular POSIX/linux clock ids. + * 32 .. 63 statically reserved Cobalt clocks + * 64 .. 127 dynamically registered Cobalt clocks (external) + * + * CAUTION: clock ids must fit within a 7bit value, see + * include/cobalt/uapi/thread.h (e.g. cobalt_condattr). 
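+ *
+ * For instance, CLOCK_HOST_REALTIME below maps to id 32
+ * (__COBALT_CLOCK_STATIC(0)), while an external clock registered at
+ * slot #2 is advertised as __COBALT_CLOCK_EXT(2) == 66:
+ * __COBALT_CLOCK_EXT_P(66) holds since 64 <= 66 < 128, and
+ * __COBALT_CLOCK_EXT_INDEX(66) == 2. Both ids fit in 7 bits.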
+ */ +#define __COBALT_CLOCK_STATIC(nr) ((clockid_t)(nr + 32)) + +#define CLOCK_HOST_REALTIME __COBALT_CLOCK_STATIC(0) + +#define COBALT_MAX_EXTCLOCKS 64 + +#define __COBALT_CLOCK_EXT(nr) ((clockid_t)(nr) | (1 << 6)) +#define __COBALT_CLOCK_EXT_P(id) ((int)(id) >= 64 && (int)(id) < 128) +#define __COBALT_CLOCK_EXT_INDEX(id) ((int)(id) & ~(1 << 6)) + +/* + * Additional timerfd defines + * + * when passing TFD_WAKEUP to timer_settime, any timer expiration + * unblocks the thread having issued timer_settime. + */ +#define TFD_WAKEUP (1 << 2) + +#endif /* !_COBALT_UAPI_TIME_H */ --- linux/include/xenomai/cobalt/uapi/event.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/event.h 2022-03-21 12:58:32.088862642 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_EVENT_H +#define _COBALT_UAPI_EVENT_H + +#include + +struct cobalt_event_state { + __u32 value; + __u32 flags; +#define COBALT_EVENT_PENDED 0x1 + __u32 nwaiters; +}; + +struct cobalt_event; + +/* Creation flags. */ +#define COBALT_EVENT_FIFO 0x0 +#define COBALT_EVENT_PRIO 0x1 +#define COBALT_EVENT_SHARED 0x2 + +/* Wait mode. */ +#define COBALT_EVENT_ALL 0x0 +#define COBALT_EVENT_ANY 0x1 + +struct cobalt_event_shadow { + __u32 state_offset; + __u32 flags; + xnhandle_t handle; +}; + +struct cobalt_event_info { + unsigned int value; + int flags; + int nrwait; +}; + +typedef struct cobalt_event_shadow cobalt_event_t; + +#endif /* !_COBALT_UAPI_EVENT_H */ --- linux/include/xenomai/cobalt/uapi/monitor.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/monitor.h 2022-03-21 12:58:32.081862711 +0100 @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_MONITOR_H +#define _COBALT_UAPI_MONITOR_H + +#include + +struct cobalt_monitor_state { + atomic_t owner; + __u32 flags; +#define COBALT_MONITOR_GRANTED 0x01 +#define COBALT_MONITOR_DRAINED 0x02 +#define COBALT_MONITOR_SIGNALED 0x03 /* i.e. 
GRANTED or DRAINED */ +#define COBALT_MONITOR_BROADCAST 0x04 +#define COBALT_MONITOR_PENDED 0x08 +}; + +struct cobalt_monitor; + +struct cobalt_monitor_shadow { + __u32 state_offset; + __u32 flags; + xnhandle_t handle; +#define COBALT_MONITOR_SHARED 0x1 +#define COBALT_MONITOR_WAITGRANT 0x0 +#define COBALT_MONITOR_WAITDRAIN 0x1 +}; + +typedef struct cobalt_monitor_shadow cobalt_monitor_t; + +#endif /* !_COBALT_UAPI_MONITOR_H */ --- linux/include/xenomai/cobalt/uapi/asm-generic/arith.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/arith.h 2022-03-21 12:58:32.074862779 +0100 @@ -0,0 +1,365 @@ +/** + * Generic arithmetic/conversion routines. + * Copyright © 2005 Stelian Pop. + * Copyright © 2005 Gilles Chanteperdrix. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_ASM_GENERIC_ARITH_H +#define _COBALT_UAPI_ASM_GENERIC_ARITH_H + +#ifndef xnarch_u64tou32 +#define xnarch_u64tou32(ull, h, l) ({ \ + union { \ + unsigned long long _ull; \ + struct endianstruct _s; \ + } _u; \ + _u._ull = (ull); \ + (h) = _u._s._h; \ + (l) = _u._s._l; \ +}) +#endif /* !xnarch_u64tou32 */ + +#ifndef xnarch_u64fromu32 +#define xnarch_u64fromu32(h, l) ({ \ + union { \ + unsigned long long _ull; \ + struct endianstruct _s; \ + } _u; \ + _u._s._h = (h); \ + _u._s._l = (l); \ + _u._ull; \ +}) +#endif /* !xnarch_u64fromu32 */ + +#ifndef xnarch_ullmul +static inline __attribute__((__const__)) unsigned long long +xnarch_generic_ullmul(const unsigned m0, const unsigned m1) +{ + return (unsigned long long) m0 * m1; +} +#define xnarch_ullmul(m0,m1) xnarch_generic_ullmul((m0),(m1)) +#endif /* !xnarch_ullmul */ + +#ifndef xnarch_ulldiv +static inline unsigned long long xnarch_generic_ulldiv (unsigned long long ull, + const unsigned uld, + unsigned long *const rp) +{ + const unsigned r = do_div(ull, uld); + + if (rp) + *rp = r; + + return ull; +} +#define xnarch_ulldiv(ull,uld,rp) xnarch_generic_ulldiv((ull),(uld),(rp)) +#endif /* !xnarch_ulldiv */ + +#ifndef xnarch_uldivrem +#define xnarch_uldivrem(ull,ul,rp) ((unsigned) xnarch_ulldiv((ull),(ul),(rp))) +#endif /* !xnarch_uldivrem */ + +#ifndef xnarch_divmod64 +static inline unsigned long long +xnarch_generic_divmod64(unsigned long long a, + unsigned long long b, + unsigned long long *rem) +{ + unsigned long long q; +#if defined(__KERNEL__) && BITS_PER_LONG < 64 + unsigned long long + xnarch_generic_full_divmod64(unsigned long long a, + unsigned long long b, + unsigned long long *rem); + if (b <= 0xffffffffULL) { + unsigned long r; + q = xnarch_ulldiv(a, b, &r); + if (rem) + *rem = r; + } else { + if (a < b) { + if (rem) + *rem = a; + return 0; + } + + return xnarch_generic_full_divmod64(a, b, rem); + } +#else /* !(__KERNEL__ && BITS_PER_LONG < 64) */ + q = a / b; + if (rem) + *rem = a % b; +#endif /* !(__KERNEL__ && 
BITS_PER_LONG < 64) */ + return q; +} +#define xnarch_divmod64(a,b,rp) xnarch_generic_divmod64((a),(b),(rp)) +#endif /* !xnarch_divmod64 */ + +#ifndef xnarch_imuldiv +static inline __attribute__((__const__)) int xnarch_generic_imuldiv(int i, + int mult, + int div) +{ + /* (int)i = (unsigned long long)i*(unsigned)(mult)/(unsigned)div. */ + const unsigned long long ull = xnarch_ullmul(i, mult); + return xnarch_uldivrem(ull, div, NULL); +} +#define xnarch_imuldiv(i,m,d) xnarch_generic_imuldiv((i),(m),(d)) +#endif /* !xnarch_imuldiv */ + +#ifndef xnarch_imuldiv_ceil +static inline __attribute__((__const__)) int xnarch_generic_imuldiv_ceil(int i, + int mult, + int div) +{ + /* Same as xnarch_generic_imuldiv, rounding up. */ + const unsigned long long ull = xnarch_ullmul(i, mult); + return xnarch_uldivrem(ull + (unsigned)div - 1, div, NULL); +} +#define xnarch_imuldiv_ceil(i,m,d) xnarch_generic_imuldiv_ceil((i),(m),(d)) +#endif /* !xnarch_imuldiv_ceil */ + +/* Division of an unsigned 96 bits ((h << 32) + l) by an unsigned 32 bits. + Building block for llimd. Without const qualifiers, gcc reload registers + after each call to uldivrem. */ +static inline unsigned long long +xnarch_generic_div96by32(const unsigned long long h, + const unsigned l, + const unsigned d, + unsigned long *const rp) +{ + unsigned long rh; + const unsigned qh = xnarch_uldivrem(h, d, &rh); + const unsigned long long t = xnarch_u64fromu32(rh, l); + const unsigned ql = xnarch_uldivrem(t, d, rp); + + return xnarch_u64fromu32(qh, ql); +} + +#ifndef xnarch_llimd +static inline __attribute__((__const__)) +unsigned long long xnarch_generic_ullimd(const unsigned long long op, + const unsigned m, + const unsigned d) +{ + unsigned int oph, opl, tlh, tll; + unsigned long long th, tl; + + xnarch_u64tou32(op, oph, opl); + tl = xnarch_ullmul(opl, m); + xnarch_u64tou32(tl, tlh, tll); + th = xnarch_ullmul(oph, m); + th += tlh; + + return xnarch_generic_div96by32(th, tll, d, NULL); +} + +static inline __attribute__((__const__)) long long +xnarch_generic_llimd (long long op, unsigned m, unsigned d) +{ + long long ret; + int sign = 0; + + if (op < 0LL) { + sign = 1; + op = -op; + } + ret = xnarch_generic_ullimd(op, m, d); + + return sign ? -ret : ret; +} +#define xnarch_llimd(ll,m,d) xnarch_generic_llimd((ll),(m),(d)) +#endif /* !xnarch_llimd */ + +#ifndef _xnarch_u96shift +#define xnarch_u96shift(h, m, l, s) ({ \ + unsigned int _l = (l); \ + unsigned int _m = (m); \ + unsigned int _s = (s); \ + _l >>= _s; \ + _l |= (_m << (32 - _s)); \ + _m >>= _s; \ + _m |= ((h) << (32 - _s)); \ + xnarch_u64fromu32(_m, _l); \ +}) +#endif /* !xnarch_u96shift */ + +static inline long long xnarch_llmi(int i, int j) +{ + /* Fast 32x32->64 signed multiplication */ + return (long long) i * j; +} + +#ifndef xnarch_llmulshft +/* Fast scaled-math-based replacement for long long multiply-divide */ +static inline long long +xnarch_generic_llmulshft(const long long op, + const unsigned m, + const unsigned s) +{ + unsigned int oph, opl, tlh, tll, thh, thl; + unsigned long long th, tl; + + xnarch_u64tou32(op, oph, opl); + tl = xnarch_ullmul(opl, m); + xnarch_u64tou32(tl, tlh, tll); + th = xnarch_llmi(oph, m); + th += tlh; + xnarch_u64tou32(th, thh, thl); + + return xnarch_u96shift(thh, thl, tll, s); +} +#define xnarch_llmulshft(ll, m, s) xnarch_generic_llmulshft((ll), (m), (s)) +#endif /* !xnarch_llmulshft */ + +#ifdef XNARCH_HAVE_NODIV_LLIMD + +/* Representation of a 32 bits fraction. 
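+ *
+ * A ratio m/d is stored as integ = m / d plus a 0.64 fixed-point
+ * fractional part frac = ((m % d) << 64) / d, so that op * m / d can
+ * be approximated without any division as
+ * integ * op + ((op * (frac >> 32)) >> 32), which is what
+ * xnarch_nodiv_imuldiv() below computes.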
*/ +struct xnarch_u32frac { + unsigned long long frac; + unsigned integ; +}; + +static inline void xnarch_init_u32frac(struct xnarch_u32frac *const f, + const unsigned m, + const unsigned d) +{ + /* + * Avoid clever compiler optimizations to occur when d is + * known at compile-time. The performance of this function is + * not critical since it is only called at init time. + */ + volatile unsigned vol_d = d; + f->integ = m / d; + f->frac = xnarch_generic_div96by32 + (xnarch_u64fromu32(m % d, 0), 0, vol_d, NULL); +} + +#ifndef xnarch_nodiv_imuldiv +static inline __attribute__((__const__)) unsigned +xnarch_generic_nodiv_imuldiv(unsigned op, const struct xnarch_u32frac f) +{ + return (xnarch_ullmul(op, f.frac >> 32) >> 32) + f.integ * op; +} +#define xnarch_nodiv_imuldiv(op, f) xnarch_generic_nodiv_imuldiv((op),(f)) +#endif /* xnarch_nodiv_imuldiv */ + +#ifndef xnarch_nodiv_imuldiv_ceil +static inline __attribute__((__const__)) unsigned +xnarch_generic_nodiv_imuldiv_ceil(unsigned op, const struct xnarch_u32frac f) +{ + unsigned long long full = xnarch_ullmul(op, f.frac >> 32) + ~0U; + return (full >> 32) + f.integ * op; +} +#define xnarch_nodiv_imuldiv_ceil(op, f) \ + xnarch_generic_nodiv_imuldiv_ceil((op),(f)) +#endif /* xnarch_nodiv_imuldiv_ceil */ + +#ifndef xnarch_nodiv_ullimd + +#ifndef xnarch_add96and64 +#error "xnarch_add96and64 must be implemented." +#endif + +static inline __attribute__((__const__)) unsigned long long +xnarch_mul64by64_high(const unsigned long long op, const unsigned long long m) +{ + /* Compute high 64 bits of multiplication 64 bits x 64 bits. */ + register unsigned long long t0, t1, t2, t3; + register unsigned int oph, opl, mh, ml, t0h, t0l, t1h, t1l, t2h, t2l, t3h, t3l; + + xnarch_u64tou32(op, oph, opl); + xnarch_u64tou32(m, mh, ml); + t0 = xnarch_ullmul(opl, ml); + xnarch_u64tou32(t0, t0h, t0l); + t3 = xnarch_ullmul(oph, mh); + xnarch_u64tou32(t3, t3h, t3l); + xnarch_add96and64(t3h, t3l, t0h, 0, t0l >> 31); + t1 = xnarch_ullmul(oph, ml); + xnarch_u64tou32(t1, t1h, t1l); + xnarch_add96and64(t3h, t3l, t0h, t1h, t1l); + t2 = xnarch_ullmul(opl, mh); + xnarch_u64tou32(t2, t2h, t2l); + xnarch_add96and64(t3h, t3l, t0h, t2h, t2l); + + return xnarch_u64fromu32(t3h, t3l); +} + +static inline unsigned long long +xnarch_generic_nodiv_ullimd(const unsigned long long op, + const unsigned long long frac, + unsigned int integ) +{ + return xnarch_mul64by64_high(op, frac) + integ * op; +} +#define xnarch_nodiv_ullimd(op, f, i) xnarch_generic_nodiv_ullimd((op),(f), (i)) +#endif /* !xnarch_nodiv_ullimd */ + +#ifndef xnarch_nodiv_llimd +static inline __attribute__((__const__)) long long +xnarch_generic_nodiv_llimd(long long op, unsigned long long frac, + unsigned int integ) +{ + long long ret; + int sign = 0; + + if (op < 0LL) { + sign = 1; + op = -op; + } + ret = xnarch_nodiv_ullimd(op, frac, integ); + + return sign ? -ret : ret; +} +#define xnarch_nodiv_llimd(ll,frac,integ) xnarch_generic_nodiv_llimd((ll),(frac),(integ)) +#endif /* !xnarch_nodiv_llimd */ + +#endif /* XNARCH_HAVE_NODIV_LLIMD */ + +static inline void xnarch_init_llmulshft(const unsigned m_in, + const unsigned d_in, + unsigned *m_out, + unsigned *s_out) +{ + /* + * Avoid clever compiler optimizations to occur when d is + * known at compile-time. The performance of this function is + * not critical since it is only called at init time. 
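+ *
+ * The loop below picks the largest shift (at most 31) for which the
+ * multiplier (m_in << shift) / d_in still fits in 31 bits, so that
+ * xnarch_llmulshft(x, *m_out, *s_out) ~= x * m_in / d_in. For
+ * instance, m_in = 1, d_in = 3 yields *s_out = 31 and
+ * *m_out = 0x2aaaaaaa.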
+ */ + volatile unsigned int vol_d = d_in; + unsigned long long mult; + + *s_out = 31; + while (1) { + mult = ((unsigned long long)m_in) << *s_out; + do_div(mult, vol_d); + if (mult <= 0x7FFFFFFF) + break; + (*s_out)--; + } + *m_out = (unsigned int)mult; +} + +#define xnarch_ullmod(ull,uld,rem) ({ xnarch_ulldiv(ull,uld,rem); (*rem); }) +#define xnarch_uldiv(ull, d) xnarch_uldivrem(ull, d, NULL) +#define xnarch_ulmod(ull, d) ({ unsigned long _rem; \ + xnarch_uldivrem(ull,d,&_rem); _rem; }) + +#define xnarch_div64(a,b) xnarch_divmod64((a),(b),NULL) +#define xnarch_mod64(a,b) ({ unsigned long long _rem; \ + xnarch_divmod64((a),(b),&_rem); _rem; }) + +#endif /* _COBALT_UAPI_ASM_GENERIC_ARITH_H */ --- linux/include/xenomai/cobalt/uapi/asm-generic/syscall.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/syscall.h 2022-03-21 12:58:32.066862857 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_ASM_GENERIC_SYSCALL_H +#define _COBALT_UAPI_ASM_GENERIC_SYSCALL_H + +#include +#include +#include + +#define __COBALT_SYSCALL_BIT 0x10000000 + +struct cobalt_bindreq { + /** Features userland requires. */ + __u32 feat_req; + /** ABI revision userland uses. */ + __u32 abi_rev; + /** Features the Cobalt core provides. */ + struct cobalt_featinfo feat_ret; +}; + +#define COBALT_SECONDARY 0 +#define COBALT_PRIMARY 1 + +#endif /* !_COBALT_UAPI_ASM_GENERIC_SYSCALL_H */ --- linux/include/xenomai/cobalt/uapi/asm-generic/features.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/asm-generic/features.h 2022-03-21 12:58:32.059862925 +0100 @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2005 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_ASM_GENERIC_FEATURES_H +#define _COBALT_UAPI_ASM_GENERIC_FEATURES_H + +#include + +#define XNFEAT_STRING_LEN 64 + +struct cobalt_featinfo { + /** Real-time clock frequency */ + __u64 clock_freq; + /** Offset of nkvdso in the sem heap. */ + __u32 vdso_offset; + /** ABI revision level. 
*/ + __u32 feat_abirev; + /** Available feature set. */ + __u32 feat_all; + /** Mandatory features (when requested). */ + __u32 feat_man; + /** Requested feature set. */ + __u32 feat_req; + /** Missing features. */ + __u32 feat_mis; + char feat_all_s[XNFEAT_STRING_LEN]; + char feat_man_s[XNFEAT_STRING_LEN]; + char feat_req_s[XNFEAT_STRING_LEN]; + char feat_mis_s[XNFEAT_STRING_LEN]; + /* Architecture-specific features. */ + struct cobalt_featinfo_archdep feat_arch; +}; + +#define __xn_feat_smp 0x80000000 +#define __xn_feat_nosmp 0x40000000 +#define __xn_feat_fastsynch 0x20000000 +#define __xn_feat_nofastsynch 0x10000000 +#define __xn_feat_control 0x08000000 +#define __xn_feat_prioceiling 0x04000000 + +#ifdef CONFIG_SMP +#define __xn_feat_smp_mask __xn_feat_smp +#else +#define __xn_feat_smp_mask __xn_feat_nosmp +#endif + +/* + * Revisit: all archs currently support fast locking, and there is no + * reason for any future port not to provide this. This will be + * written in stone at the next ABI update, when fastsynch support is + * dropped from the optional feature set. + */ +#define __xn_feat_fastsynch_mask __xn_feat_fastsynch + +/* List of generic features kernel or userland may support */ +#define __xn_feat_generic_mask \ + (__xn_feat_smp_mask | \ + __xn_feat_fastsynch_mask | \ + __xn_feat_prioceiling) + +/* + * List of features both sides have to agree on: If userland supports + * it, the kernel has to provide it, too. This means backward + * compatibility between older userland and newer kernel may be + * supported for those features, but forward compatibility between + * newer userland and older kernel cannot. + */ +#define __xn_feat_generic_man_mask \ + (__xn_feat_fastsynch | \ + __xn_feat_nofastsynch | \ + __xn_feat_nosmp | \ + __xn_feat_prioceiling) + +static inline +const char *get_generic_feature_label(unsigned int feature) +{ + switch (feature) { + case __xn_feat_smp: + return "smp"; + case __xn_feat_nosmp: + return "nosmp"; + case __xn_feat_fastsynch: + return "fastsynch"; + case __xn_feat_nofastsynch: + return "nofastsynch"; + case __xn_feat_control: + return "control"; + case __xn_feat_prioceiling: + return "prioceiling"; + default: + return 0; + } +} + +static inline int check_abi_revision(unsigned long abirev) +{ + return abirev == XENOMAI_ABI_REV; +} + +#endif /* !_COBALT_UAPI_ASM_GENERIC_FEATURES_H */ --- linux/include/xenomai/cobalt/uapi/cond.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/cond.h 2022-03-21 12:58:32.052862993 +0100 @@ -0,0 +1,39 @@ +/* + * Written by Gilles Chanteperdrix . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ */ +#ifndef _COBALT_UAPI_COND_H +#define _COBALT_UAPI_COND_H + +#include + +#define COBALT_COND_MAGIC 0x86860505 + +struct cobalt_cond_state { + __u32 pending_signals; + __u32 mutex_state_offset; +}; + +union cobalt_cond_union { + pthread_cond_t native_cond; + struct cobalt_cond_shadow { + __u32 magic; + __u32 state_offset; + xnhandle_t handle; + } shadow_cond; +}; + +#endif /* !_COBALT_UAPI_COND_H */ --- linux/include/xenomai/cobalt/uapi/thread.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/uapi/thread.h 2022-03-21 12:58:32.044863071 +0100 @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2005 Philippe Gerum . + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ +#ifndef _COBALT_UAPI_THREAD_H +#define _COBALT_UAPI_THREAD_H + +#include + +#define PTHREAD_WARNSW XNWARN +#define PTHREAD_LOCK_SCHED XNLOCK +#define PTHREAD_DISABLE_LOCKBREAK XNTRAPLB +#define PTHREAD_CONFORMING 0 + +struct cobalt_mutexattr { + int type : 3; + int protocol : 3; + int pshared : 1; + int __pad : 1; + int ceiling : 8; /* prio-1, (XN)SCHED_FIFO range. */ +}; + +struct cobalt_condattr { + int clock : 7; + int pshared : 1; +}; + +struct cobalt_threadstat { + __u64 xtime; + __u64 timeout; + __u64 msw; + __u64 csw; + __u64 xsc; + __u32 status; + __u32 pf; + int cpu; + int cprio; + char name[XNOBJECT_NAME_LEN]; + char personality[XNOBJECT_NAME_LEN]; +}; + +#endif /* !_COBALT_UAPI_THREAD_H */ --- linux/include/xenomai/cobalt/kernel/schedparam.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/schedparam.h 2022-03-21 12:58:31.742866016 +0100 @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2008 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHEDPARAM_H +#define _COBALT_KERNEL_SCHEDPARAM_H + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +struct xnsched_idle_param { + int prio; +}; + +struct xnsched_weak_param { + int prio; +}; + +struct xnsched_rt_param { + int prio; +}; + +struct xnsched_tp_param { + int prio; + int ptid; /* partition id. 
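+ (index of the time partition the thread is assigned to in the
+ global TP frame)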
*/ +}; + +struct xnsched_sporadic_param { + xnticks_t init_budget; + xnticks_t repl_period; + int max_repl; + int low_prio; + int normal_prio; + int current_prio; +}; + +struct xnsched_quota_param { + int prio; + int tgid; /* thread group id. */ +}; + +union xnsched_policy_param { + struct xnsched_idle_param idle; + struct xnsched_rt_param rt; +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + struct xnsched_weak_param weak; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_TP + struct xnsched_tp_param tp; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + struct xnsched_sporadic_param pss; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + struct xnsched_quota_param quota; +#endif +}; + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHEDPARAM_H */ --- linux/include/xenomai/cobalt/kernel/vfile.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/vfile.h 2022-03-21 12:58:31.735866084 +0100 @@ -0,0 +1,667 @@ +/* + * Copyright (C) 2010 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _COBALT_KERNEL_VFILE_H +#define _COBALT_KERNEL_VFILE_H + +#if defined(CONFIG_XENO_OPT_VFILE) || defined(DOXYGEN_CPP) + +#include +#include +#include + +/** + * @addtogroup cobalt_core_vfile + * @{ + */ + +struct xnvfile_directory; +struct xnvfile_regular_iterator; +struct xnvfile_snapshot_iterator; +struct xnvfile_lock_ops; + +struct xnvfile { + struct proc_dir_entry *pde; + struct file *file; + struct xnvfile_lock_ops *lockops; + int refcnt; + void *private; +}; + +/** + * @brief Vfile locking operations + * @anchor vfile_lockops + * + * This structure describes the operations to be provided for + * implementing locking support on vfiles. They apply to both + * snapshot-driven and regular vfiles. + */ +struct xnvfile_lock_ops { + /** + * @anchor lockops_get + * This handler should grab the desired lock. + * + * @param vfile A pointer to the virtual file which needs + * locking. + * + * @return zero should be returned if the call + * succeeds. Otherwise, a negative error code can be returned; + * upon error, the current vfile operation is aborted, and the + * user-space caller is passed back the error value. + */ + int (*get)(struct xnvfile *vfile); + /** + * @anchor lockops_put This handler should release the lock + * previously grabbed by the @ref lockops_get "get() handler". + * + * @param vfile A pointer to the virtual file which currently + * holds the lock to release. 
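+ *
+ * @note For vfiles guarded by a regular kernel mutex, the
+ * DEFINE_VFILE_HOSTLOCK() helper defined later in this header
+ * declares a struct xnvfile_hostlock_class pairing
+ * __vfile_hostlock_get()/__vfile_hostlock_put() with a struct mutex,
+ * providing ready-made get()/put() handlers.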
+ */ + void (*put)(struct xnvfile *vfile); +}; + +struct xnvfile_hostlock_class { + struct xnvfile_lock_ops ops; + struct mutex mutex; +}; + +struct xnvfile_nklock_class { + struct xnvfile_lock_ops ops; + spl_t s; +}; + +struct xnvfile_input { + const char __user *u_buf; + size_t size; + struct xnvfile *vfile; +}; + +/** + * @brief Regular vfile operation descriptor + * @anchor regular_ops + * + * This structure describes the operations available with a regular + * vfile. It defines handlers for sending back formatted kernel data + * upon a user-space read request, and for obtaining user data upon a + * user-space write request. + */ +struct xnvfile_regular_ops { + /** + * @anchor regular_rewind This handler is called only once, + * when the virtual file is opened, before the @ref + * regular_begin "begin() handler" is invoked. + * + * @param it A pointer to the vfile iterator which will be + * used to read the file contents. + * + * @return Zero should be returned upon success. Otherwise, a + * negative error code aborts the operation, and is passed + * back to the reader. + * + * @note This handler is optional. It should not be used to + * allocate resources but rather to perform consistency + * checks, since no closure call is issued in case the open + * sequence eventually fails. + */ + int (*rewind)(struct xnvfile_regular_iterator *it); + /** + * @anchor regular_begin + * This handler should prepare for iterating over the records + * upon a read request, starting from the specified position. + * + * @param it A pointer to the current vfile iterator. On + * entry, it->pos is set to the (0-based) position of the + * first record to output. This handler may be called multiple + * times with different position requests. + * + * @return A pointer to the first record to format and output, + * to be passed to the @ref regular_show "show() handler" as + * its @a data parameter, if the call succeeds. Otherwise: + * + * - NULL in case no record is available, in which case the + * read operation will terminate immediately with no output. + * + * - VFILE_SEQ_START, a special value indicating that @ref + * regular_show "the show() handler" should receive a NULL + * data pointer first, in order to output a header. + * + * - ERR_PTR(errno), where errno is a negative error code; + * upon error, the current operation will be aborted + * immediately. + * + * @note This handler is optional; if none is given in the + * operation descriptor (i.e. NULL value), the @ref + * regular_show "show() handler()" will be called only once + * for a read operation, with a NULL @a data parameter. This + * particular setting is convenient for simple regular vfiles + * having a single, fixed record to output. + */ + void *(*begin)(struct xnvfile_regular_iterator *it); + /** + * @anchor regular_next + * This handler should return the address of the next record + * to format and output by the @ref regular_show "show() + * handler". + * + * @param it A pointer to the current vfile iterator. On + * entry, it->pos is set to the (0-based) position of the + * next record to output. + * + * @return A pointer to the next record to format and output, + * to be passed to the @ref regular_show "show() handler" as + * its @a data parameter, if the call succeeds. Otherwise: + * + * - NULL in case no record is available, in which case the + * read operation will terminate immediately with no output. + * + * - ERR_PTR(errno), where errno is a negative error code; + * upon error, the current operation will be aborted + * immediately. 
+ * + * @note This handler is optional; if none is given in the + * operation descriptor (i.e. NULL value), the read operation + * will stop after the first invocation of the @ref regular_show + * "show() handler". + */ + void *(*next)(struct xnvfile_regular_iterator *it); + /** + * @anchor regular_end + * This handler is called after all records have been output. + * + * @param it A pointer to the current vfile iterator. + * + * @note This handler is optional and the pointer may be NULL. + */ + void (*end)(struct xnvfile_regular_iterator *it); + /** + * @anchor regular_show + * This handler should format and output a record. + * + * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and + * xnvfile_putc() are available to format and/or emit the + * output. All routines take the iterator argument @a it as + * their first parameter. + * + * @param it A pointer to the current vfile iterator. + * + * @param data A pointer to the record to format then + * output. The first call to the handler may receive a NULL @a + * data pointer, depending on the presence and/or return of a + * @ref regular_begin "hander"; the show handler should test + * this special value to output any header that fits, prior to + * receiving more calls with actual records. + * + * @return zero if the call succeeds, also indicating that the + * handler should be called for the next record if + * any. Otherwise: + * + * - A negative error code. This will abort the output phase, + * and return this status to the reader. + * + * - VFILE_SEQ_SKIP, a special value indicating that the + * current record should be skipped and will not be output. + */ + int (*show)(struct xnvfile_regular_iterator *it, void *data); + /** + * @anchor regular_store + * This handler receives data written to the vfile, likely for + * updating some kernel setting, or triggering any other + * action which fits. This is the only handler which deals + * with the write-side of a vfile. It is called when writing + * to the /proc entry of the vfile from a user-space process. + * + * The input data is described by a descriptor passed to the + * handler, which may be subsequently passed to parsing helper + * routines. For instance, xnvfile_get_string() will accept + * the input descriptor for returning the written data as a + * null-terminated character string. On the other hand, + * xnvfile_get_integer() will attempt to return a long integer + * from the input data. + * + * @param input A pointer to an input descriptor. It refers to + * an opaque data from the handler's standpoint. + * + * @return the number of bytes read from the input descriptor + * if the call succeeds. Otherwise, a negative error code. + * Return values from parsing helper routines are commonly + * passed back to the caller by the @ref regular_store + * "store() handler". + * + * @note This handler is optional, and may be omitted for + * read-only vfiles. + */ + ssize_t (*store)(struct xnvfile_input *input); +}; + +struct xnvfile_regular { + struct xnvfile entry; + size_t privsz; + struct xnvfile_regular_ops *ops; +}; + +struct xnvfile_regular_template { + size_t privsz; + struct xnvfile_regular_ops *ops; + struct xnvfile_lock_ops *lockops; +}; + +/** + * @brief Regular vfile iterator + * @anchor regular_iterator + * + * This structure defines an iterator over a regular vfile. + */ +struct xnvfile_regular_iterator { + /** Current record position while iterating. */ + loff_t pos; + /** Backlink to the host sequential file supporting the vfile. 
*/ + struct seq_file *seq; + /** Backlink to the vfile being read. */ + struct xnvfile_regular *vfile; + /** + * Start of private area. Use xnvfile_iterator_priv() to + * address it. + */ + char private[0]; +}; + +/** + * @brief Snapshot vfile operation descriptor + * @anchor snapshot_ops + * + * This structure describes the operations available with a + * snapshot-driven vfile. It defines handlers for returning a + * printable snapshot of some Xenomai object contents upon a + * user-space read request, and for updating this object upon a + * user-space write request. + */ +struct xnvfile_snapshot_ops { + /** + * @anchor snapshot_rewind + * This handler (re-)initializes the data collection, moving + * the seek pointer at the first record. When the file + * revision tag is touched while collecting data, the current + * reading is aborted, all collected data dropped, and the + * vfile is eventually rewound. + * + * @param it A pointer to the current snapshot iterator. Two + * useful information can be retrieved from this iterator in + * this context: + * + * - it->vfile is a pointer to the descriptor of the virtual + * file being rewound. + * + * - xnvfile_iterator_priv(it) returns a pointer to the + * private data area, available from the descriptor, which + * size is vfile->privsz. If the latter size is zero, the + * returned pointer is meaningless and should not be used. + * + * @return A negative error code aborts the data collection, + * and is passed back to the reader. Otherwise: + * + * - a strictly positive value is interpreted as the total + * number of records which will be returned by the @ref + * snapshot_next "next() handler" during the data collection + * phase. If no @ref snapshot_begin "begin() handler" is + * provided in the @ref snapshot_ops "operation descriptor", + * this value is used to allocate the snapshot buffer + * internally. The size of this buffer would then be + * vfile->datasz * value. + * + * - zero leaves the allocation to the @ref snapshot_begin + * "begin() handler" if present, or indicates that no record + * is to be output in case such handler is not given. + * + * @note This handler is optional; a NULL value indicates that + * nothing needs to be done for rewinding the vfile. It is + * called with the vfile lock held. + */ + int (*rewind)(struct xnvfile_snapshot_iterator *it); + /** + * @anchor snapshot_begin + * This handler should allocate the snapshot buffer to hold + * records during the data collection phase. When specified, + * all records collected via the @ref snapshot_next "next() + * handler" will be written to a cell from the memory area + * returned by begin(). + * + * @param it A pointer to the current snapshot iterator. + * + * @return A pointer to the record buffer, if the call + * succeeds. Otherwise: + * + * - NULL in case of allocation error. This will abort the data + * collection, and return -ENOMEM to the reader. + * + * - VFILE_SEQ_EMPTY, a special value indicating that no + * record will be output. In such a case, the @ref + * snapshot_next "next() handler" will not be called, and the + * data collection will stop immediately. However, the @ref + * snapshot_show "show() handler" will still be called once, + * with a NULL data pointer (i.e. header display request). + * + * @note This handler is optional; if none is given, an + * internal allocation depending on the value returned by the + * @ref snapshot_rewind "rewind() handler" can be obtained. 
+ */ + void *(*begin)(struct xnvfile_snapshot_iterator *it); + /** + * @anchor snapshot_end + * This handler releases the memory buffer previously obtained + * from begin(). It is usually called after the snapshot data + * has been output by show(), but it may also be called before + * rewinding the vfile after a revision change, to release the + * dropped buffer. + * + * @param it A pointer to the current snapshot iterator. + * + * @param buf A pointer to the buffer to release. + * + * @note This routine is optional and the pointer may be + * NULL. It is not needed upon internal buffer allocation; + * see the description of the @ref snapshot_rewind "rewind() + * handler". + */ + void (*end)(struct xnvfile_snapshot_iterator *it, void *buf); + /** + * @anchor snapshot_next + * This handler fetches the next record, as part of the + * snapshot data to be sent back to the reader via the + * show(). + * + * @param it A pointer to the current snapshot iterator. + * + * @param data A pointer to the record to fill in. + * + * @return a strictly positive value, if the call succeeds and + * leaves a valid record into @a data, which should be passed + * to the @ref snapshot_show "show() handler()" during the + * formatting and output phase. Otherwise: + * + * - A negative error code. This will abort the data + * collection, and return this status to the reader. + * + * - VFILE_SEQ_SKIP, a special value indicating that the + * current record should be skipped. In such a case, the @a + * data pointer is not advanced to the next position before + * the @ref snapshot_next "next() handler" is called anew. + * + * @note This handler is called with the vfile lock + * held. Before each invocation of this handler, the vfile + * core checks whether the revision tag has been touched, in + * which case the data collection is restarted from scratch. A + * data collection phase succeeds whenever all records can be + * fetched via the @ref snapshot_next "next() handler", while + * the revision tag remains unchanged, which indicates that a + * consistent snapshot of the object state was taken. + */ + int (*next)(struct xnvfile_snapshot_iterator *it, void *data); + /** + * @anchor snapshot_show + * This handler should format and output a record from the + * collected data. + * + * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and + * xnvfile_putc() are available to format and/or emit the + * output. All routines take the iterator argument @a it as + * their first parameter. + * + * @param it A pointer to the current snapshot iterator. + * + * @param data A pointer to the record to format then + * output. The first call to the handler is always passed a + * NULL @a data pointer; the show handler should test this + * special value to output any header that fits, prior to + * receiving more calls with actual records. + * + * @return zero if the call succeeds, also indicating that the + * handler should be called for the next record if + * any. Otherwise: + * + * - A negative error code. This will abort the output phase, + * and return this status to the reader. + * + * - VFILE_SEQ_SKIP, a special value indicating that the + * current record should be skipped and will not be output. + */ + int (*show)(struct xnvfile_snapshot_iterator *it, void *data); + /** + * @anchor snapshot_store + * This handler receives data written to the vfile, likely for + * updating the associated Xenomai object's state, or + * triggering any other action which fits. This is the only + * handler which deals with the write-side of a vfile. 
It is + * called when writing to the /proc entry of the vfile + * from a user-space process. + * + * The input data is described by a descriptor passed to the + * handler, which may be subsequently passed to parsing helper + * routines. For instance, xnvfile_get_string() will accept + * the input descriptor for returning the written data as a + * null-terminated character string. On the other hand, + * xnvfile_get_integer() will attempt to return a long integer + * from the input data. + * + * @param input A pointer to an input descriptor. It refers to + * an opaque data from the handler's standpoint. + * + * @return the number of bytes read from the input descriptor + * if the call succeeds. Otherwise, a negative error code. + * Return values from parsing helper routines are commonly + * passed back to the caller by the @ref snapshot_store + * "store() handler". + * + * @note This handler is optional, and may be omitted for + * read-only vfiles. + */ + ssize_t (*store)(struct xnvfile_input *input); +}; + +/** + * @brief Snapshot revision tag + * @anchor revision_tag + * + * This structure defines a revision tag to be used with @ref + * snapshot_vfile "snapshot-driven vfiles". + */ +struct xnvfile_rev_tag { + /** Current revision number. */ + int rev; +}; + +struct xnvfile_snapshot_template { + size_t privsz; + size_t datasz; + struct xnvfile_rev_tag *tag; + struct xnvfile_snapshot_ops *ops; + struct xnvfile_lock_ops *lockops; +}; + +/** + * @brief Snapshot vfile descriptor + * @anchor snapshot_vfile + * + * This structure describes a snapshot-driven vfile. Reading from + * such a vfile involves a preliminary data collection phase under + * lock protection, and a subsequent formatting and output phase of + * the collected data records. Locking is done in a way that does not + * increase worst-case latency, regardless of the number of records to + * be collected for output. + */ +struct xnvfile_snapshot { + struct xnvfile entry; + size_t privsz; + size_t datasz; + struct xnvfile_rev_tag *tag; + struct xnvfile_snapshot_ops *ops; +}; + +/** + * @brief Snapshot-driven vfile iterator + * @anchor snapshot_iterator + * + * This structure defines an iterator over a snapshot-driven vfile. + */ +struct xnvfile_snapshot_iterator { + /** Number of collected records. */ + int nrdata; + /** Address of record buffer. */ + caddr_t databuf; + /** Backlink to the host sequential file supporting the vfile. */ + struct seq_file *seq; + /** Backlink to the vfile being read. */ + struct xnvfile_snapshot *vfile; + /** Buffer release handler. */ + void (*endfn)(struct xnvfile_snapshot_iterator *it, void *buf); + /** + * Start of private area. Use xnvfile_iterator_priv() to + * address it. + */ + char private[0]; +}; + +struct xnvfile_directory { + struct xnvfile entry; +}; + +struct xnvfile_link { + struct xnvfile entry; +}; + +/* vfile.begin()=> */ +#define VFILE_SEQ_EMPTY ((void *)-1) +/* =>vfile.show() */ +#define VFILE_SEQ_START SEQ_START_TOKEN +/* vfile.next/show()=> */ +#define VFILE_SEQ_SKIP 2 + +#define xnvfile_printf(it, args...) 
seq_printf((it)->seq, ##args) +#define xnvfile_write(it, data, len) seq_write((it)->seq, (data),(len)) +#define xnvfile_puts(it, s) seq_puts((it)->seq, (s)) +#define xnvfile_putc(it, c) seq_putc((it)->seq, (c)) + +static inline void xnvfile_touch_tag(struct xnvfile_rev_tag *tag) +{ + tag->rev++; +} + +static inline void xnvfile_touch(struct xnvfile_snapshot *vfile) +{ + xnvfile_touch_tag(vfile->tag); +} + +#define xnvfile_noentry \ + { \ + .pde = NULL, \ + .private = NULL, \ + .file = NULL, \ + .refcnt = 0, \ + } + +#define xnvfile_nodir { .entry = xnvfile_noentry } +#define xnvfile_nolink { .entry = xnvfile_noentry } +#define xnvfile_nofile { .entry = xnvfile_noentry } + +#define xnvfile_priv(e) ((e)->entry.private) +#define xnvfile_nref(e) ((e)->entry.refcnt) +#define xnvfile_file(e) ((e)->entry.file) +#define xnvfile_iterator_priv(it) ((void *)(&(it)->private)) + +extern struct xnvfile_nklock_class xnvfile_nucleus_lock; + +extern struct xnvfile_directory cobalt_vfroot; + +int xnvfile_init_root(void); + +void xnvfile_destroy_root(void); + +int xnvfile_init_snapshot(const char *name, + struct xnvfile_snapshot *vfile, + struct xnvfile_directory *parent); + +int xnvfile_init_regular(const char *name, + struct xnvfile_regular *vfile, + struct xnvfile_directory *parent); + +int xnvfile_init_dir(const char *name, + struct xnvfile_directory *vdir, + struct xnvfile_directory *parent); + +int xnvfile_init_link(const char *from, + const char *to, + struct xnvfile_link *vlink, + struct xnvfile_directory *parent); + +void xnvfile_destroy(struct xnvfile *vfile); + +ssize_t xnvfile_get_blob(struct xnvfile_input *input, + void *data, size_t size); + +ssize_t xnvfile_get_string(struct xnvfile_input *input, + char *s, size_t maxlen); + +ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp); + +int __vfile_hostlock_get(struct xnvfile *vfile); + +void __vfile_hostlock_put(struct xnvfile *vfile); + +static inline +void xnvfile_destroy_snapshot(struct xnvfile_snapshot *vfile) +{ + xnvfile_destroy(&vfile->entry); +} + +static inline +void xnvfile_destroy_regular(struct xnvfile_regular *vfile) +{ + xnvfile_destroy(&vfile->entry); +} + +static inline +void xnvfile_destroy_dir(struct xnvfile_directory *vdir) +{ + xnvfile_destroy(&vdir->entry); +} + +static inline +void xnvfile_destroy_link(struct xnvfile_link *vlink) +{ + xnvfile_destroy(&vlink->entry); +} + +#define DEFINE_VFILE_HOSTLOCK(name) \ + struct xnvfile_hostlock_class name = { \ + .ops = { \ + .get = __vfile_hostlock_get, \ + .put = __vfile_hostlock_put, \ + }, \ + .mutex = __MUTEX_INITIALIZER(name.mutex), \ + } + +#else /* !CONFIG_XENO_OPT_VFILE */ + +#define xnvfile_touch_tag(tag) do { } while (0) + +#define xnvfile_touch(vfile) do { } while (0) + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_VFILE_H */ --- linux/include/xenomai/cobalt/kernel/synch.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/synch.h 2022-03-21 12:58:31.727866163 +0100 @@ -0,0 +1,179 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SYNCH_H +#define _COBALT_KERNEL_SYNCH_H + +#include +#include +#include +#include +#include + +/** + * @addtogroup cobalt_core_synch + * @{ + */ +#define XNSYNCH_CLAIMED 0x100 /* Claimed by other thread(s) (PI) */ +#define XNSYNCH_CEILING 0x200 /* Actively boosting (PP) */ + +/* Spare flags usable by upper interfaces */ +#define XNSYNCH_SPARE0 0x01000000 +#define XNSYNCH_SPARE1 0x02000000 +#define XNSYNCH_SPARE2 0x04000000 +#define XNSYNCH_SPARE3 0x08000000 +#define XNSYNCH_SPARE4 0x10000000 +#define XNSYNCH_SPARE5 0x20000000 +#define XNSYNCH_SPARE6 0x40000000 +#define XNSYNCH_SPARE7 0x80000000 + +/* Statuses */ +#define XNSYNCH_DONE 0 /* Resource available / operation complete */ +#define XNSYNCH_WAIT 1 /* Calling thread blocked -- start rescheduling */ +#define XNSYNCH_RESCHED 2 /* Force rescheduling */ + +struct xnthread; +struct xnsynch; + +struct xnsynch { + /** wait (weighted) prio in thread->boosters */ + int wprio; + /** thread->boosters */ + struct list_head next; + /** + * &variable holding the current priority ceiling value + * (xnsched_class_rt-based, [1..255], XNSYNCH_PP). + */ + u32 *ceiling_ref; + /** Status word */ + unsigned long status; + /** Pending threads */ + struct list_head pendq; + /** Thread which owns the resource */ + struct xnthread *owner; + /** Pointer to fast lock word */ + atomic_t *fastlock; + /* Cleanup handler */ + void (*cleanup)(struct xnsynch *synch); +}; + +#define XNSYNCH_WAITQUEUE_INITIALIZER(__name) { \ + .status = XNSYNCH_PRIO, \ + .wprio = -1, \ + .pendq = LIST_HEAD_INIT((__name).pendq), \ + .owner = NULL, \ + .cleanup = NULL, \ + .fastlock = NULL, \ + } + +#define DEFINE_XNWAITQ(__name) \ + struct xnsynch __name = XNSYNCH_WAITQUEUE_INITIALIZER(__name) + +static inline void xnsynch_set_status(struct xnsynch *synch, int bits) +{ + synch->status |= bits; +} + +static inline void xnsynch_clear_status(struct xnsynch *synch, int bits) +{ + synch->status &= ~bits; +} + +#define xnsynch_for_each_sleeper(__pos, __synch) \ + list_for_each_entry(__pos, &(__synch)->pendq, plink) + +#define xnsynch_for_each_sleeper_safe(__pos, __tmp, __synch) \ + list_for_each_entry_safe(__pos, __tmp, &(__synch)->pendq, plink) + +static inline int xnsynch_pended_p(struct xnsynch *synch) +{ + return !list_empty(&synch->pendq); +} + +static inline struct xnthread *xnsynch_owner(struct xnsynch *synch) +{ + return synch->owner; +} + +#define xnsynch_fastlock(synch) ((synch)->fastlock) +#define xnsynch_fastlock_p(synch) ((synch)->fastlock != NULL) +#define xnsynch_owner_check(synch, thread) \ + xnsynch_fast_owner_check((synch)->fastlock, thread->handle) + +#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED + +void xnsynch_detect_relaxed_owner(struct xnsynch *synch, + struct xnthread *sleeper); + +void xnsynch_detect_boosted_relax(struct xnthread *owner); + +#else /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */ + +static inline void xnsynch_detect_relaxed_owner(struct xnsynch *synch, + struct xnthread *sleeper) { } + +static inline void xnsynch_detect_boosted_relax(struct xnthread *owner) { } + +#endif /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */ + +void xnsynch_init(struct xnsynch *synch, int flags, + atomic_t *fastlock); + +void xnsynch_init_protect(struct xnsynch *synch, int flags, + atomic_t 
*fastlock, u32 *ceiling_ref); + +int xnsynch_destroy(struct xnsynch *synch); + +void xnsynch_commit_ceiling(struct xnthread *curr); + +static inline void xnsynch_register_cleanup(struct xnsynch *synch, + void (*handler)(struct xnsynch *)) +{ + synch->cleanup = handler; +} + +int __must_check xnsynch_sleep_on(struct xnsynch *synch, + xnticks_t timeout, + xntmode_t timeout_mode); + +struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch); + +int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr); + +void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, + struct xnthread *sleeper); + +int __must_check xnsynch_acquire(struct xnsynch *synch, + xnticks_t timeout, + xntmode_t timeout_mode); + +int __must_check xnsynch_try_acquire(struct xnsynch *synch); + +bool xnsynch_release(struct xnsynch *synch, struct xnthread *thread); + +struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch); + +int xnsynch_flush(struct xnsynch *synch, int reason); + +void xnsynch_requeue_sleeper(struct xnthread *thread); + +void xnsynch_forget_sleeper(struct xnthread *thread); + +/** @} */ + +#endif /* !_COBALT_KERNEL_SYNCH_H_ */ --- linux/include/xenomai/cobalt/kernel/sched.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/sched.h 2022-03-21 12:58:31.720866231 +0100 @@ -0,0 +1,674 @@ +/* + * Copyright (C) 2008 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_H +#define _COBALT_KERNEL_SCHED_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +/* Sched status flags */ +#define XNRESCHED 0x10000000 /* Needs rescheduling */ +#define XNINSW 0x20000000 /* In context switch */ +#define XNINTCK 0x40000000 /* In master tick handler context */ + +/* Sched local flags */ +#define XNIDLE 0x00010000 /* Idle (no outstanding timer) */ +#define XNHTICK 0x00008000 /* Host tick pending */ +#define XNINIRQ 0x00004000 /* In IRQ handling context */ +#define XNHDEFER 0x00002000 /* Host tick deferred */ + +/* + * Hardware timer is stopped. + */ +#define XNTSTOP 0x00000800 + +struct xnsched_rt { + xnsched_queue_t runnable; /*!< Runnable thread queue. */ +}; + +/*! + * \brief Scheduling information structure. + */ + +struct xnsched { + /*!< Scheduler specific status bitmask. */ + unsigned long status; + /*!< Scheduler specific local flags bitmask. */ + unsigned long lflags; + /*!< Current thread. */ + struct xnthread *curr; +#ifdef CONFIG_SMP + /*!< Owner CPU id. */ + int cpu; + /*!< Mask of CPUs needing rescheduling. */ + cpumask_t resched; +#endif + /*!< Context of built-in real-time class. */ + struct xnsched_rt rt; +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + /*!< Context of weak scheduling class. 
*/ + struct xnsched_weak weak; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_TP + /*!< Context of TP class. */ + struct xnsched_tp tp; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + /*!< Context of sporadic scheduling class. */ + struct xnsched_sporadic pss; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + /*!< Context of runtime quota scheduling. */ + struct xnsched_quota quota; +#endif + /*!< Interrupt nesting level. */ + volatile unsigned inesting; + /*!< Host timer. */ + struct xntimer htimer; + /*!< Round-robin timer. */ + struct xntimer rrbtimer; + /*!< Root thread control block. */ + struct xnthread rootcb; +#ifdef CONFIG_XENO_ARCH_FPU + /*!< Thread owning the current FPU context. */ + struct xnthread *fpuholder; +#endif +#ifdef CONFIG_XENO_OPT_WATCHDOG + /*!< Watchdog timer object. */ + struct xntimer wdtimer; +#endif +#ifdef CONFIG_XENO_OPT_STATS + /*!< Last account switch date (ticks). */ + xnticks_t last_account_switch; + /*!< Currently active account */ + xnstat_exectime_t *current_account; +#endif +}; + +DECLARE_PER_CPU(struct xnsched, nksched); + +extern cpumask_t cobalt_cpu_affinity; + +extern struct list_head nkthreadq; + +extern int cobalt_nrthreads; + +#ifdef CONFIG_XENO_OPT_VFILE +extern struct xnvfile_rev_tag nkthreadlist_tag; +#endif + +union xnsched_policy_param; + +struct xnsched_class { + void (*sched_init)(struct xnsched *sched); + void (*sched_enqueue)(struct xnthread *thread); + void (*sched_dequeue)(struct xnthread *thread); + void (*sched_requeue)(struct xnthread *thread); + struct xnthread *(*sched_pick)(struct xnsched *sched); + void (*sched_tick)(struct xnsched *sched); + void (*sched_rotate)(struct xnsched *sched, + const union xnsched_policy_param *p); + void (*sched_migrate)(struct xnthread *thread, + struct xnsched *sched); + int (*sched_chkparam)(struct xnthread *thread, + const union xnsched_policy_param *p); + /** + * Set base scheduling parameters. This routine is indirectly + * called upon a change of base scheduling settings through + * __xnthread_set_schedparam() -> xnsched_set_policy(), + * exclusively. + * + * The scheduling class implementation should do the necessary + * housekeeping to comply with the new settings. + * thread->base_class is up to date before the call is made, + * and should be considered for the new weighted priority + * calculation. On the contrary, thread->sched_class should + * NOT be referred to by this handler. + * + * sched_setparam() is NEVER involved in PI or PP + * management. However it must deny a priority update if it + * contradicts an ongoing boost for @a thread. This is + * typically what the xnsched_set_effective_priority() helper + * does for such handler. + * + * @param thread Affected thread. + * @param p New base policy settings. + * + * @return True if the effective priority was updated + * (thread->cprio). 
+ */ + bool (*sched_setparam)(struct xnthread *thread, + const union xnsched_policy_param *p); + void (*sched_getparam)(struct xnthread *thread, + union xnsched_policy_param *p); + void (*sched_trackprio)(struct xnthread *thread, + const union xnsched_policy_param *p); + void (*sched_protectprio)(struct xnthread *thread, int prio); + int (*sched_declare)(struct xnthread *thread, + const union xnsched_policy_param *p); + void (*sched_forget)(struct xnthread *thread); + void (*sched_kick)(struct xnthread *thread); +#ifdef CONFIG_XENO_OPT_VFILE + int (*sched_init_vfile)(struct xnsched_class *schedclass, + struct xnvfile_directory *vfroot); + void (*sched_cleanup_vfile)(struct xnsched_class *schedclass); +#endif + int nthreads; + struct xnsched_class *next; + int weight; + int policy; + const char *name; +}; + +#define XNSCHED_CLASS_WEIGHT(n) (n * XNSCHED_CLASS_WEIGHT_FACTOR) + +/* Placeholder for current thread priority */ +#define XNSCHED_RUNPRIO 0x80000000 + +#define xnsched_for_each_thread(__thread) \ + list_for_each_entry(__thread, &nkthreadq, glink) + +#ifdef CONFIG_SMP +static inline int xnsched_cpu(struct xnsched *sched) +{ + return sched->cpu; +} +#else /* !CONFIG_SMP */ +static inline int xnsched_cpu(struct xnsched *sched) +{ + return 0; +} +#endif /* CONFIG_SMP */ + +static inline struct xnsched *xnsched_struct(int cpu) +{ + return &per_cpu(nksched, cpu); +} + +static inline struct xnsched *xnsched_current(void) +{ + /* IRQs off */ + return raw_cpu_ptr(&nksched); +} + +static inline struct xnthread *xnsched_current_thread(void) +{ + return xnsched_current()->curr; +} + +/* Test resched flag of given sched. */ +static inline int xnsched_resched_p(struct xnsched *sched) +{ + return sched->status & XNRESCHED; +} + +/* Set self resched flag for the current scheduler. */ +static inline void xnsched_set_self_resched(struct xnsched *sched) +{ + sched->status |= XNRESCHED; +} + +/* Set resched flag for the given scheduler. */ +#ifdef CONFIG_SMP + +static inline void xnsched_set_resched(struct xnsched *sched) +{ + struct xnsched *current_sched = xnsched_current(); + + if (current_sched == sched) + current_sched->status |= XNRESCHED; + else if (!xnsched_resched_p(sched)) { + cpumask_set_cpu(xnsched_cpu(sched), ¤t_sched->resched); + sched->status |= XNRESCHED; + current_sched->status |= XNRESCHED; + } +} + +#define xnsched_realtime_cpus cobalt_pipeline.supported_cpus + +static inline int xnsched_supported_cpu(int cpu) +{ + return cpumask_test_cpu(cpu, &xnsched_realtime_cpus); +} + +static inline int xnsched_threading_cpu(int cpu) +{ + return cpumask_test_cpu(cpu, &cobalt_cpu_affinity); +} + +#else /* !CONFIG_SMP */ + +static inline void xnsched_set_resched(struct xnsched *sched) +{ + xnsched_set_self_resched(sched); +} + +#define xnsched_realtime_cpus CPU_MASK_ALL + +static inline int xnsched_supported_cpu(int cpu) +{ + return 1; +} + +static inline int xnsched_threading_cpu(int cpu) +{ + return 1; +} + +#endif /* !CONFIG_SMP */ + +#define for_each_realtime_cpu(cpu) \ + for_each_online_cpu(cpu) \ + if (xnsched_supported_cpu(cpu)) \ + +int ___xnsched_run(struct xnsched *sched); + +void __xnsched_run_handler(void); + +static inline int __xnsched_run(struct xnsched *sched) +{ + /* + * Reschedule if XNSCHED is pending, but never over an IRQ + * handler or in the middle of unlocked context switch. 
+ */ + if (((sched->status|sched->lflags) & + (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED) + return 0; + + return pipeline_schedule(sched); +} + +static inline int xnsched_run(void) +{ + struct xnsched *sched = xnsched_current(); + /* + * sched->curr is shared locklessly with ___xnsched_run(). + * READ_ONCE() makes sure the compiler never uses load tearing + * for reading this pointer piecemeal, so that multiple stores + * occurring concurrently on remote CPUs never yield a + * spurious merged value on the local one. + */ + struct xnthread *curr = READ_ONCE(sched->curr); + + /* + * If running over the root thread, hard irqs must be off + * (asserted out of line in ___xnsched_run()). + */ + return curr->lock_count > 0 ? 0 : __xnsched_run(sched); +} + +void xnsched_lock(void); + +void xnsched_unlock(void); + +static inline int xnsched_interrupt_p(void) +{ + return xnsched_current()->lflags & XNINIRQ; +} + +static inline int xnsched_root_p(void) +{ + return xnthread_test_state(xnsched_current_thread(), XNROOT); +} + +static inline int xnsched_unblockable_p(void) +{ + return xnsched_interrupt_p() || xnsched_root_p(); +} + +static inline int xnsched_primary_p(void) +{ + return !xnsched_unblockable_p(); +} + +bool xnsched_set_effective_priority(struct xnthread *thread, + int prio); + +#include +#include + +int xnsched_init_proc(void); + +void xnsched_cleanup_proc(void); + +void xnsched_register_classes(void); + +void xnsched_init_all(void); + +void xnsched_destroy_all(void); + +struct xnthread *xnsched_pick_next(struct xnsched *sched); + +void xnsched_putback(struct xnthread *thread); + +int xnsched_set_policy(struct xnthread *thread, + struct xnsched_class *sched_class, + const union xnsched_policy_param *p); + +void xnsched_track_policy(struct xnthread *thread, + struct xnthread *target); + +void xnsched_protect_priority(struct xnthread *thread, + int prio); + +void xnsched_migrate(struct xnthread *thread, + struct xnsched *sched); + +void xnsched_migrate_passive(struct xnthread *thread, + struct xnsched *sched); + +/** + * @fn void xnsched_rotate(struct xnsched *sched, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param) + * @brief Rotate a scheduler runqueue. + * + * The specified scheduling class is requested to rotate its runqueue + * for the given scheduler. Rotation is performed according to the + * scheduling parameter specified by @a sched_param. + * + * @note The nucleus supports round-robin scheduling for the members + * of the RT class. + * + * @param sched The per-CPU scheduler hosting the target scheduling + * class. + * + * @param sched_class The scheduling class which should rotate its + * runqueue. + * + * @param sched_param The scheduling parameter providing rotation + * information to the specified scheduling class. 
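/*
 * Illustrative aside, not part of the patch hunks: the typical calling
 * pattern around xnsched_run(). A caller updates thread state while
 * holding the core lock, then invokes xnsched_run(), which stays a
 * no-op unless the XNRESCHED flag was raised on the local scheduler.
 * The nklock helpers, xnthread_resume() and the XNPEND block bit are
 * assumed from the rest of the Cobalt core, not from this hunk.
 */
static void demo_wakeup(struct xnthread *waiter)
{
	spl_t s;

	xnlock_get_irqsave(&nklock, s);		/* assumed locking helper */

	xnthread_resume(waiter, XNPEND);	/* readies the thread, may raise XNRESCHED */
	xnsched_run();				/* triggers the rescheduling procedure if needed */

	xnlock_put_irqrestore(&nklock, s);
}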
+ * + * @coretags{unrestricted, atomic-entry} + */ +static inline void xnsched_rotate(struct xnsched *sched, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param) +{ + sched_class->sched_rotate(sched, sched_param); +} + +static inline int xnsched_init_thread(struct xnthread *thread) +{ + int ret = 0; + + xnsched_idle_init_thread(thread); + xnsched_rt_init_thread(thread); + +#ifdef CONFIG_XENO_OPT_SCHED_TP + ret = xnsched_tp_init_thread(thread); + if (ret) + return ret; +#endif /* CONFIG_XENO_OPT_SCHED_TP */ +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + ret = xnsched_sporadic_init_thread(thread); + if (ret) + return ret; +#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */ +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + ret = xnsched_quota_init_thread(thread); + if (ret) + return ret; +#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */ + + return ret; +} + +static inline int xnsched_root_priority(struct xnsched *sched) +{ + return sched->rootcb.cprio; +} + +static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched) +{ + return sched->rootcb.sched_class; +} + +static inline void xnsched_tick(struct xnsched *sched) +{ + struct xnthread *curr = sched->curr; + struct xnsched_class *sched_class = curr->sched_class; + /* + * A thread that undergoes round-robin scheduling only + * consumes its time slice when it runs within its own + * scheduling class, which excludes temporary PI boosts, and + * does not hold the scheduler lock. + */ + if (sched_class == curr->base_class && + sched_class->sched_tick && + xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB && + curr->lock_count == 0) + sched_class->sched_tick(sched); +} + +static inline int xnsched_chkparam(struct xnsched_class *sched_class, + struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (sched_class->sched_chkparam) + return sched_class->sched_chkparam(thread, p); + + return 0; +} + +static inline int xnsched_declare(struct xnsched_class *sched_class, + struct xnthread *thread, + const union xnsched_policy_param *p) +{ + int ret; + + if (sched_class->sched_declare) { + ret = sched_class->sched_declare(thread, p); + if (ret) + return ret; + } + if (sched_class != thread->base_class) + sched_class->nthreads++; + + return 0; +} + +static inline int xnsched_calc_wprio(struct xnsched_class *sched_class, + int prio) +{ + return prio + sched_class->weight; +} + +#ifdef CONFIG_XENO_OPT_SCHED_CLASSES + +static inline void xnsched_enqueue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + sched_class->sched_enqueue(thread); +} + +static inline void xnsched_dequeue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + sched_class->sched_dequeue(thread); +} + +static inline void xnsched_requeue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + sched_class->sched_requeue(thread); +} + +static inline +bool xnsched_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + return thread->base_class->sched_setparam(thread, p); +} + +static inline void xnsched_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + thread->sched_class->sched_getparam(thread, p); +} + +static inline void xnsched_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + thread->sched_class->sched_trackprio(thread, p); + thread->wprio 
= xnsched_calc_wprio(thread->sched_class, thread->cprio); +} + +static inline void xnsched_protectprio(struct xnthread *thread, int prio) +{ + thread->sched_class->sched_protectprio(thread, prio); + thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio); +} + +static inline void xnsched_forget(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->base_class; + + --sched_class->nthreads; + + if (sched_class->sched_forget) + sched_class->sched_forget(thread); +} + +static inline void xnsched_kick(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->base_class; + + xnthread_set_info(thread, XNKICKED); + + if (sched_class->sched_kick) + sched_class->sched_kick(thread); + + xnsched_set_resched(thread->sched); +} + +#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */ + +/* + * If only the RT and IDLE scheduling classes are compiled in, we can + * fully inline common helpers for dealing with those. + */ + +static inline void xnsched_enqueue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + __xnsched_rt_enqueue(thread); +} + +static inline void xnsched_dequeue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + __xnsched_rt_dequeue(thread); +} + +static inline void xnsched_requeue(struct xnthread *thread) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class != &xnsched_class_idle) + __xnsched_rt_requeue(thread); +} + +static inline bool xnsched_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + struct xnsched_class *sched_class = thread->base_class; + + if (sched_class == &xnsched_class_idle) + return __xnsched_idle_setparam(thread, p); + + return __xnsched_rt_setparam(thread, p); +} + +static inline void xnsched_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class == &xnsched_class_idle) + __xnsched_idle_getparam(thread, p); + else + __xnsched_rt_getparam(thread, p); +} + +static inline void xnsched_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class == &xnsched_class_idle) + __xnsched_idle_trackprio(thread, p); + else + __xnsched_rt_trackprio(thread, p); + + thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio); +} + +static inline void xnsched_protectprio(struct xnthread *thread, int prio) +{ + struct xnsched_class *sched_class = thread->sched_class; + + if (sched_class == &xnsched_class_idle) + __xnsched_idle_protectprio(thread, prio); + else + __xnsched_rt_protectprio(thread, prio); + + thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio); +} + +static inline void xnsched_forget(struct xnthread *thread) +{ + --thread->base_class->nthreads; + __xnsched_rt_forget(thread); +} + +static inline void xnsched_kick(struct xnthread *thread) +{ + xnthread_set_info(thread, XNKICKED); + xnsched_set_resched(thread->sched); +} + +#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_H */ --- linux/include/xenomai/cobalt/kernel/map.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/map.h 2022-03-21 12:58:31.713866299 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2007 Philippe Gerum . 
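/*
 * Illustrative aside, not part of the patch hunks: moving a thread to
 * the built-in RT class at a given priority through xnsched_set_policy()
 * declared above. The rt.prio layout of union xnsched_policy_param and
 * the xnsched_class_rt descriptor are taken from the class headers
 * further down; the caller is assumed to hold nklock with hard IRQs off.
 */
static int demo_make_rt(struct xnthread *thread, int prio)
{
	union xnsched_policy_param param;

	param.rt.prio = prio;	/* assumed member layout, see sched-rt.h below */

	return xnsched_set_policy(thread, &xnsched_class_rt, &param);
}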
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_MAP_H +#define _COBALT_KERNEL_MAP_H + +#include + +/** + * @addtogroup cobalt_core_map + * @{ + */ + +#define XNMAP_MAX_KEYS (BITS_PER_LONG * BITS_PER_LONG) + +struct xnmap { + int nkeys; + int ukeys; + int offset; + unsigned long himask; + unsigned long himap; +#define __IDMAP_LONGS ((XNMAP_MAX_KEYS+BITS_PER_LONG-1)/BITS_PER_LONG) + unsigned long lomap[__IDMAP_LONGS]; +#undef __IDMAP_LONGS + void *objarray[1]; +}; + +struct xnmap *xnmap_create(int nkeys, + int reserve, + int offset); + +void xnmap_delete(struct xnmap *map); + +int xnmap_enter(struct xnmap *map, + int key, + void *objaddr); + +int xnmap_remove(struct xnmap *map, + int key); + +static inline void *xnmap_fetch_nocheck(struct xnmap *map, int key) +{ + int ofkey = key - map->offset; + return map->objarray[ofkey]; +} + +static inline void *xnmap_fetch(struct xnmap *map, int key) +{ + int ofkey = key - map->offset; + + if (ofkey < 0 || ofkey >= map->nkeys) + return NULL; + + return map->objarray[ofkey]; +} + +/** @} */ + +#endif /* !_COBALT_KERNEL_MAP_H */ --- linux/include/xenomai/cobalt/kernel/sched-idle.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/sched-idle.h 2022-03-21 12:58:31.705866377 +0100 @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2008 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_IDLE_H +#define _COBALT_KERNEL_SCHED_IDLE_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-idle.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +/* Idle priority level - actually never used for indexing. 
*/ +#define XNSCHED_IDLE_PRIO -1 + +extern struct xnsched_class xnsched_class_idle; + +static inline bool __xnsched_idle_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + xnthread_clear_state(thread, XNWEAK); + return xnsched_set_effective_priority(thread, p->idle.prio); +} + +static inline void __xnsched_idle_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + p->idle.prio = thread->cprio; +} + +static inline void __xnsched_idle_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (p) + /* Inheriting a priority-less class makes no sense. */ + XENO_WARN_ON_ONCE(COBALT, 1); + else + thread->cprio = XNSCHED_IDLE_PRIO; +} + +static inline void __xnsched_idle_protectprio(struct xnthread *thread, int prio) +{ + XENO_WARN_ON_ONCE(COBALT, 1); +} + +static inline int xnsched_idle_init_thread(struct xnthread *thread) +{ + return 0; +} + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_IDLE_H */ --- linux/include/xenomai/cobalt/kernel/sched-weak.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/sched-weak.h 2022-03-21 12:58:31.698866445 +0100 @@ -0,0 +1,59 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_WEAK_H +#define _COBALT_KERNEL_SCHED_WEAK_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-weak.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + +#define XNSCHED_WEAK_MIN_PRIO 0 +#define XNSCHED_WEAK_MAX_PRIO 99 +#define XNSCHED_WEAK_NR_PRIO \ + (XNSCHED_WEAK_MAX_PRIO - XNSCHED_WEAK_MIN_PRIO + 1) + +#if XNSCHED_WEAK_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR || \ + (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) && \ + XNSCHED_WEAK_NR_PRIO > XNSCHED_MLQ_LEVELS) +#error "WEAK class has too many priority levels" +#endif + +extern struct xnsched_class xnsched_class_weak; + +struct xnsched_weak { + xnsched_queue_t runnable; /*!< Runnable thread queue. */ +}; + +static inline int xnsched_weak_init_thread(struct xnthread *thread) +{ + return 0; +} + +#endif /* CONFIG_XENO_OPT_SCHED_WEAK */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_WEAK_H */ --- linux/include/xenomai/cobalt/kernel/arith.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/arith.h 2022-03-21 12:58:31.691866513 +0100 @@ -0,0 +1,35 @@ +/* + * Generic arithmetic/conversion routines. + * Copyright © 2005 Stelian Pop. + * Copyright © 2005 Gilles Chanteperdrix. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. 
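/*
 * Illustrative aside, not part of the patch hunks: basic use of the
 * xnmap indexing API declared in map.h above. The auto-allocation
 * convention (a negative key asks for the first free slot, the assigned
 * key or a negative error code is returned) and the meaning of the
 * reserve/offset creation parameters are assumptions, not shown in
 * this excerpt.
 */
static struct xnmap *demo_map;

static int demo_map_setup(void)
{
	/* At most 256 keys, none pre-reserved, key numbering starts at 0. */
	demo_map = xnmap_create(256, 0, 0);

	return demo_map ? 0 : -ENOMEM;
}

static int demo_map_add(void *obj)
{
	return xnmap_enter(demo_map, -1, obj);	/* assumed: returns the key on success */
}

static void *demo_map_lookup(int key)
{
	return xnmap_fetch(demo_map, key);	/* NULL if out of range or unset */
}

static void demo_map_cleanup(void)
{
	xnmap_delete(demo_map);
}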
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_ARITH_H +#define _COBALT_KERNEL_ARITH_H + +#include +#include + +#ifdef __BIG_ENDIAN +#define endianstruct { unsigned int _h; unsigned int _l; } +#else /* __LITTLE_ENDIAN */ +#define endianstruct { unsigned int _l; unsigned int _h; } +#endif + +#include + +#endif /* _COBALT_KERNEL_ARITH_H */ --- linux/include/xenomai/cobalt/kernel/sched-tp.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/sched-tp.h 2022-03-21 12:58:31.683866592 +0100 @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2008 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_TP_H +#define _COBALT_KERNEL_SCHED_TP_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-tp.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#ifdef CONFIG_XENO_OPT_SCHED_TP + +#define XNSCHED_TP_MIN_PRIO 1 +#define XNSCHED_TP_MAX_PRIO 255 +#define XNSCHED_TP_NR_PRIO \ + (XNSCHED_TP_MAX_PRIO - XNSCHED_TP_MIN_PRIO + 1) + +extern struct xnsched_class xnsched_class_tp; + +struct xnsched_tp_window { + xnticks_t w_offset; + int w_part; +}; + +struct xnsched_tp_schedule { + int pwin_nr; + xnticks_t tf_duration; + atomic_t refcount; + struct xnsched_tp_window pwins[0]; +}; + +struct xnsched_tp { + struct xnsched_tpslot { + /** Per-partition runqueue. */ + xnsched_queue_t runnable; + } partitions[CONFIG_XENO_OPT_SCHED_TP_NRPART]; + /** Idle slot for passive windows. 
*/ + struct xnsched_tpslot idle; + /** Active partition slot */ + struct xnsched_tpslot *tps; + /** Time frame timer */ + struct xntimer tf_timer; + /** Global partition schedule */ + struct xnsched_tp_schedule *gps; + /** Window index of next partition */ + int wnext; + /** Start of next time frame */ + xnticks_t tf_start; + /** Assigned thread queue */ + struct list_head threads; +}; + +static inline int xnsched_tp_init_thread(struct xnthread *thread) +{ + thread->tps = NULL; + + return 0; +} + +struct xnsched_tp_schedule * +xnsched_tp_set_schedule(struct xnsched *sched, + struct xnsched_tp_schedule *gps); + +void xnsched_tp_start_schedule(struct xnsched *sched); + +void xnsched_tp_stop_schedule(struct xnsched *sched); + +int xnsched_tp_get_partition(struct xnsched *sched); + +struct xnsched_tp_schedule * +xnsched_tp_get_schedule(struct xnsched *sched); + +void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps); + +#endif /* CONFIG_XENO_OPT_SCHED_TP */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_TP_H */ --- linux/include/xenomai/cobalt/kernel/ppd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/ppd.h 2022-03-21 12:58:31.676866660 +0100 @@ -0,0 +1,42 @@ +/* + * Copyright © 2006 Gilles Chanteperdrix + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_PPD_H +#define _COBALT_KERNEL_PPD_H + +#include +#include +#include +#include + +struct cobalt_umm { + struct xnheap heap; + atomic_t refcount; + void (*release)(struct cobalt_umm *umm); +}; + +struct cobalt_ppd { + struct cobalt_umm umm; + atomic_t refcnt; + char *exe_path; + struct rb_root fds; +}; + +extern struct cobalt_ppd cobalt_kernel_ppd; + +#endif /* _COBALT_KERNEL_PPD_H */ --- linux/include/xenomai/cobalt/kernel/compat.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/compat.h 2022-03-21 12:58:31.668866738 +0100 @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2014 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
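/*
 * Illustrative aside, not part of the patch hunks: installing a
 * temporal-partitioning plan on a per-CPU scheduler with the TP class
 * API declared above. The caller is assumed to have built a
 * struct xnsched_tp_schedule with pwin_nr windows (w_offset/w_part
 * pairs covering tf_duration), and xnsched_tp_set_schedule() is
 * assumed to hand back the previously installed plan, if any.
 */
static void demo_install_tp_plan(struct xnsched *sched,
				 struct xnsched_tp_schedule *gps)
{
	struct xnsched_tp_schedule *old;

	xnsched_tp_stop_schedule(sched);	/* quiesce the time frame timer */

	old = xnsched_tp_set_schedule(sched, gps);
	if (old)
		xnsched_tp_put_schedule(old);	/* drop the reference on the old plan */

	xnsched_tp_start_schedule(sched);	/* start cycling through the new windows */
}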
+ */ +#ifndef _COBALT_KERNEL_COMPAT_H +#define _COBALT_KERNEL_COMPAT_H + +#ifdef CONFIG_XENO_ARCH_SYS3264 + +#include +#include +#include +#include + +struct mq_attr; + +struct __compat_sched_ss_param { + int __sched_low_priority; + struct old_timespec32 __sched_repl_period; + struct old_timespec32 __sched_init_budget; + int __sched_max_repl; +}; + +struct __compat_sched_rr_param { + struct old_timespec32 __sched_rr_quantum; +}; + +struct compat_sched_param_ex { + int sched_priority; + union { + struct __compat_sched_ss_param ss; + struct __compat_sched_rr_param rr; + struct __sched_tp_param tp; + struct __sched_quota_param quota; + } sched_u; +}; + +struct compat_mq_attr { + compat_long_t mq_flags; + compat_long_t mq_maxmsg; + compat_long_t mq_msgsize; + compat_long_t mq_curmsgs; +}; + +struct compat_sched_tp_window { + struct old_timespec32 offset; + struct old_timespec32 duration; + int ptid; +}; + +struct __compat_sched_config_tp { + int op; + int nr_windows; + struct compat_sched_tp_window windows[0]; +}; + +union compat_sched_config { + struct __compat_sched_config_tp tp; + struct __sched_config_quota quota; +}; + +#define compat_sched_tp_confsz(nr_win) \ + (sizeof(struct __compat_sched_config_tp) + nr_win * sizeof(struct compat_sched_tp_window)) + +typedef struct { + compat_ulong_t fds_bits[__FD_SETSIZE / (8 * sizeof(compat_long_t))]; +} compat_fd_set; + +struct compat_rtdm_mmap_request { + u64 offset; + compat_size_t length; + int prot; + int flags; +}; + +int sys32_get_timespec(struct timespec64 *ts, + const struct old_timespec32 __user *cts); + +int sys32_put_timespec(struct old_timespec32 __user *cts, + const struct timespec64 *ts); + +int sys32_get_itimerspec(struct itimerspec64 *its, + const struct old_itimerspec32 __user *cits); + +int sys32_put_itimerspec(struct old_itimerspec32 __user *cits, + const struct itimerspec64 *its); + +int sys32_get_timeval(struct __kernel_old_timeval *tv, + const struct old_timeval32 __user *ctv); + +int sys32_put_timeval(struct old_timeval32 __user *ctv, + const struct __kernel_old_timeval *tv); + +int sys32_get_timex(struct __kernel_timex *tx, + const struct old_timex32 __user *ctx); + +int sys32_put_timex(struct old_timex32 __user *ctx, + const struct __kernel_timex *tx); + +int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds, + size_t cfdsize); + +int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds, + size_t fdsize); + +int sys32_get_param_ex(int policy, + struct sched_param_ex *p, + const struct compat_sched_param_ex __user *u_cp); + +int sys32_put_param_ex(int policy, + struct compat_sched_param_ex __user *u_cp, + const struct sched_param_ex *p); + +int sys32_get_mqattr(struct mq_attr *ap, + const struct compat_mq_attr __user *u_cap); + +int sys32_put_mqattr(struct compat_mq_attr __user *u_cap, + const struct mq_attr *ap); + +int sys32_get_sigevent(struct sigevent *ev, + const struct compat_sigevent *__user u_cev); + +int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset); + +int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set); + +int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval); + +int sys32_put_siginfo(void __user *u_si, const struct siginfo *si, + int overrun); + +int sys32_get_msghdr(struct user_msghdr *msg, + const struct compat_msghdr __user *u_cmsg); + +int sys32_get_mmsghdr(struct mmsghdr *mmsg, + const struct compat_mmsghdr __user *u_cmmsg); + +int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg, + const struct user_msghdr *msg); + +int 
sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg, + const struct mmsghdr *mmsg); + +int sys32_get_iovec(struct iovec *iov, + const struct compat_iovec __user *ciov, + int ciovlen); + +int sys32_put_iovec(struct compat_iovec __user *u_ciov, + const struct iovec *iov, + int iovlen); + +#endif /* CONFIG_XENO_ARCH_SYS3264 */ + +#endif /* !_COBALT_KERNEL_COMPAT_H */ --- linux/include/xenomai/cobalt/kernel/assert.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/assert.h 2022-03-21 12:58:31.661866806 +0100 @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2006 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_ASSERT_H +#define _COBALT_KERNEL_ASSERT_H + +#include + +#define XENO_INFO KERN_INFO "[Xenomai] " +#define XENO_WARNING KERN_WARNING "[Xenomai] " +#define XENO_ERR KERN_ERR "[Xenomai] " + +#define XENO_DEBUG(__subsys) \ + IS_ENABLED(CONFIG_XENO_OPT_DEBUG_##__subsys) +#define XENO_ASSERT(__subsys, __cond) \ + (!WARN_ON(XENO_DEBUG(__subsys) && !(__cond))) +#define XENO_BUG(__subsys) \ + BUG_ON(XENO_DEBUG(__subsys)) +#define XENO_BUG_ON(__subsys, __cond) \ + BUG_ON(XENO_DEBUG(__subsys) && (__cond)) +#define XENO_WARN(__subsys, __cond, __fmt...) 
\ + WARN(XENO_DEBUG(__subsys) && (__cond), __fmt) +#define XENO_WARN_ON(__subsys, __cond) \ + WARN_ON(XENO_DEBUG(__subsys) && (__cond)) +#define XENO_WARN_ON_ONCE(__subsys, __cond) \ + WARN_ON_ONCE(XENO_DEBUG(__subsys) && (__cond)) +#ifdef CONFIG_SMP +#define XENO_BUG_ON_SMP(__subsys, __cond) \ + XENO_BUG_ON(__subsys, __cond) +#define XENO_WARN_ON_SMP(__subsys, __cond) \ + XENO_WARN_ON(__subsys, __cond) +#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond) \ + XENO_WARN_ON_ONCE(__subsys, __cond) +#else +#define XENO_BUG_ON_SMP(__subsys, __cond) \ + do { } while (0) +#define XENO_WARN_ON_SMP(__subsys, __cond) \ + do { } while (0) +#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond) \ + do { } while (0) +#endif + +#define TODO() BUILD_BUG_ON(IS_ENABLED(CONFIG_XENO_TODO)) + +#define primary_mode_only() XENO_BUG_ON(CONTEXT, is_secondary_domain()) +#define secondary_mode_only() XENO_BUG_ON(CONTEXT, !is_secondary_domain()) +#define interrupt_only() XENO_BUG_ON(CONTEXT, !xnsched_interrupt_p()) +#define realtime_cpu_only() XENO_BUG_ON(CONTEXT, !xnsched_supported_cpu(raw_smp_processor_id())) +#define thread_only() XENO_BUG_ON(CONTEXT, xnsched_interrupt_p()) +#define irqoff_only() XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0) +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING +#define atomic_only() XENO_BUG_ON(CONTEXT, (xnlock_is_owner(&nklock) && hard_irqs_disabled()) == 0) +#define preemptible_only() XENO_BUG_ON(CONTEXT, xnlock_is_owner(&nklock) || hard_irqs_disabled()) +#else +#define atomic_only() XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0) +#define preemptible_only() XENO_BUG_ON(CONTEXT, hard_irqs_disabled() != 0) +#endif + +#endif /* !_COBALT_KERNEL_ASSERT_H */ --- linux/include/xenomai/cobalt/kernel/timer.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/timer.h 2022-03-21 12:58:31.654866874 +0100 @@ -0,0 +1,551 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
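/*
 * Illustrative aside, not part of the patch hunks: how the debug
 * helpers above are meant to be used inside a Cobalt service. Each
 * check compiles out when the corresponding CONFIG_XENO_OPT_DEBUG_*
 * switch is disabled; COBALT and CONTEXT are debug subsystem names
 * already used by the core, demo_core_service() is hypothetical.
 */
static void demo_core_service(struct xnthread *thread)
{
	primary_mode_only();	/* must run from the out-of-band stage */
	irqoff_only();		/* with hard IRQs disabled */

	/* Debug-only sanity check, gone unless CONFIG_XENO_OPT_DEBUG_COBALT is on. */
	XENO_WARN_ON(COBALT, thread == NULL);

	if (!XENO_ASSERT(COBALT, thread->sched == xnsched_current()))
		return;		/* complains and bails out only when debug checks are on */

	/* ... actual work would go here ... */
}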
+ */ + +#ifndef _COBALT_KERNEL_TIMER_H +#define _COBALT_KERNEL_TIMER_H + +#include +#include +#include +#include +#include +#include + +/** + * @addtogroup cobalt_core_timer + * @{ + */ +#define XN_INFINITE ((xnticks_t)0) +#define XN_NONBLOCK ((xnticks_t)-1) + +/* Timer modes */ +typedef enum xntmode { + XN_RELATIVE, + XN_ABSOLUTE, + XN_REALTIME +} xntmode_t; + +/* Timer status */ +#define XNTIMER_DEQUEUED 0x00000001 +#define XNTIMER_KILLED 0x00000002 +#define XNTIMER_PERIODIC 0x00000004 +#define XNTIMER_REALTIME 0x00000008 +#define XNTIMER_FIRED 0x00000010 +#define XNTIMER_RUNNING 0x00000020 +#define XNTIMER_KGRAVITY 0x00000040 +#define XNTIMER_UGRAVITY 0x00000080 +#define XNTIMER_IGRAVITY 0 /* most conservative */ + +#define XNTIMER_GRAVITY_MASK (XNTIMER_KGRAVITY|XNTIMER_UGRAVITY) +#define XNTIMER_INIT_MASK XNTIMER_GRAVITY_MASK + +/* These flags are available to the real-time interfaces */ +#define XNTIMER_SPARE0 0x01000000 +#define XNTIMER_SPARE1 0x02000000 +#define XNTIMER_SPARE2 0x04000000 +#define XNTIMER_SPARE3 0x08000000 +#define XNTIMER_SPARE4 0x10000000 +#define XNTIMER_SPARE5 0x20000000 +#define XNTIMER_SPARE6 0x40000000 +#define XNTIMER_SPARE7 0x80000000 + +/* Timer priorities */ +#define XNTIMER_LOPRIO (-999999999) +#define XNTIMER_STDPRIO 0 +#define XNTIMER_HIPRIO 999999999 + +struct xntlholder { + struct list_head link; + xnticks_t key; + int prio; +}; + +#define xntlholder_date(h) ((h)->key) +#define xntlholder_prio(h) ((h)->prio) +#define xntlist_init(q) INIT_LIST_HEAD(q) +#define xntlist_empty(q) list_empty(q) + +static inline struct xntlholder *xntlist_head(struct list_head *q) +{ + if (list_empty(q)) + return NULL; + + return list_first_entry(q, struct xntlholder, link); +} + +static inline struct xntlholder *xntlist_next(struct list_head *q, + struct xntlholder *h) +{ + if (list_is_last(&h->link, q)) + return NULL; + + return list_entry(h->link.next, struct xntlholder, link); +} + +static inline struct xntlholder *xntlist_second(struct list_head *q, + struct xntlholder *h) +{ + return xntlist_next(q, h); +} + +static inline void xntlist_insert(struct list_head *q, struct xntlholder *holder) +{ + struct xntlholder *p; + + if (list_empty(q)) { + list_add(&holder->link, q); + return; + } + + /* + * Insert the new timer at the proper place in the single + * queue. O(N) here, but this is the price for the increased + * flexibility... + */ + list_for_each_entry_reverse(p, q, link) { + if ((xnsticks_t) (holder->key - p->key) > 0 || + (holder->key == p->key && holder->prio <= p->prio)) + break; + } + + list_add(&holder->link, &p->link); +} + +#define xntlist_remove(q, h) \ + do { \ + (void)(q); \ + list_del(&(h)->link); \ + } while (0) + +#if defined(CONFIG_XENO_OPT_TIMER_RBTREE) + +#include + +typedef struct { + unsigned long long date; + unsigned prio; + struct rb_node link; +} xntimerh_t; + +#define xntimerh_date(h) ((h)->date) +#define xntimerh_prio(h) ((h)->prio) +#define xntimerh_init(h) do { } while (0) + +typedef struct { + struct rb_root root; + xntimerh_t *head; +} xntimerq_t; + +#define xntimerq_init(q) \ + ({ \ + xntimerq_t *_q = (q); \ + _q->root = RB_ROOT; \ + _q->head = NULL; \ + }) + +#define xntimerq_destroy(q) do { } while (0) +#define xntimerq_empty(q) ((q)->head == NULL) + +#define xntimerq_head(q) ((q)->head) + +#define xntimerq_next(q, h) \ + ({ \ + struct rb_node *_node = rb_next(&(h)->link); \ + _node ? 
(container_of(_node, xntimerh_t, link)) : NULL; \ + }) + +#define xntimerq_second(q, h) xntimerq_next(q, h) + +void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder); + +static inline void xntimerq_remove(xntimerq_t *q, xntimerh_t *holder) +{ + if (holder == q->head) + q->head = xntimerq_second(q, holder); + + rb_erase(&holder->link, &q->root); +} + +typedef struct { } xntimerq_it_t; + +#define xntimerq_it_begin(q,i) ((void) (i), xntimerq_head(q)) +#define xntimerq_it_next(q,i,h) ((void) (i), xntimerq_next((q),(h))) + +#else /* CONFIG_XENO_OPT_TIMER_LIST */ + +typedef struct xntlholder xntimerh_t; + +#define xntimerh_date(h) xntlholder_date(h) +#define xntimerh_prio(h) xntlholder_prio(h) +#define xntimerh_init(h) do { } while (0) + +typedef struct list_head xntimerq_t; + +#define xntimerq_init(q) xntlist_init(q) +#define xntimerq_destroy(q) do { } while (0) +#define xntimerq_empty(q) xntlist_empty(q) +#define xntimerq_head(q) xntlist_head(q) +#define xntimerq_second(q, h) xntlist_second((q),(h)) +#define xntimerq_insert(q, h) xntlist_insert((q),(h)) +#define xntimerq_remove(q, h) xntlist_remove((q),(h)) + +typedef struct { } xntimerq_it_t; + +#define xntimerq_it_begin(q,i) ((void) (i), xntlist_head(q)) +#define xntimerq_it_next(q,i,h) ((void) (i), xntlist_next((q),(h))) + +#endif /* CONFIG_XENO_OPT_TIMER_LIST */ + +struct xnsched; + +struct xntimerdata { + xntimerq_t q; +}; + +static inline struct xntimerdata * +xnclock_percpu_timerdata(struct xnclock *clock, int cpu) +{ + return per_cpu_ptr(clock->timerdata, cpu); +} + +static inline struct xntimerdata * +xnclock_this_timerdata(struct xnclock *clock) +{ + return raw_cpu_ptr(clock->timerdata); +} + +struct xntimer { +#ifdef CONFIG_XENO_OPT_EXTCLOCK + struct xnclock *clock; +#endif + /** Link in timers list. */ + xntimerh_t aplink; + struct list_head adjlink; + /** Timer status. */ + unsigned long status; + /** Periodic interval (clock ticks, 0 == one shot). */ + xnticks_t interval; + /** Periodic interval (nanoseconds, 0 == one shot). */ + xnticks_t interval_ns; + /** Count of timer ticks in periodic mode. */ + xnticks_t periodic_ticks; + /** First tick date in periodic mode. */ + xnticks_t start_date; + /** Date of next periodic release point (timer ticks). */ + xnticks_t pexpect_ticks; + /** Sched structure to which the timer is attached. */ + struct xnsched *sched; + /** Timeout handler. */ + void (*handler)(struct xntimer *timer); +#ifdef CONFIG_XENO_OPT_STATS +#ifdef CONFIG_XENO_OPT_EXTCLOCK + struct xnclock *tracker; +#endif + /** Timer name to be displayed. */ + char name[XNOBJECT_NAME_LEN]; + /** Timer holder in timebase. */ + struct list_head next_stat; + /** Number of timer schedules. */ + xnstat_counter_t scheduled; + /** Number of timer events. 
*/ + xnstat_counter_t fired; +#endif /* CONFIG_XENO_OPT_STATS */ +}; + +#ifdef CONFIG_XENO_OPT_EXTCLOCK + +static inline struct xnclock *xntimer_clock(struct xntimer *timer) +{ + return timer->clock; +} + +void xntimer_set_clock(struct xntimer *timer, + struct xnclock *newclock); + +#else /* !CONFIG_XENO_OPT_EXTCLOCK */ + +static inline struct xnclock *xntimer_clock(struct xntimer *timer) +{ + return &nkclock; +} + +static inline void xntimer_set_clock(struct xntimer *timer, + struct xnclock *newclock) +{ + XENO_BUG_ON(COBALT, newclock != &nkclock); +} + +#endif /* !CONFIG_XENO_OPT_EXTCLOCK */ + +#ifdef CONFIG_SMP +static inline struct xnsched *xntimer_sched(struct xntimer *timer) +{ + return timer->sched; +} +#else /* !CONFIG_SMP */ +#define xntimer_sched(t) xnsched_current() +#endif /* !CONFIG_SMP */ + +#define xntimer_percpu_queue(__timer) \ + ({ \ + struct xntimerdata *tmd; \ + int cpu = xnsched_cpu((__timer)->sched); \ + tmd = xnclock_percpu_timerdata(xntimer_clock(__timer), cpu); \ + &tmd->q; \ + }) + +static inline unsigned long xntimer_gravity(struct xntimer *timer) +{ + struct xnclock *clock = xntimer_clock(timer); + + if (timer->status & XNTIMER_KGRAVITY) + return clock->gravity.kernel; + + if (timer->status & XNTIMER_UGRAVITY) + return clock->gravity.user; + + return clock->gravity.irq; +} + +static inline void xntimer_update_date(struct xntimer *timer) +{ + xntimerh_date(&timer->aplink) = timer->start_date + + xnclock_ns_to_ticks(xntimer_clock(timer), + timer->periodic_ticks * timer->interval_ns) + - xntimer_gravity(timer); +} + +static inline xnticks_t xntimer_pexpect(struct xntimer *timer) +{ + return timer->start_date + + xnclock_ns_to_ticks(xntimer_clock(timer), + timer->pexpect_ticks * timer->interval_ns); +} + +static inline void xntimer_set_priority(struct xntimer *timer, + int prio) +{ + xntimerh_prio(&timer->aplink) = prio; +} + +static inline int xntimer_active_p(struct xntimer *timer) +{ + return timer->sched != NULL; +} + +static inline int xntimer_running_p(struct xntimer *timer) +{ + return (timer->status & XNTIMER_RUNNING) != 0; +} + +static inline int xntimer_fired_p(struct xntimer *timer) +{ + return (timer->status & XNTIMER_FIRED) != 0; +} + +static inline int xntimer_periodic_p(struct xntimer *timer) +{ + return (timer->status & XNTIMER_PERIODIC) != 0; +} + +void __xntimer_init(struct xntimer *timer, + struct xnclock *clock, + void (*handler)(struct xntimer *timer), + struct xnsched *sched, + int flags); + +void xntimer_set_gravity(struct xntimer *timer, + int gravity); + +#ifdef CONFIG_XENO_OPT_STATS + +#define xntimer_init(__timer, __clock, __handler, __sched, __flags) \ +do { \ + __xntimer_init(__timer, __clock, __handler, __sched, __flags); \ + xntimer_set_name(__timer, #__handler); \ +} while (0) + +static inline void xntimer_reset_stats(struct xntimer *timer) +{ + xnstat_counter_set(&timer->scheduled, 0); + xnstat_counter_set(&timer->fired, 0); +} + +static inline void xntimer_account_scheduled(struct xntimer *timer) +{ + xnstat_counter_inc(&timer->scheduled); +} + +static inline void xntimer_account_fired(struct xntimer *timer) +{ + xnstat_counter_inc(&timer->fired); +} + +static inline void xntimer_set_name(struct xntimer *timer, const char *name) +{ + knamecpy(timer->name, name); +} + +#else /* !CONFIG_XENO_OPT_STATS */ + +#define xntimer_init __xntimer_init + +static inline void xntimer_reset_stats(struct xntimer *timer) { } + +static inline void xntimer_account_scheduled(struct xntimer *timer) { } + +static inline void xntimer_account_fired(struct 
xntimer *timer) { } + +static inline void xntimer_set_name(struct xntimer *timer, const char *name) { } + +#endif /* !CONFIG_XENO_OPT_STATS */ + +#if defined(CONFIG_XENO_OPT_EXTCLOCK) && defined(CONFIG_XENO_OPT_STATS) +void xntimer_switch_tracking(struct xntimer *timer, + struct xnclock *newclock); +#else +static inline +void xntimer_switch_tracking(struct xntimer *timer, + struct xnclock *newclock) { } +#endif + +void xntimer_destroy(struct xntimer *timer); + +/** + * @fn xnticks_t xntimer_interval(struct xntimer *timer) + * + * @brief Return the timer interval value. + * + * Return the timer interval value in nanoseconds. + * + * @param timer The address of a valid timer descriptor. + * + * @return The duration of a period in nanoseconds. The special value + * XN_INFINITE is returned if @a timer is currently disabled or + * one shot. + * + * @coretags{unrestricted, atomic-entry} + */ +static inline xnticks_t xntimer_interval(struct xntimer *timer) +{ + return timer->interval_ns; +} + +static inline xnticks_t xntimer_expiry(struct xntimer *timer) +{ + /* Real expiry date in ticks without anticipation (no gravity) */ + return xntimerh_date(&timer->aplink) + xntimer_gravity(timer); +} + +int xntimer_start(struct xntimer *timer, + xnticks_t value, + xnticks_t interval, + xntmode_t mode); + +void __xntimer_stop(struct xntimer *timer); + +xnticks_t xntimer_get_date(struct xntimer *timer); + +xnticks_t __xntimer_get_timeout(struct xntimer *timer); + +xnticks_t xntimer_get_interval(struct xntimer *timer); + +int xntimer_heading_p(struct xntimer *timer); + +static inline void xntimer_stop(struct xntimer *timer) +{ + if (timer->status & XNTIMER_RUNNING) + __xntimer_stop(timer); +} + +static inline xnticks_t xntimer_get_timeout(struct xntimer *timer) +{ + if (!xntimer_running_p(timer)) + return XN_INFINITE; + + return __xntimer_get_timeout(timer); +} + +static inline xnticks_t xntimer_get_timeout_stopped(struct xntimer *timer) +{ + return __xntimer_get_timeout(timer); +} + +static inline void xntimer_enqueue(struct xntimer *timer, + xntimerq_t *q) +{ + xntimerq_insert(q, &timer->aplink); + timer->status &= ~XNTIMER_DEQUEUED; + xntimer_account_scheduled(timer); +} + +static inline void xntimer_dequeue(struct xntimer *timer, + xntimerq_t *q) +{ + xntimerq_remove(q, &timer->aplink); + timer->status |= XNTIMER_DEQUEUED; +} + +unsigned long long xntimer_get_overruns(struct xntimer *timer, + struct xnthread *waiter, + xnticks_t now); + +#ifdef CONFIG_SMP + +void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched); + +static inline +void xntimer_migrate(struct xntimer *timer, struct xnsched *sched) +{ /* nklocked, IRQs off */ + if (timer->sched != sched) + __xntimer_migrate(timer, sched); +} + +void __xntimer_set_affinity(struct xntimer *timer, + struct xnsched *sched); + +static inline void xntimer_set_affinity(struct xntimer *timer, + struct xnsched *sched) +{ + if (sched != xntimer_sched(timer)) + __xntimer_set_affinity(timer, sched); +} + +#else /* ! 
CONFIG_SMP */ + +static inline void xntimer_migrate(struct xntimer *timer, + struct xnsched *sched) +{ + timer->sched = sched; +} + +static inline void xntimer_set_affinity(struct xntimer *timer, + struct xnsched *sched) +{ + xntimer_migrate(timer, sched); +} + +#endif /* CONFIG_SMP */ + +char *xntimer_format_time(xnticks_t ns, + char *buf, size_t bufsz); + +/** @} */ + +#endif /* !_COBALT_KERNEL_TIMER_H */ --- linux/include/xenomai/cobalt/kernel/init.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/init.h 2022-03-21 12:58:31.646866952 +0100 @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_INIT_H +#define _COBALT_KERNEL_INIT_H + +#include +#include +#include + +extern atomic_t cobalt_runstate; + +static inline enum cobalt_run_states realtime_core_state(void) +{ + return atomic_read(&cobalt_runstate); +} + +static inline int realtime_core_enabled(void) +{ + return atomic_read(&cobalt_runstate) != COBALT_STATE_DISABLED; +} + +static inline int realtime_core_running(void) +{ + return atomic_read(&cobalt_runstate) == COBALT_STATE_RUNNING; +} + +static inline void set_realtime_core_state(enum cobalt_run_states state) +{ + atomic_set(&cobalt_runstate, state); +} + +void cobalt_add_state_chain(struct notifier_block *nb); + +void cobalt_remove_state_chain(struct notifier_block *nb); + +void cobalt_call_state_chain(enum cobalt_run_states newstate); + +#endif /* !_COBALT_KERNEL_INIT_H_ */ --- linux/include/xenomai/cobalt/kernel/registry.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/registry.h 2022-03-21 12:58:31.639867021 +0100 @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2004 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_REGISTRY_H +#define _COBALT_KERNEL_REGISTRY_H + +#include +#include +#include + +/** + * @addtogroup cobalt_core_registry + * + * @{ + */ +struct xnpnode; + +struct xnobject { + void *objaddr; + const char *key; /* !< Hash key. May be NULL if anonynous. */ + unsigned long cstamp; /* !< Creation stamp. 
*/ +#ifdef CONFIG_XENO_OPT_VFILE + struct xnpnode *pnode; /* !< v-file information class. */ + union { + struct { + struct xnvfile_rev_tag tag; + struct xnvfile_snapshot file; + } vfsnap; /* !< virtual snapshot file. */ + struct xnvfile_regular vfreg; /* !< virtual regular file */ + struct xnvfile_link link; /* !< virtual link. */ + } vfile_u; + struct xnvfile *vfilp; +#endif /* CONFIG_XENO_OPT_VFILE */ + struct hlist_node hlink; /* !< Link in h-table */ + struct list_head link; +}; + +int xnregistry_init(void); + +void xnregistry_cleanup(void); + +#ifdef CONFIG_XENO_OPT_VFILE + +#define XNOBJECT_EXPORT_SCHEDULED ((struct xnvfile *)1L) +#define XNOBJECT_EXPORT_INPROGRESS ((struct xnvfile *)2L) +#define XNOBJECT_EXPORT_ABORTED ((struct xnvfile *)3L) + +struct xnptree { + const char *dirname; + /* hidden */ + int entries; + struct xnvfile_directory vdir; +}; + +#define DEFINE_XNPTREE(__var, __name) \ + struct xnptree __var = { \ + .dirname = __name, \ + .entries = 0, \ + .vdir = xnvfile_nodir, \ + } + +struct xnpnode_ops { + int (*export)(struct xnobject *object, struct xnpnode *pnode); + void (*unexport)(struct xnobject *object, struct xnpnode *pnode); + void (*touch)(struct xnobject *object); +}; + +struct xnpnode { + const char *dirname; + struct xnptree *root; + struct xnpnode_ops *ops; + /* hidden */ + int entries; + struct xnvfile_directory vdir; +}; + +struct xnpnode_snapshot { + struct xnpnode node; + struct xnvfile_snapshot_template vfile; +}; + +struct xnpnode_regular { + struct xnpnode node; + struct xnvfile_regular_template vfile; +}; + +struct xnpnode_link { + struct xnpnode node; + char *(*target)(void *obj); +}; + +#else /* !CONFIG_XENO_OPT_VFILE */ + +#define DEFINE_XNPTREE(__var, __name); + +/* Placeholders. */ + +struct xnpnode { + const char *dirname; +}; + +struct xnpnode_snapshot { + struct xnpnode node; +}; + +struct xnpnode_regular { + struct xnpnode node; +}; + +struct xnpnode_link { + struct xnpnode node; +}; + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +/* Public interface. */ + +extern struct xnobject *registry_obj_slots; + +static inline struct xnobject *xnregistry_validate(xnhandle_t handle) +{ + struct xnobject *object; + /* + * Careful: a removed object which is still in flight to be + * unexported carries a NULL objaddr, so we have to check this + * as well. + */ + handle = xnhandle_get_index(handle); + if (likely(handle && handle < CONFIG_XENO_OPT_REGISTRY_NRSLOTS)) { + object = ®istry_obj_slots[handle]; + return object->objaddr ? object : NULL; + } + + return NULL; +} + +static inline const char *xnregistry_key(xnhandle_t handle) +{ + struct xnobject *object = xnregistry_validate(handle); + return object ? 
object->key : NULL; +} + +int xnregistry_enter(const char *key, + void *objaddr, + xnhandle_t *phandle, + struct xnpnode *pnode); + +static inline int +xnregistry_enter_anon(void *objaddr, xnhandle_t *phandle) +{ + return xnregistry_enter(NULL, objaddr, phandle, NULL); +} + +int xnregistry_bind(const char *key, + xnticks_t timeout, + int timeout_mode, + xnhandle_t *phandle); + +int xnregistry_remove(xnhandle_t handle); + +static inline +void *xnregistry_lookup(xnhandle_t handle, + unsigned long *cstamp_r) +{ + struct xnobject *object = xnregistry_validate(handle); + + if (object == NULL) + return NULL; + + if (cstamp_r) + *cstamp_r = object->cstamp; + + return object->objaddr; +} + +int xnregistry_unlink(const char *key); + +unsigned xnregistry_hash_size(void); + +extern struct xnpnode_ops xnregistry_vfsnap_ops; + +extern struct xnpnode_ops xnregistry_vlink_ops; + +extern struct xnpnode_ops xnregistry_vfreg_ops; + +/** @} */ + +#endif /* !_COBALT_KERNEL_REGISTRY_H */ --- linux/include/xenomai/cobalt/kernel/sched-rt.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/sched-rt.h 2022-03-21 12:58:31.631867099 +0100 @@ -0,0 +1,150 @@ +/* + * Copyright (C) 2008 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHED_RT_H +#define _COBALT_KERNEL_SCHED_RT_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-rt.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +/* + * Global priority scale for Xenomai's core scheduling class, + * available to SCHED_COBALT members. + */ +#define XNSCHED_CORE_MIN_PRIO 0 +#define XNSCHED_CORE_MAX_PRIO 259 +#define XNSCHED_CORE_NR_PRIO \ + (XNSCHED_CORE_MAX_PRIO - XNSCHED_CORE_MIN_PRIO + 1) + +/* + * Priority range for SCHED_FIFO, and all other classes Cobalt + * implements except SCHED_COBALT. + */ +#define XNSCHED_FIFO_MIN_PRIO 1 +#define XNSCHED_FIFO_MAX_PRIO 256 + +#if XNSCHED_CORE_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR || \ + (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) && \ + XNSCHED_CORE_NR_PRIO > XNSCHED_MLQ_LEVELS) +#error "XNSCHED_MLQ_LEVELS is too low" +#endif + +extern struct xnsched_class xnsched_class_rt; + +static inline void __xnsched_rt_requeue(struct xnthread *thread) +{ + xnsched_addq(&thread->sched->rt.runnable, thread); +} + +static inline void __xnsched_rt_enqueue(struct xnthread *thread) +{ + xnsched_addq_tail(&thread->sched->rt.runnable, thread); +} + +static inline void __xnsched_rt_dequeue(struct xnthread *thread) +{ + xnsched_delq(&thread->sched->rt.runnable, thread); +} + +static inline void __xnsched_rt_track_weakness(struct xnthread *thread) +{ + /* + * We have to track threads exiting weak scheduling, i.e. any + * thread leaving the WEAK class code if compiled in, or + * assigned a zero priority if weak threads are hosted by the + * RT class. 
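As a quick cross-check of the "XNSCHED_MLQ_LEVELS is too low" guard above: with the values used in this patch, XNSCHED_CORE_NR_PRIO = 259 - 0 + 1 = 260, which stays below XNSCHED_CLASS_WEIGHT_FACTOR (1024) and exactly matches the 260 MLQ levels declared in schedqueue.h further down. A minimal compile-time sketch of that relationship, illustrative only and assuming the kernel's static_assert from <linux/build_bug.h> is visible here:

static_assert(XNSCHED_CORE_MAX_PRIO - XNSCHED_CORE_MIN_PRIO + 1 == 260,
	      "the core class spans 260 priority levels");
static_assert(XNSCHED_CORE_NR_PRIO <= XNSCHED_CLASS_WEIGHT_FACTOR,
	      "the priority range must fit under the class weight factor");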
+ * + * CAUTION: since we need to check the effective priority + * level for determining the weakness state, this can only + * apply to non-boosted threads. + */ + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK) || thread->cprio) + xnthread_clear_state(thread, XNWEAK); + else + xnthread_set_state(thread, XNWEAK); +} + +static inline bool __xnsched_rt_setparam(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + bool ret = xnsched_set_effective_priority(thread, p->rt.prio); + + if (!xnthread_test_state(thread, XNBOOST)) + __xnsched_rt_track_weakness(thread); + + return ret; +} + +static inline void __xnsched_rt_getparam(struct xnthread *thread, + union xnsched_policy_param *p) +{ + p->rt.prio = thread->cprio; +} + +static inline void __xnsched_rt_trackprio(struct xnthread *thread, + const union xnsched_policy_param *p) +{ + if (p) + thread->cprio = p->rt.prio; /* Force update. */ + else { + thread->cprio = thread->bprio; + /* Leaving PI/PP, so non-boosted by definition. */ + __xnsched_rt_track_weakness(thread); + } +} + +static inline void __xnsched_rt_protectprio(struct xnthread *thread, int prio) +{ + /* + * The RT class supports the widest priority range from + * XNSCHED_CORE_MIN_PRIO to XNSCHED_CORE_MAX_PRIO inclusive, + * no need to cap the input value which is guaranteed to be in + * the range [1..XNSCHED_CORE_MAX_PRIO]. + */ + thread->cprio = prio; +} + +static inline void __xnsched_rt_forget(struct xnthread *thread) +{ +} + +static inline int xnsched_rt_init_thread(struct xnthread *thread) +{ + return 0; +} + +#ifdef CONFIG_XENO_OPT_SCHED_CLASSES +struct xnthread *xnsched_rt_pick(struct xnsched *sched); +#else +static inline struct xnthread *xnsched_rt_pick(struct xnsched *sched) +{ + return xnsched_getq(&sched->rt.runnable); +} +#endif + +void xnsched_rt_tick(struct xnsched *sched); + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_RT_H */ --- linux/include/xenomai/cobalt/kernel/time.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/time.h 2022-03-21 12:58:31.624867167 +0100 @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _COBALT_KERNEL_TIME_H +#define _COBALT_KERNEL_TIME_H + +#include +#include + +/** + * Read struct __kernel_timespec from userspace and convert to + * struct timespec64 + * + * @param ts The destination, will be filled + * @param uts The source, provided by an application + * @return 0 on success, -EFAULT otherwise + */ +int cobalt_get_timespec64(struct timespec64 *ts, + const struct __kernel_timespec __user *uts); + +/** + * Convert struct timespec64 to struct __kernel_timespec + * and copy to userspace + * + * @param ts The source, provided by kernel + * @param uts The destination, will be filled + * @return 0 on success, -EFAULT otherwise + */ +int cobalt_put_timespec64(const struct timespec64 *ts, + struct __kernel_timespec __user *uts); + +#endif //_COBALT_KERNEL_TIME_H --- linux/include/xenomai/cobalt/kernel/intr.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/intr.h 2022-03-21 12:58:31.617867235 +0100 @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version.
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_INTR_H +#define _COBALT_KERNEL_INTR_H + +#include +#include +#include + +/** + * @addtogroup cobalt_core_irq + * @{ + */ + +/* Possible return values of a handler. */ +#define XN_IRQ_NONE 0x1 +#define XN_IRQ_HANDLED 0x2 +#define XN_IRQ_STATMASK (XN_IRQ_NONE|XN_IRQ_HANDLED) +#define XN_IRQ_PROPAGATE 0x100 +#define XN_IRQ_DISABLE 0x200 + +/* Init flags. */ +#define XN_IRQTYPE_SHARED 0x1 +#define XN_IRQTYPE_EDGE 0x2 + +/* Status bits. */ +#define XN_IRQSTAT_ATTACHED 0 +#define _XN_IRQSTAT_ATTACHED (1 << XN_IRQSTAT_ATTACHED) +#define XN_IRQSTAT_DISABLED 1 +#define _XN_IRQSTAT_DISABLED (1 << XN_IRQSTAT_DISABLED) + +struct xnintr; +struct xnsched; + +typedef int (*xnisr_t)(struct xnintr *intr); + +typedef void (*xniack_t)(unsigned irq, void *arg); + +struct xnirqstat { + /** Number of handled receipts since attachment. */ + xnstat_counter_t hits; + /** Runtime accounting entity */ + xnstat_exectime_t account; + /** Accumulated accounting entity */ + xnstat_exectime_t sum; +}; + +struct xnintr { +#ifdef CONFIG_XENO_OPT_SHIRQ + /** Next object in the IRQ-sharing chain. */ + struct xnintr *next; +#endif + /** Number of consequent unhandled interrupts */ + unsigned int unhandled; + /** Interrupt service routine. */ + xnisr_t isr; + /** User-defined cookie value. */ + void *cookie; + /** runtime status */ + unsigned long status; + /** Creation flags. */ + int flags; + /** IRQ number. */ + unsigned int irq; + /** Interrupt acknowledge routine. */ + xniack_t iack; + /** Symbolic name. */ + const char *name; + /** Descriptor maintenance lock. */ + raw_spinlock_t lock; +#ifdef CONFIG_XENO_OPT_STATS_IRQS + /** Statistics. */ + struct xnirqstat *stats; +#endif +}; + +struct xnintr_iterator { + int cpu; /** Current CPU in iteration. */ + unsigned long hits; /** Current hit counter. */ + xnticks_t exectime_period; /** Used CPU time in current accounting period. */ + xnticks_t account_period; /** Length of accounting period. */ + xnticks_t exectime_total; /** Overall CPU time consumed. */ + int list_rev; /** System-wide xnintr list revision (internal use). */ + struct xnintr *prev; /** Previously visited xnintr object (internal use). */ +}; + +void xnintr_core_clock_handler(void); + +void xnintr_host_tick(struct xnsched *sched); + + /* Public interface. 
*/ + +int xnintr_init(struct xnintr *intr, + const char *name, + unsigned irq, + xnisr_t isr, + xniack_t iack, + int flags); + +void xnintr_destroy(struct xnintr *intr); + +int xnintr_attach(struct xnintr *intr, + void *cookie, const cpumask_t *cpumask); + +void xnintr_detach(struct xnintr *intr); + +void xnintr_enable(struct xnintr *intr); + +void xnintr_disable(struct xnintr *intr); + +int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask); + +#ifdef CONFIG_XENO_OPT_STATS_IRQS + +int xnintr_query_init(struct xnintr_iterator *iterator); + +int xnintr_get_query_lock(void); + +void xnintr_put_query_lock(void); + +int xnintr_query_next(int irq, struct xnintr_iterator *iterator, + char *name_buf); + +#else /* !CONFIG_XENO_OPT_STATS_IRQS */ + +static inline int xnintr_query_init(struct xnintr_iterator *iterator) +{ + return 0; +} + +static inline int xnintr_get_query_lock(void) +{ + return 0; +} + +static inline void xnintr_put_query_lock(void) {} +#endif /* !CONFIG_XENO_OPT_STATS_IRQS */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_INTR_H */ --- linux/include/xenomai/cobalt/kernel/schedqueue.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/schedqueue.h 2022-03-21 12:58:31.609867313 +0100 @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2008 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SCHEDQUEUE_H +#define _COBALT_KERNEL_SCHEDQUEUE_H + +#include + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#define XNSCHED_CLASS_WEIGHT_FACTOR 1024 + +#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED + +#include + +/* + * Multi-level priority queue, suitable for handling the runnable + * thread queue of the core scheduling class with O(1) property. We + * only manage a descending queuing order, i.e. highest numbered + * priorities come first. + */ +#define XNSCHED_MLQ_LEVELS 260 /* i.e. XNSCHED_CORE_NR_PRIO */ + +struct xnsched_mlq { + int elems; + DECLARE_BITMAP(prio_map, XNSCHED_MLQ_LEVELS); + struct list_head heads[XNSCHED_MLQ_LEVELS]; +}; + +struct xnthread; + +void xnsched_initq(struct xnsched_mlq *q); + +void xnsched_addq(struct xnsched_mlq *q, + struct xnthread *thread); + +void xnsched_addq_tail(struct xnsched_mlq *q, + struct xnthread *thread); + +void xnsched_delq(struct xnsched_mlq *q, + struct xnthread *thread); + +struct xnthread *xnsched_getq(struct xnsched_mlq *q); + +static inline int xnsched_emptyq_p(struct xnsched_mlq *q) +{ + return q->elems == 0; +} + +static inline int xnsched_weightq(struct xnsched_mlq *q) +{ + return find_first_bit(q->prio_map, XNSCHED_MLQ_LEVELS); +} + +typedef struct xnsched_mlq xnsched_queue_t; + +#else /* ! 
CONFIG_XENO_OPT_SCALABLE_SCHED */ + +typedef struct list_head xnsched_queue_t; + +#define xnsched_initq(__q) INIT_LIST_HEAD(__q) +#define xnsched_emptyq_p(__q) list_empty(__q) +#define xnsched_addq(__q, __t) list_add_prilf(__t, __q, cprio, rlink) +#define xnsched_addq_tail(__q, __t) list_add_priff(__t, __q, cprio, rlink) +#define xnsched_delq(__q, __t) (void)(__q), list_del(&(__t)->rlink) +#define xnsched_getq(__q) \ + ({ \ + struct xnthread *__t = NULL; \ + if (!list_empty(__q)) \ + __t = list_get_entry(__q, struct xnthread, rlink); \ + __t; \ + }) +#define xnsched_weightq(__q) \ + ({ \ + struct xnthread *__t; \ + __t = list_first_entry(__q, struct xnthread, rlink); \ + __t->cprio; \ + }) + + +#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */ + +struct xnthread *xnsched_findq(xnsched_queue_t *q, int prio); + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHEDQUEUE_H */ --- linux/include/xenomai/cobalt/kernel/vdso.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/vdso.h 2022-03-21 12:58:31.602867381 +0100 @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2009 Wolfgang Mauerer . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_VDSO_H +#define _COBALT_KERNEL_VDSO_H + +#include +#include +#include +#include +#include + +extern struct xnvdso *nkvdso; + +/* + * Define the available feature set here. We have a single feature + * defined for now, only in the I-pipe case. + */ +#ifdef CONFIG_IPIPE_HAVE_HOSTRT + +#define XNVDSO_FEATURES XNVDSO_FEAT_HOST_REALTIME + +static inline struct xnvdso_hostrt_data *get_hostrt_data(void) +{ + return &nkvdso->hostrt_data; +} + +#else + +#define XNVDSO_FEATURES 0 + +#endif + +#endif /* _COBALT_KERNEL_VDSO_H */ --- linux/include/xenomai/cobalt/kernel/pipe.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/pipe.h 2022-03-21 12:58:31.595867450 +0100 @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA + * 02139, USA; either version 2 of the License, or (at your option) + * any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_KERNEL_PIPE_H +#define _COBALT_KERNEL_PIPE_H + +#include +#include +#include +#include +#include + +#define XNPIPE_NDEVS CONFIG_XENO_OPT_PIPE_NRDEV +#define XNPIPE_DEV_MAJOR 150 + +#define XNPIPE_KERN_CONN 0x1 +#define XNPIPE_KERN_LCLOSE 0x2 +#define XNPIPE_USER_CONN 0x4 +#define XNPIPE_USER_SIGIO 0x8 +#define XNPIPE_USER_WREAD 0x10 +#define XNPIPE_USER_WREAD_READY 0x20 +#define XNPIPE_USER_WSYNC 0x40 +#define XNPIPE_USER_WSYNC_READY 0x80 +#define XNPIPE_USER_LCONN 0x100 + +#define XNPIPE_USER_ALL_WAIT \ +(XNPIPE_USER_WREAD|XNPIPE_USER_WSYNC) + +#define XNPIPE_USER_ALL_READY \ +(XNPIPE_USER_WREAD_READY|XNPIPE_USER_WSYNC_READY) + +struct xnpipe_mh { + size_t size; + size_t rdoff; + struct list_head link; +}; + +struct xnpipe_state; + +struct xnpipe_operations { + void (*output)(struct xnpipe_mh *mh, void *xstate); + int (*input)(struct xnpipe_mh *mh, int retval, void *xstate); + void *(*alloc_ibuf)(size_t size, void *xstate); + void (*free_ibuf)(void *buf, void *xstate); + void (*free_obuf)(void *buf, void *xstate); + void (*release)(void *xstate); +}; + +struct xnpipe_state { + struct list_head slink; /* Link on sleep queue */ + struct list_head alink; /* Link on async queue */ + + struct list_head inq; /* From user-space to kernel */ + int nrinq; + struct list_head outq; /* From kernel to user-space */ + int nroutq; + struct xnsynch synchbase; + struct xnpipe_operations ops; + void *xstate; /* Extra state managed by caller */ + + /* Linux kernel part */ + unsigned long status; + struct fasync_struct *asyncq; + wait_queue_head_t readq; /* open/read/poll waiters */ + wait_queue_head_t syncq; /* sync waiters */ + int wcount; /* number of waiters on this minor */ + size_t ionrd; +}; + +extern struct xnpipe_state xnpipe_states[]; + +#define xnminor_from_state(s) (s - xnpipe_states) + +#ifdef CONFIG_XENO_OPT_PIPE +int xnpipe_mount(void); +void xnpipe_umount(void); +#else /* !CONFIG_XENO_OPT_PIPE */ +static inline int xnpipe_mount(void) { return 0; } +static inline void xnpipe_umount(void) { } +#endif /* !CONFIG_XENO_OPT_PIPE */ + +/* Entry points of the kernel interface. */ + +int xnpipe_connect(int minor, + struct xnpipe_operations *ops, void *xstate); + +int xnpipe_disconnect(int minor); + +ssize_t xnpipe_send(int minor, + struct xnpipe_mh *mh, size_t size, int flags); + +ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size); + +ssize_t xnpipe_recv(int minor, + struct xnpipe_mh **pmh, xnticks_t timeout); + +int xnpipe_flush(int minor, int mode); + +int xnpipe_pollstate(int minor, unsigned int *mask_r); + +static inline unsigned int __xnpipe_pollstate(int minor) +{ + struct xnpipe_state *state = xnpipe_states + minor; + unsigned int mask = POLLOUT; + + if (!list_empty(&state->inq)) + mask |= POLLIN; + + return mask; +} + +static inline char *xnpipe_m_data(struct xnpipe_mh *mh) +{ + return (char *)(mh + 1); +} + +#define xnpipe_m_size(mh) ((mh)->size) + +#define xnpipe_m_rdoff(mh) ((mh)->rdoff) + +#endif /* !_COBALT_KERNEL_PIPE_H */ --- linux/include/xenomai/cobalt/kernel/ancillaries.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/ancillaries.h 2022-03-21 12:58:31.587867528 +0100 @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2014 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_ANCILLARIES_H +#define _COBALT_KERNEL_ANCILLARIES_H + +#include +#include +#include +#include + +#define ksformat(__dst, __len, __fmt, __args...) \ + ({ \ + size_t __ret; \ + __ret = snprintf(__dst, __len, __fmt, ##__args); \ + if (__ret >= __len) \ + __dst[__len-1] = '\0'; \ + __ret; \ + }) + +#define kasformat(__fmt, __args...) \ + ({ \ + kasprintf(GFP_KERNEL, __fmt, ##__args); \ + }) + +#define kvsformat(__dst, __len, __fmt, __ap) \ + ({ \ + size_t __ret; \ + __ret = vsnprintf(__dst, __len, __fmt, __ap); \ + if (__ret >= __len) \ + __dst[__len-1] = '\0'; \ + __ret; \ + }) + +#define kvasformat(__fmt, __ap) \ + ({ \ + kvasprintf(GFP_KERNEL, __fmt, __ap); \ + }) + +void __knamecpy_requires_character_array_as_destination(void); + +#define knamecpy(__dst, __src) \ + ({ \ + if (!__builtin_types_compatible_p(typeof(__dst), char[])) \ + __knamecpy_requires_character_array_as_destination(); \ + strncpy((__dst), __src, sizeof(__dst)); \ + __dst[sizeof(__dst) - 1] = '\0'; \ + __dst; \ + }) + +#define get_current_uuid() from_kuid_munged(current_user_ns(), current_uid()) + +#endif /* !_COBALT_KERNEL_ANCILLARIES_H */ --- linux/include/xenomai/cobalt/kernel/bufd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/bufd.h 2022-03-21 12:58:31.580867596 +0100 @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2009 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_KERNEL_BUFD_H +#define _COBALT_KERNEL_BUFD_H + +#include + +/** + * @addtogroup cobalt_core_bufd + * + * @{ + */ + +struct mm_struct; + +struct xnbufd { + caddr_t b_ptr; /* src/dst buffer address */ + size_t b_len; /* total length of buffer */ + off_t b_off; /* # of bytes read/written */ + struct mm_struct *b_mm; /* src/dst address space */ + caddr_t b_carry; /* pointer to carry over area */ + char b_buf[64]; /* fast carry over area */ +}; + +void xnbufd_map_umem(struct xnbufd *bufd, + void __user *ptr, size_t len); + +static inline void xnbufd_map_uread(struct xnbufd *bufd, + const void __user *ptr, size_t len) +{ + xnbufd_map_umem(bufd, (void __user *)ptr, len); +} + +static inline void xnbufd_map_uwrite(struct xnbufd *bufd, + void __user *ptr, size_t len) +{ + xnbufd_map_umem(bufd, ptr, len); +} + +ssize_t xnbufd_unmap_uread(struct xnbufd *bufd); + +ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd); + +void xnbufd_map_kmem(struct xnbufd *bufd, + void *ptr, size_t len); + +static inline void xnbufd_map_kread(struct xnbufd *bufd, + const void *ptr, size_t len) +{ + xnbufd_map_kmem(bufd, (void *)ptr, len); +} + +static inline void xnbufd_map_kwrite(struct xnbufd *bufd, + void *ptr, size_t len) +{ + xnbufd_map_kmem(bufd, ptr, len); +} + +ssize_t xnbufd_unmap_kread(struct xnbufd *bufd); + +ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd); + +ssize_t xnbufd_copy_to_kmem(void *ptr, + struct xnbufd *bufd, size_t len); + +ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, + void *from, size_t len); + +void xnbufd_invalidate(struct xnbufd *bufd); + +static inline void xnbufd_reset(struct xnbufd *bufd) +{ + bufd->b_off = 0; +} + +/** @} */ + +#endif /* !_COBALT_KERNEL_BUFD_H */ --- linux/include/xenomai/cobalt/kernel/sched-quota.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/sched-quota.h 2022-03-21 12:58:31.572867674 +0100 @@ -0,0 +1,93 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_KERNEL_SCHED_QUOTA_H +#define _COBALT_KERNEL_SCHED_QUOTA_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-quota.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + +#define XNSCHED_QUOTA_MIN_PRIO 1 +#define XNSCHED_QUOTA_MAX_PRIO 255 +#define XNSCHED_QUOTA_NR_PRIO \ + (XNSCHED_QUOTA_MAX_PRIO - XNSCHED_QUOTA_MIN_PRIO + 1) + +extern struct xnsched_class xnsched_class_quota; + +struct xnsched_quota_group { + struct xnsched *sched; + xnticks_t quota_ns; + xnticks_t quota_peak_ns; + xnticks_t run_start_ns; + xnticks_t run_budget_ns; + xnticks_t run_credit_ns; + struct list_head members; + struct list_head expired; + struct list_head next; + int nr_active; + int nr_threads; + int tgid; + int quota_percent; + int quota_peak_percent; +}; + +struct xnsched_quota { + xnticks_t period_ns; + struct xntimer refill_timer; + struct xntimer limit_timer; + struct list_head groups; +}; + +static inline int xnsched_quota_init_thread(struct xnthread *thread) +{ + thread->quota = NULL; + INIT_LIST_HEAD(&thread->quota_expired); + + return 0; +} + +int xnsched_quota_create_group(struct xnsched_quota_group *tg, + struct xnsched *sched, + int *quota_sum_r); + +int xnsched_quota_destroy_group(struct xnsched_quota_group *tg, + int force, + int *quota_sum_r); + +void xnsched_quota_set_limit(struct xnsched_quota_group *tg, + int quota_percent, int quota_peak_percent, + int *quota_sum_r); + +struct xnsched_quota_group * +xnsched_quota_find_group(struct xnsched *sched, int tgid); + +int xnsched_quota_sum_all(struct xnsched *sched); + +#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_QUOTA_H */ --- linux/include/xenomai/cobalt/kernel/tree.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/tree.h 2022-03-21 12:58:31.565867742 +0100 @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2014 Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_TREE_H +#define _COBALT_KERNEL_TREE_H + +#include +#include +#include + +typedef unsigned long long xnkey_t; + +static inline xnkey_t PTR_KEY(void *p) +{ + return (xnkey_t)(long)p; +} + +struct xnid { + xnkey_t key; + struct rb_node link; +}; + +#define xnid_entry(ptr, type, member) \ + ({ \ + typeof(ptr) _ptr = (ptr); \ + (_ptr ? 
container_of(_ptr, type, member.link) : NULL); \ + }) + +#define xnid_next_entry(ptr, member) \ + xnid_entry(rb_next(&ptr->member.link), typeof(*ptr), member) + +static inline void xntree_init(struct rb_root *t) +{ + *t = RB_ROOT; +} + +#define xntree_for_each_entry(pos, root, member) \ + for (pos = xnid_entry(rb_first(root), typeof(*pos), member); \ + pos; pos = xnid_next_entry(pos, member)) + +void xntree_cleanup(struct rb_root *t, void *cookie, + void (*destroy)(void *cookie, struct xnid *id)); + +int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key); + +static inline xnkey_t xnid_key(struct xnid *i) +{ + return i->key; +} + +static inline +struct xnid *xnid_fetch(struct rb_root *t, xnkey_t key) +{ + struct rb_node *node = t->rb_node; + + while (node) { + struct xnid *i = container_of(node, struct xnid, link); + + if (key < i->key) + node = node->rb_left; + else if (key > i->key) + node = node->rb_right; + else + return i; + } + + return NULL; +} + +static inline int xnid_remove(struct rb_root *t, struct xnid *xnid) +{ +#ifdef CONFIG_XENO_OPT_DEBUG_COBALT + if (xnid_fetch(t, xnid->key) != xnid) + return -ENOENT; +#endif + rb_erase(&xnid->link, t); + return 0; +} + +#endif /* _COBALT_KERNEL_TREE_H */ --- linux/include/xenomai/cobalt/kernel/sched-sporadic.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/sched-sporadic.h 2022-03-21 12:58:31.558867811 +0100 @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2009 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_KERNEL_SCHED_SPORADIC_H +#define _COBALT_KERNEL_SCHED_SPORADIC_H + +#ifndef _COBALT_KERNEL_SCHED_H +#error "please don't include cobalt/kernel/sched-sporadic.h directly" +#endif + +/** + * @addtogroup cobalt_core_sched + * @{ + */ + +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + +#define XNSCHED_SPORADIC_MIN_PRIO 1 +#define XNSCHED_SPORADIC_MAX_PRIO 255 +#define XNSCHED_SPORADIC_NR_PRIO \ + (XNSCHED_SPORADIC_MAX_PRIO - XNSCHED_SPORADIC_MIN_PRIO + 1) + +extern struct xnsched_class xnsched_class_sporadic; + +struct xnsched_sporadic_repl { + xnticks_t date; + xnticks_t amount; +}; + +struct xnsched_sporadic_data { + xnticks_t resume_date; + xnticks_t budget; + int repl_in; + int repl_out; + int repl_pending; + struct xntimer repl_timer; + struct xntimer drop_timer; + struct xnsched_sporadic_repl repl_data[CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL]; + struct xnsched_sporadic_param param; + struct xnthread *thread; +}; + +struct xnsched_sporadic { +#ifdef CONFIG_XENO_OPT_DEBUG_COBALT + unsigned long drop_retries; +#endif +}; + +static inline int xnsched_sporadic_init_thread(struct xnthread *thread) +{ + thread->pss = NULL; + + return 0; +} + +#endif /* !CONFIG_XENO_OPT_SCHED_SPORADIC */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_SCHED_SPORADIC_H */ --- linux/include/xenomai/cobalt/kernel/stat.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/stat.h 2022-03-21 12:58:31.550867889 +0100 @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2006 Jan Kiszka . + * Copyright (C) 2006 Dmitry Adamushko . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_STAT_H +#define _COBALT_KERNEL_STAT_H + +#include + +/** + * @ingroup cobalt_core_thread + * @defgroup cobalt_core_stat Thread runtime statistics + * @{ + */ +#ifdef CONFIG_XENO_OPT_STATS + +typedef struct xnstat_exectime { + + xnticks_t start; /* Start of execution time accumulation */ + + xnticks_t total; /* Accumulated execution time */ + +} xnstat_exectime_t; + +/* Return current date which can be passed to other xnstat services for + immediate or lazy accounting. */ +#define xnstat_exectime_now() xnclock_core_read_raw() + +/* Accumulate exectime of the current account until the given date. */ +#define xnstat_exectime_update(sched, date) \ +do { \ + xnticks_t __date = date; \ + (sched)->current_account->total += \ + __date - (sched)->last_account_switch; \ + (sched)->last_account_switch = __date; \ + /* All changes must be committed before changing the current_account \ + reference in sched (required for xnintr_sync_stat_references) */ \ + smp_wmb(); \ +} while (0) + +/* Update the current account reference, returning the previous one. 
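The rule encoded by xnstat_exectime_update() above is simply "charge the time elapsed since the last account switch to the entity currently being accounted, then restart accumulation from that date". A standalone sketch of that rule, where acct_model stands in for the sched->current_account / sched->last_account_switch pair manipulated by the real macro (illustrative only):

struct acct_model {
	xnticks_t total;	/* accumulated execution time */
	xnticks_t last_switch;	/* date of the previous account switch */
};

static inline void acct_model_update(struct acct_model *m, xnticks_t now)
{
	m->total += now - m->last_switch;	/* charge the elapsed slice */
	m->last_switch = now;			/* next slice starts here */
}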
*/ +#define xnstat_exectime_set_current(sched, new_account) \ +({ \ + xnstat_exectime_t *__prev; \ + __prev = (xnstat_exectime_t *) \ + atomic_long_xchg((atomic_long_t *)&(sched)->current_account, \ + (long)(new_account)); \ + __prev; \ +}) + +/* Return the currently active accounting entity. */ +#define xnstat_exectime_get_current(sched) ((sched)->current_account) + +/* Finalize an account (no need to accumulate the exectime, just mark the + switch date and set the new account). */ +#define xnstat_exectime_finalize(sched, new_account) \ +do { \ + (sched)->last_account_switch = xnclock_core_read_raw(); \ + (sched)->current_account = (new_account); \ +} while (0) + +/* Obtain content of xnstat_exectime_t */ +#define xnstat_exectime_get_start(account) ((account)->start) +#define xnstat_exectime_get_total(account) ((account)->total) + +/* Obtain last account switch date of considered sched */ +#define xnstat_exectime_get_last_switch(sched) ((sched)->last_account_switch) + +/* Reset statistics from inside the accounted entity (e.g. after CPU + migration). */ +#define xnstat_exectime_reset_stats(stat) \ +do { \ + (stat)->total = 0; \ + (stat)->start = xnclock_core_read_raw(); \ +} while (0) + + +typedef struct xnstat_counter { + unsigned long counter; +} xnstat_counter_t; + +static inline unsigned long xnstat_counter_inc(xnstat_counter_t *c) +{ + return c->counter++; +} + +static inline unsigned long xnstat_counter_get(xnstat_counter_t *c) +{ + return c->counter; +} + +static inline void xnstat_counter_set(xnstat_counter_t *c, unsigned long value) +{ + c->counter = value; +} + +#else /* !CONFIG_XENO_OPT_STATS */ +typedef struct xnstat_exectime { +} xnstat_exectime_t; + +#define xnstat_exectime_now() ({ 0; }) +#define xnstat_exectime_update(sched, date) do { } while (0) +#define xnstat_exectime_set_current(sched, new_account) ({ (void)sched; NULL; }) +#define xnstat_exectime_get_current(sched) ({ (void)sched; NULL; }) +#define xnstat_exectime_finalize(sched, new_account) do { } while (0) +#define xnstat_exectime_get_start(account) ({ 0; }) +#define xnstat_exectime_get_total(account) ({ 0; }) +#define xnstat_exectime_get_last_switch(sched) ({ 0; }) +#define xnstat_exectime_reset_stats(account) do { } while (0) + +typedef struct xnstat_counter { +} xnstat_counter_t; + +#define xnstat_counter_inc(c) ({ do { } while(0); 0; }) +#define xnstat_counter_get(c) ({ 0; }) +#define xnstat_counter_set(c, value) do { } while (0) +#endif /* CONFIG_XENO_OPT_STATS */ + +/* Account the exectime of the current account until now, switch to + new_account, and return the previous one. */ +#define xnstat_exectime_switch(sched, new_account) \ +({ \ + xnstat_exectime_update(sched, xnstat_exectime_now()); \ + xnstat_exectime_set_current(sched, new_account); \ +}) + +/* Account the exectime of the current account until given start time, switch + to new_account, and return the previous one. */ +#define xnstat_exectime_lazy_switch(sched, new_account, date) \ +({ \ + xnstat_exectime_update(sched, date); \ + xnstat_exectime_set_current(sched, new_account); \ +}) + +/** @} */ + +#endif /* !_COBALT_KERNEL_STAT_H */ --- linux/include/xenomai/cobalt/kernel/thread.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/thread.h 2022-03-21 12:58:31.543867957 +0100 @@ -0,0 +1,581 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum . 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_THREAD_H +#define _COBALT_KERNEL_THREAD_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * @addtogroup cobalt_core_thread + * @{ + */ +#define XNTHREAD_BLOCK_BITS (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP) +#define XNTHREAD_MODE_BITS (XNRRB|XNWARN|XNTRAPLB) + +#define XNTHREAD_SIGDEBUG 0 +#define XNTHREAD_SIGSHADOW_HARDEN 1 +#define XNTHREAD_SIGSHADOW_BACKTRACE 2 +#define XNTHREAD_SIGSHADOW_HOME 3 +#define XNTHREAD_SIGTERM 4 +#define XNTHREAD_MAX_SIGNALS 5 + +struct xnthread; +struct xnsched; +struct xnselector; +struct xnsched_class; +struct xnsched_tpslot; +struct xnthread_personality; +struct completion; + +struct lostage_signal { + struct pipeline_inband_work inband_work; /* Must be first. */ + struct task_struct *task; + int signo, sigval; + struct lostage_signal *self; /* Revisit: I-pipe requirement */ +}; + +struct xnthread_init_attr { + struct xnthread_personality *personality; + cpumask_t affinity; + int flags; + const char *name; +}; + +struct xnthread_start_attr { + int mode; + void (*entry)(void *cookie); + void *cookie; +}; + +struct xnthread_wait_context { + int posted; +}; + +struct xnthread_personality { + const char *name; + unsigned int magic; + int xid; + atomic_t refcnt; + struct { + void *(*attach_process)(void); + void (*detach_process)(void *arg); + void (*map_thread)(struct xnthread *thread); + struct xnthread_personality *(*relax_thread)(struct xnthread *thread); + struct xnthread_personality *(*harden_thread)(struct xnthread *thread); + struct xnthread_personality *(*move_thread)(struct xnthread *thread, + int dest_cpu); + struct xnthread_personality *(*exit_thread)(struct xnthread *thread); + struct xnthread_personality *(*finalize_thread)(struct xnthread *thread); + } ops; + struct module *module; +}; + +struct xnthread { + struct xnarchtcb tcb; /* Architecture-dependent block */ + + __u32 state; /* Thread state flags */ + __u32 info; /* Thread information flags */ + __u32 local_info; /* Local thread information flags */ + + struct xnsched *sched; /* Thread scheduler */ + struct xnsched_class *sched_class; /* Current scheduling class */ + struct xnsched_class *base_class; /* Base scheduling class */ + +#ifdef CONFIG_XENO_OPT_SCHED_TP + struct xnsched_tpslot *tps; /* Current partition slot for TP scheduling */ + struct list_head tp_link; /* Link in per-sched TP thread queue */ +#endif +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */ +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + struct xnsched_quota_group *quota; /* Quota scheduling group. 
*/ + struct list_head quota_expired; + struct list_head quota_next; +#endif + cpumask_t affinity; /* Processor affinity. */ + + /** Base priority (before PI/PP boost) */ + int bprio; + + /** Current (effective) priority */ + int cprio; + + /** + * Weighted priority (cprio + scheduling class weight). + */ + int wprio; + + int lock_count; /** Scheduler lock count. */ + + /** + * Thread holder in xnsched run queue. Ordered by + * thread->cprio. + */ + struct list_head rlink; + + /** + * Thread holder in xnsynch pendq. Prioritized by + * thread->cprio + scheduling class weight. + */ + struct list_head plink; + + /** Thread holder in global queue. */ + struct list_head glink; + + /** + * List of xnsynch owned by this thread which cause a priority + * boost due to one of the following reasons: + * + * - they are currently claimed by other thread(s) when + * enforcing the priority inheritance protocol (XNSYNCH_PI). + * + * - they require immediate priority ceiling (XNSYNCH_PP). + * + * This list is ordered by decreasing (weighted) thread + * priorities. + */ + struct list_head boosters; + + struct xnsynch *wchan; /* Resource the thread pends on */ + + struct xnsynch *wwake; /* Wait channel the thread was resumed from */ + + int res_count; /* Held resources count */ + + struct xntimer rtimer; /* Resource timer */ + + struct xntimer ptimer; /* Periodic timer */ + + xnticks_t rrperiod; /* Allotted round-robin period (ns) */ + + struct xnthread_wait_context *wcontext; /* Active wait context. */ + + struct { + xnstat_counter_t ssw; /* Primary -> secondary mode switch count */ + xnstat_counter_t csw; /* Context switches (includes secondary -> primary switches) */ + xnstat_counter_t xsc; /* Xenomai syscalls */ + xnstat_counter_t pf; /* Number of page faults */ + xnstat_exectime_t account; /* Execution time accounting entity */ + xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */ + } stat; + + struct xnselector *selector; /* For select. */ + + xnhandle_t handle; /* Handle in registry */ + + char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */ + + void (*entry)(void *cookie); /* Thread entry routine */ + void *cookie; /* Cookie to pass to the entry routine */ + + /** + * Thread data visible from userland through a window on the + * global heap. 
+ */ + struct xnthread_user_window *u_window; + + struct xnthread_personality *personality; + + struct completion exited; + +#ifdef CONFIG_XENO_OPT_DEBUG + const char *exe_path; /* Executable path */ + u32 proghash; /* Hash value for exe_path */ +#endif + struct lostage_signal sigarray[XNTHREAD_MAX_SIGNALS]; +}; + +static inline int xnthread_get_state(const struct xnthread *thread) +{ + return thread->state; +} + +static inline int xnthread_test_state(struct xnthread *thread, int bits) +{ + return thread->state & bits; +} + +static inline void xnthread_set_state(struct xnthread *thread, int bits) +{ + thread->state |= bits; +} + +static inline void xnthread_clear_state(struct xnthread *thread, int bits) +{ + thread->state &= ~bits; +} + +static inline int xnthread_test_info(struct xnthread *thread, int bits) +{ + return thread->info & bits; +} + +static inline void xnthread_set_info(struct xnthread *thread, int bits) +{ + thread->info |= bits; +} + +static inline void xnthread_clear_info(struct xnthread *thread, int bits) +{ + thread->info &= ~bits; +} + +static inline int xnthread_test_localinfo(struct xnthread *curr, int bits) +{ + return curr->local_info & bits; +} + +static inline void xnthread_set_localinfo(struct xnthread *curr, int bits) +{ + curr->local_info |= bits; +} + +static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits) +{ + curr->local_info &= ~bits; +} + +static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread) +{ + return &thread->tcb; +} + +static inline int xnthread_base_priority(const struct xnthread *thread) +{ + return thread->bprio; +} + +static inline int xnthread_current_priority(const struct xnthread *thread) +{ + return thread->cprio; +} + +static inline struct task_struct *xnthread_host_task(struct xnthread *thread) +{ + return xnarch_host_task(xnthread_archtcb(thread)); +} + +#define xnthread_for_each_booster(__pos, __thread) \ + list_for_each_entry(__pos, &(__thread)->boosters, next) + +#define xnthread_for_each_booster_safe(__pos, __tmp, __thread) \ + list_for_each_entry_safe(__pos, __tmp, &(__thread)->boosters, next) + +#define xnthread_run_handler(__t, __h, __a...) \ + do { \ + struct xnthread_personality *__p__ = (__t)->personality; \ + if ((__p__)->ops.__h) \ + (__p__)->ops.__h(__t, ##__a); \ + } while (0) + +#define xnthread_run_handler_stack(__t, __h, __a...) \ + do { \ + struct xnthread_personality *__p__ = (__t)->personality; \ + do { \ + if ((__p__)->ops.__h == NULL) \ + break; \ + __p__ = (__p__)->ops.__h(__t, ##__a); \ + } while (__p__); \ + } while (0) + +static inline +struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread) +{ + return thread->wcontext; +} + +static inline +int xnthread_register(struct xnthread *thread, const char *name) +{ + return xnregistry_enter(name, thread, &thread->handle, NULL); +} + +static inline +struct xnthread *xnthread_lookup(xnhandle_t threadh) +{ + struct xnthread *thread = xnregistry_lookup(threadh, NULL); + return thread && thread->handle == xnhandle_get_index(threadh) ? 
thread : NULL; +} + +static inline void xnthread_sync_window(struct xnthread *thread) +{ + if (thread->u_window) { + thread->u_window->state = thread->state; + thread->u_window->info = thread->info; + } +} + +static inline +void xnthread_clear_sync_window(struct xnthread *thread, int state_bits) +{ + if (thread->u_window) { + thread->u_window->state = thread->state & ~state_bits; + thread->u_window->info = thread->info; + } +} + +static inline +void xnthread_set_sync_window(struct xnthread *thread, int state_bits) +{ + if (thread->u_window) { + thread->u_window->state = thread->state | state_bits; + thread->u_window->info = thread->info; + } +} + +static inline int normalize_priority(int prio) +{ + return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1; +} + +int __xnthread_init(struct xnthread *thread, + const struct xnthread_init_attr *attr, + struct xnsched *sched, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param); + +void __xnthread_test_cancel(struct xnthread *curr); + +void __xnthread_cleanup(struct xnthread *curr); + +void __xnthread_discard(struct xnthread *thread); + +/** + * @fn struct xnthread *xnthread_current(void) + * @brief Retrieve the current Cobalt core TCB. + * + * Returns the address of the current Cobalt core thread descriptor, + * or NULL if running over a regular Linux task. This call is not + * affected by the current runtime mode of the core thread. + * + * @note The returned value may differ from xnsched_current_thread() + * called from the same context, since the latter returns the root + * thread descriptor for the current CPU if the caller is running in + * secondary mode. + * + * @coretags{unrestricted} + */ +static inline struct xnthread *xnthread_current(void) +{ + return pipeline_current()->thread; +} + +/** + * @fn struct xnthread *xnthread_from_task(struct task_struct *p) + * @brief Retrieve the Cobalt core TCB attached to a Linux task. + * + * Returns the address of the Cobalt core thread descriptor attached + * to the Linux task @a p, or NULL if @a p is a regular Linux + * task. This call is not affected by the current runtime mode of the + * core thread. + * + * @coretags{unrestricted} + */ +static inline struct xnthread *xnthread_from_task(struct task_struct *p) +{ + return pipeline_thread_from_task(p); +} + +/** + * @fn void xnthread_test_cancel(void) + * @brief Introduce a thread cancellation point. + * + * Terminates the current thread if a cancellation request is pending + * for it, i.e. if xnthread_cancel() was called. 
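A cancellation point is typically dropped into long-running kernel-side loops; a minimal sketch of an entry routine doing so is shown below (process_one_request() is a hypothetical unit of work, only xnthread_test_cancel() comes from this header):

void process_one_request(void *cookie);	/* hypothetical work item */

static void worker_entry(void *cookie)
{
	for (;;) {
		process_one_request(cookie);
		xnthread_test_cancel();	/* exits here if xnthread_cancel() was issued */
	}
}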
+ * + * @coretags{mode-unrestricted} + */ +static inline void xnthread_test_cancel(void) +{ + struct xnthread *curr = xnthread_current(); + + if (curr && xnthread_test_info(curr, XNCANCELD)) + __xnthread_test_cancel(curr); +} + +static inline +void xnthread_complete_wait(struct xnthread_wait_context *wc) +{ + wc->posted = 1; +} + +static inline +int xnthread_wait_complete_p(struct xnthread_wait_context *wc) +{ + return wc->posted; +} + +#ifdef CONFIG_XENO_ARCH_FPU +void xnthread_switch_fpu(struct xnsched *sched); +#else +static inline void xnthread_switch_fpu(struct xnsched *sched) { } +#endif /* CONFIG_XENO_ARCH_FPU */ + +void xnthread_deregister(struct xnthread *thread); + +char *xnthread_format_status(unsigned long status, + char *buf, int size); + +pid_t xnthread_host_pid(struct xnthread *thread); + +int xnthread_set_clock(struct xnthread *thread, + struct xnclock *newclock); + +xnticks_t xnthread_get_timeout(struct xnthread *thread, + xnticks_t ns); + +xnticks_t xnthread_get_period(struct xnthread *thread); + +void xnthread_prepare_wait(struct xnthread_wait_context *wc); + +int xnthread_init(struct xnthread *thread, + const struct xnthread_init_attr *attr, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param); + +int xnthread_start(struct xnthread *thread, + const struct xnthread_start_attr *attr); + +int xnthread_set_mode(int clrmask, + int setmask); + +void xnthread_suspend(struct xnthread *thread, + int mask, + xnticks_t timeout, + xntmode_t timeout_mode, + struct xnsynch *wchan); + +void xnthread_resume(struct xnthread *thread, + int mask); + +int xnthread_unblock(struct xnthread *thread); + +int xnthread_set_periodic(struct xnthread *thread, + xnticks_t idate, + xntmode_t timeout_mode, + xnticks_t period); + +int xnthread_wait_period(unsigned long *overruns_r); + +int xnthread_set_slice(struct xnthread *thread, + xnticks_t quantum); + +void xnthread_cancel(struct xnthread *thread); + +int xnthread_join(struct xnthread *thread, bool uninterruptible); + +int xnthread_harden(void); + +void xnthread_relax(int notify, int reason); + +void __xnthread_kick(struct xnthread *thread); + +void xnthread_kick(struct xnthread *thread); + +void __xnthread_demote(struct xnthread *thread); + +void xnthread_demote(struct xnthread *thread); + +void __xnthread_signal(struct xnthread *thread, int sig, int arg); + +void xnthread_signal(struct xnthread *thread, int sig, int arg); + +void xnthread_pin_initial(struct xnthread *thread); + +void xnthread_call_mayday(struct xnthread *thread, int reason); + +static inline void xnthread_get_resource(struct xnthread *curr) +{ + if (xnthread_test_state(curr, XNWEAK|XNDEBUG)) + curr->res_count++; +} + +static inline int xnthread_put_resource(struct xnthread *curr) +{ + if (xnthread_test_state(curr, XNWEAK) || + IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) { + if (unlikely(curr->res_count == 0)) { + if (xnthread_test_state(curr, XNWARN)) + xnthread_signal(curr, SIGDEBUG, + SIGDEBUG_RESCNT_IMBALANCE); + return -EPERM; + } + curr->res_count--; + } + + return 0; +} + +static inline void xnthread_commit_ceiling(struct xnthread *curr) +{ + if (curr->u_window->pp_pending) + xnsynch_commit_ceiling(curr); +} + +#ifdef CONFIG_SMP + +void xnthread_migrate_passive(struct xnthread *thread, + struct xnsched *sched); +#else + +static inline void xnthread_migrate_passive(struct xnthread *thread, + struct xnsched *sched) +{ } + +#endif + +int __xnthread_set_schedparam(struct xnthread *thread, + struct xnsched_class *sched_class, + const union 
xnsched_policy_param *sched_param); + +int xnthread_set_schedparam(struct xnthread *thread, + struct xnsched_class *sched_class, + const union xnsched_policy_param *sched_param); + +int xnthread_killall(int grace, int mask); + +void __xnthread_propagate_schedparam(struct xnthread *curr); + +static inline void xnthread_propagate_schedparam(struct xnthread *curr) +{ + if (xnthread_test_info(curr, XNSCHEDP)) + __xnthread_propagate_schedparam(curr); +} + +extern struct xnthread_personality xenomai_personality; + +/** @} */ + +#endif /* !_COBALT_KERNEL_THREAD_H */ --- linux/include/xenomai/cobalt/kernel/select.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/select.h 2022-03-21 12:58:31.535868035 +0100 @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2008 Efixo + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_SELECT_H +#define _COBALT_KERNEL_SELECT_H + +#include +#include + +/** + * @addtogroup cobalt_core_select + * @{ + */ + +#define XNSELECT_READ 0 +#define XNSELECT_WRITE 1 +#define XNSELECT_EXCEPT 2 +#define XNSELECT_MAX_TYPES 3 + +struct xnselector { + struct xnsynch synchbase; + struct fds { + fd_set expected; + fd_set pending; + } fds [XNSELECT_MAX_TYPES]; + struct list_head destroy_link; + struct list_head bindings; /* only used by xnselector_destroy */ +}; + +#define __NFDBITS__ (8 * sizeof(unsigned long)) +#define __FDSET_LONGS__ (__FD_SETSIZE/__NFDBITS__) +#define __FDELT__(d) ((d) / __NFDBITS__) +#define __FDMASK__(d) (1UL << ((d) % __NFDBITS__)) + +static inline void __FD_SET__(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS__; + unsigned long __rem = __fd % __NFDBITS__; + __fdsetp->fds_bits[__tmp] |= (1UL<<__rem); +} + +static inline void __FD_CLR__(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS__; + unsigned long __rem = __fd % __NFDBITS__; + __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem); +} + +static inline int __FD_ISSET__(unsigned long __fd, const __kernel_fd_set *__p) +{ + unsigned long __tmp = __fd / __NFDBITS__; + unsigned long __rem = __fd % __NFDBITS__; + return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0; +} + +static inline void __FD_ZERO__(__kernel_fd_set *__p) +{ + unsigned long *__tmp = __p->fds_bits; + int __i; + + __i = __FDSET_LONGS__; + while (__i) { + __i--; + *__tmp = 0; + __tmp++; + } +} + +struct xnselect { + struct list_head bindings; +}; + +#define DECLARE_XNSELECT(name) struct xnselect name + +struct xnselect_binding { + struct xnselector *selector; + struct xnselect *fd; + unsigned int type; + unsigned int bit_index; + struct list_head link; /* link in selected fds list. 
*/ + struct list_head slink; /* link in selector list */ +}; + +void xnselect_init(struct xnselect *select_block); + +int xnselect_bind(struct xnselect *select_block, + struct xnselect_binding *binding, + struct xnselector *selector, + unsigned int type, + unsigned int bit_index, + unsigned int state); + +int __xnselect_signal(struct xnselect *select_block, unsigned int state); + +/** + * Signal a file descriptor state change. + * + * @param select_block pointer to an @a xnselect structure representing the file + * descriptor whose state changed; + * @param state new value of the state. + * + * @retval 1 if rescheduling is needed; + * @retval 0 otherwise. + */ +static inline int +xnselect_signal(struct xnselect *select_block, unsigned int state) +{ + if (!list_empty(&select_block->bindings)) + return __xnselect_signal(select_block, state); + + return 0; +} + +void xnselect_destroy(struct xnselect *select_block); + +int xnselector_init(struct xnselector *selector); + +int xnselect(struct xnselector *selector, + fd_set *out_fds[XNSELECT_MAX_TYPES], + fd_set *in_fds[XNSELECT_MAX_TYPES], + int nfds, + xnticks_t timeout, xntmode_t timeout_mode); + +void xnselector_destroy(struct xnselector *selector); + +int xnselect_mount(void); + +int xnselect_umount(void); + +/** @} */ + +#endif /* _COBALT_KERNEL_SELECT_H */ --- linux/include/xenomai/cobalt/kernel/lock.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/lock.h 2022-03-21 12:58:31.528868103 +0100 @@ -0,0 +1,248 @@ +/* + * Copyright (C) 2001-2008,2012 Philippe Gerum . + * Copyright (C) 2004,2005 Gilles Chanteperdrix . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_KERNEL_LOCK_H +#define _COBALT_KERNEL_LOCK_H + +#include +#include +#include +#include + +/** + * @addtogroup cobalt_core_lock + * + * @{ + */ +#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING + +struct xnlock { + unsigned owner; + arch_spinlock_t alock; + const char *file; + const char *function; + unsigned int line; + int cpu; + unsigned long long spin_time; + unsigned long long lock_date; +}; + +struct xnlockinfo { + unsigned long long spin_time; + unsigned long long lock_time; + const char *file; + const char *function; + unsigned int line; +}; + +#define XNARCH_LOCK_UNLOCKED (struct xnlock) { \ + ~0, \ + __ARCH_SPIN_LOCK_UNLOCKED, \ + NULL, \ + NULL, \ + 0, \ + -1, \ + 0LL, \ + 0LL, \ +} + +#define XNLOCK_DBG_CONTEXT , __FILE__, __LINE__, __FUNCTION__ +#define XNLOCK_DBG_CONTEXT_ARGS \ + , const char *file, int line, const char *function +#define XNLOCK_DBG_PASS_CONTEXT , file, line, function + +void xnlock_dbg_prepare_acquire(unsigned long long *start); +void xnlock_dbg_prepare_spin(unsigned int *spin_limit); +void xnlock_dbg_acquired(struct xnlock *lock, int cpu, + unsigned long long *start, + const char *file, int line, + const char *function); +int xnlock_dbg_release(struct xnlock *lock, + const char *file, int line, + const char *function); + +DECLARE_PER_CPU(struct xnlockinfo, xnlock_stats); + +#else /* !CONFIG_XENO_OPT_DEBUG_LOCKING */ + +struct xnlock { + unsigned owner; + arch_spinlock_t alock; +}; + +#define XNARCH_LOCK_UNLOCKED \ + (struct xnlock) { \ + ~0, \ + __ARCH_SPIN_LOCK_UNLOCKED, \ + } + +#define XNLOCK_DBG_CONTEXT +#define XNLOCK_DBG_CONTEXT_ARGS +#define XNLOCK_DBG_PASS_CONTEXT + +static inline +void xnlock_dbg_prepare_acquire(unsigned long long *start) +{ +} + +static inline +void xnlock_dbg_prepare_spin(unsigned int *spin_limit) +{ +} + +static inline void +xnlock_dbg_acquired(struct xnlock *lock, int cpu, + unsigned long long *start) +{ +} + +static inline int xnlock_dbg_release(struct xnlock *lock) +{ + return 0; +} + +#endif /* !CONFIG_XENO_OPT_DEBUG_LOCKING */ + +#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING) + +#define xnlock_get(lock) __xnlock_get(lock XNLOCK_DBG_CONTEXT) +#define xnlock_put(lock) __xnlock_put(lock XNLOCK_DBG_CONTEXT) +#define xnlock_get_irqsave(lock,x) \ + ((x) = __xnlock_get_irqsave(lock XNLOCK_DBG_CONTEXT)) +#define xnlock_put_irqrestore(lock,x) \ + __xnlock_put_irqrestore(lock,x XNLOCK_DBG_CONTEXT) +#define xnlock_clear_irqoff(lock) xnlock_put_irqrestore(lock, 1) +#define xnlock_clear_irqon(lock) xnlock_put_irqrestore(lock, 0) + +static inline void xnlock_init (struct xnlock *lock) +{ + *lock = XNARCH_LOCK_UNLOCKED; +} + +#define DECLARE_XNLOCK(lock) struct xnlock lock +#define DECLARE_EXTERN_XNLOCK(lock) extern struct xnlock lock +#define DEFINE_XNLOCK(lock) struct xnlock lock = XNARCH_LOCK_UNLOCKED +#define DEFINE_PRIVATE_XNLOCK(lock) static DEFINE_XNLOCK(lock) + +static inline int ____xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + int cpu = raw_smp_processor_id(); + unsigned long long start; + + if (lock->owner == cpu) + return 2; + + xnlock_dbg_prepare_acquire(&start); + + arch_spin_lock(&lock->alock); + lock->owner = cpu; + + xnlock_dbg_acquired(lock, cpu, &start /*, */ XNLOCK_DBG_PASS_CONTEXT); + + return 0; +} + +static inline void ____xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + if (xnlock_dbg_release(lock /*, */ XNLOCK_DBG_PASS_CONTEXT)) + return; + + lock->owner = ~0U; + arch_spin_unlock(&lock->alock); +} + +#ifndef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK 
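+/*
+ * ___xnlock_get/___xnlock_put may be aliased to the inline helpers
+ * above or built out of line (see below); either way the nesting
+ * protocol is the same: ____xnlock_get() returns 2 when the calling
+ * CPU already owns the lock, __xnlock_get_irqsave() folds that value
+ * into the saved flags, and __xnlock_put_irqrestore() releases the
+ * lock only for the outermost acquisition, with splexit(flags & 1)
+ * restoring the original interrupt state.  A minimal usage sketch,
+ * assuming the global nklock declared at the end of this header:
+ *
+ *	spl_t s;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	... critical section, possibly nested on the same CPU ...
+ *	xnlock_put_irqrestore(&nklock, s);
+ */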
+#define ___xnlock_get ____xnlock_get +#define ___xnlock_put ____xnlock_put +#else /* out of line xnlock */ +int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS); + +void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS); +#endif /* out of line xnlock */ + +static inline spl_t +__xnlock_get_irqsave(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + unsigned long flags; + + splhigh(flags); + + if (__locking_active__) + flags |= ___xnlock_get(lock /*, */ XNLOCK_DBG_PASS_CONTEXT); + + return flags; +} + +static inline void __xnlock_put_irqrestore(struct xnlock *lock, spl_t flags + /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + /* Only release the lock if we didn't take it recursively. */ + if (__locking_active__ && !(flags & 2)) + ___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT); + + splexit(flags & 1); +} + +static inline int xnlock_is_owner(struct xnlock *lock) +{ + if (__locking_active__) + return lock->owner == raw_smp_processor_id(); + + return 1; +} + +static inline int __xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + if (__locking_active__) + return ___xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT); + + return 0; +} + +static inline void __xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS) +{ + if (__locking_active__) + ___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT); +} + +#undef __locking_active__ + +#else /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */ + +#define xnlock_init(lock) do { } while(0) +#define xnlock_get(lock) do { } while(0) +#define xnlock_put(lock) do { } while(0) +#define xnlock_get_irqsave(lock,x) splhigh(x) +#define xnlock_put_irqrestore(lock,x) splexit(x) +#define xnlock_clear_irqoff(lock) splmax() +#define xnlock_clear_irqon(lock) splnone() +#define xnlock_is_owner(lock) 1 + +#define DECLARE_XNLOCK(lock) +#define DECLARE_EXTERN_XNLOCK(lock) +#define DEFINE_XNLOCK(lock) +#define DEFINE_PRIVATE_XNLOCK(lock) + +#endif /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */ + +DECLARE_EXTERN_XNLOCK(nklock); + +/** @} */ + +#endif /* !_COBALT_KERNEL_LOCK_H */ --- linux/include/xenomai/cobalt/kernel/heap.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/heap.h 2022-03-21 12:58:31.521868171 +0100 @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2001,2002,2003 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_HEAP_H +#define _COBALT_KERNEL_HEAP_H + +#include +#include +#include +#include +#include +#include + +/** + * @addtogroup cobalt_core_heap + * @{ + */ + +#define XNHEAP_PAGE_SHIFT 9 /* 2^9 => 512 bytes */ +#define XNHEAP_PAGE_SIZE (1UL << XNHEAP_PAGE_SHIFT) +#define XNHEAP_PAGE_MASK (~(XNHEAP_PAGE_SIZE - 1)) +#define XNHEAP_MIN_LOG2 4 /* 16 bytes */ +/* + * Use bucketed memory for sizes between 2^XNHEAP_MIN_LOG2 and + * 2^(XNHEAP_PAGE_SHIFT-1). 
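+ * With the values above (XNHEAP_MIN_LOG2 = 4, XNHEAP_PAGE_SHIFT = 9),
+ * that covers block sizes from 16 to 256 bytes spread over
+ * XNHEAP_PAGE_SHIFT - XNHEAP_MIN_LOG2 = 5 bucket lists; larger
+ * requests are served from one or more whole pages.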
+ */ +#define XNHEAP_MAX_BUCKETS (XNHEAP_PAGE_SHIFT - XNHEAP_MIN_LOG2) +#define XNHEAP_MIN_ALIGN (1U << XNHEAP_MIN_LOG2) +/* Maximum size of a heap (4Gb - PAGE_SIZE). */ +#define XNHEAP_MAX_HEAPSZ (4294967295U - PAGE_SIZE + 1) +/* Bits we need for encoding a page # */ +#define XNHEAP_PGENT_BITS (32 - XNHEAP_PAGE_SHIFT) +/* Each page is represented by a page map entry. */ +#define XNHEAP_PGMAP_BYTES sizeof(struct xnheap_pgentry) + +struct xnheap_pgentry { + /* Linkage in bucket list. */ + unsigned int prev : XNHEAP_PGENT_BITS; + unsigned int next : XNHEAP_PGENT_BITS; + /* page_list or log2. */ + unsigned int type : 6; + /* + * We hold either a spatial map of busy blocks within the page + * for bucketed memory (up to 32 blocks per page), or the + * overall size of the multi-page block if entry.type == + * page_list. + */ + union { + u32 map; + u32 bsize; + }; +}; + +/* + * A range descriptor is stored at the beginning of the first page of + * a range of free pages. xnheap_range.size is nrpages * + * XNHEAP_PAGE_SIZE. Ranges are indexed by address and size in + * rbtrees. + */ +struct xnheap_range { + struct rb_node addr_node; + struct rb_node size_node; + size_t size; +}; + +struct xnheap { + void *membase; + struct rb_root addr_tree; + struct rb_root size_tree; + struct xnheap_pgentry *pagemap; + size_t usable_size; + size_t used_size; + u32 buckets[XNHEAP_MAX_BUCKETS]; + char name[XNOBJECT_NAME_LEN]; + DECLARE_XNLOCK(lock); + struct list_head next; +}; + +extern struct xnheap cobalt_heap; + +#define xnmalloc(size) xnheap_alloc(&cobalt_heap, size) +#define xnfree(ptr) xnheap_free(&cobalt_heap, ptr) + +static inline void *xnheap_get_membase(const struct xnheap *heap) +{ + return heap->membase; +} + +static inline +size_t xnheap_get_size(const struct xnheap *heap) +{ + return heap->usable_size; +} + +static inline +size_t xnheap_get_used(const struct xnheap *heap) +{ + return heap->used_size; +} + +static inline +size_t xnheap_get_free(const struct xnheap *heap) +{ + return heap->usable_size - heap->used_size; +} + +int xnheap_init(struct xnheap *heap, + void *membase, size_t size); + +void xnheap_destroy(struct xnheap *heap); + +void *xnheap_alloc(struct xnheap *heap, size_t size); + +void xnheap_free(struct xnheap *heap, void *block); + +ssize_t xnheap_check_block(struct xnheap *heap, void *block); + +void xnheap_set_name(struct xnheap *heap, + const char *name, ...); + +void *xnheap_vmalloc(size_t size); + +void xnheap_vfree(void *p); + +static inline void *xnheap_zalloc(struct xnheap *heap, size_t size) +{ + void *p; + + p = xnheap_alloc(heap, size); + if (p) + memset(p, 0, size); + + return p; +} + +static inline char *xnstrdup(const char *s) +{ + char *p; + + p = xnmalloc(strlen(s) + 1); + if (p == NULL) + return NULL; + + return strcpy(p, s); +} + +#ifdef CONFIG_XENO_OPT_VFILE +void xnheap_init_proc(void); +void xnheap_cleanup_proc(void); +#else /* !CONFIG_XENO_OPT_VFILE */ +static inline void xnheap_init_proc(void) { } +static inline void xnheap_cleanup_proc(void) { } +#endif /* !CONFIG_XENO_OPT_VFILE */ + +/** @} */ + +#endif /* !_COBALT_KERNEL_HEAP_H */ --- linux/include/xenomai/cobalt/kernel/trace.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/trace.h 2022-03-21 12:58:31.513868249 +0100 @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#ifndef _COBALT_KERNEL_TRACE_H +#define _COBALT_KERNEL_TRACE_H + +#include + +#endif /* !_COBALT_KERNEL_TRACE_H */ --- linux/include/xenomai/cobalt/kernel/clock.h 1970-01-01 01:00:00.000000000 
+0100 +++ linux-patched/include/xenomai/cobalt/kernel/clock.h 2022-03-21 12:58:31.506868318 +0100 @@ -0,0 +1,360 @@ +/* + * Copyright (C) 2006,2007 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_CLOCK_H +#define _COBALT_KERNEL_CLOCK_H + +#include +#include +#include +#include +#include +#include + +/** + * @addtogroup cobalt_core_clock + * @{ + */ + +struct xnsched; +struct xntimerdata; +struct __kernel_timex; + +struct xnclock_gravity { + unsigned long irq; + unsigned long kernel; + unsigned long user; +}; + +struct xnclock { + /** (ns) */ + xnsticks_t wallclock_offset; + /** (ns) */ + xnticks_t resolution; + /** (raw clock ticks). */ + struct xnclock_gravity gravity; + /** Clock name. */ + const char *name; + struct { +#ifdef CONFIG_XENO_OPT_EXTCLOCK + xnticks_t (*read_raw)(struct xnclock *clock); + xnticks_t (*read_monotonic)(struct xnclock *clock); + int (*set_time)(struct xnclock *clock, + const struct timespec64 *ts); + xnsticks_t (*ns_to_ticks)(struct xnclock *clock, + xnsticks_t ns); + xnsticks_t (*ticks_to_ns)(struct xnclock *clock, + xnsticks_t ticks); + xnsticks_t (*ticks_to_ns_rounded)(struct xnclock *clock, + xnsticks_t ticks); + void (*program_local_shot)(struct xnclock *clock, + struct xnsched *sched); + void (*program_remote_shot)(struct xnclock *clock, + struct xnsched *sched); +#endif + int (*adjust_time)(struct xnclock *clock, + struct __kernel_timex *tx); + int (*set_gravity)(struct xnclock *clock, + const struct xnclock_gravity *p); + void (*reset_gravity)(struct xnclock *clock); +#ifdef CONFIG_XENO_OPT_VFILE + void (*print_status)(struct xnclock *clock, + struct xnvfile_regular_iterator *it); +#endif + } ops; + /* Private section. */ + struct xntimerdata *timerdata; + int id; +#ifdef CONFIG_SMP + /** Possible CPU affinity of clock beat. 
*/ + cpumask_t affinity; +#endif +#ifdef CONFIG_XENO_OPT_STATS + struct xnvfile_snapshot timer_vfile; + struct xnvfile_rev_tag timer_revtag; + struct list_head timerq; + int nrtimers; +#endif /* CONFIG_XENO_OPT_STATS */ +#ifdef CONFIG_XENO_OPT_VFILE + struct xnvfile_regular vfile; +#endif +}; + +struct xnclock_ratelimit_state { + xnticks_t interval; + xnticks_t begin; + int burst; + int printed; + int missed; +}; + +extern struct xnclock nkclock; + +int xnclock_register(struct xnclock *clock, + const cpumask_t *affinity); + +void xnclock_deregister(struct xnclock *clock); + +void xnclock_tick(struct xnclock *clock); + +void xnclock_core_local_shot(struct xnsched *sched); + +void xnclock_core_remote_shot(struct xnsched *sched); + +xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns); + +xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks); + +xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks); + +xnticks_t xnclock_core_read_monotonic(void); + +static inline xnticks_t xnclock_core_read_raw(void) +{ + return pipeline_read_cycle_counter(); +} + +/* We use the Linux defaults */ +#define XN_RATELIMIT_INTERVAL 5000000000LL +#define XN_RATELIMIT_BURST 10 + +int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func); + +#define xnclock_ratelimit() ({ \ + static struct xnclock_ratelimit_state __state = { \ + .interval = XN_RATELIMIT_INTERVAL, \ + .burst = XN_RATELIMIT_BURST, \ + }; \ + __xnclock_ratelimit(&__state, __func__); \ +}) + +#ifdef CONFIG_XENO_OPT_EXTCLOCK + +static inline void xnclock_program_shot(struct xnclock *clock, + struct xnsched *sched) +{ + if (likely(clock == &nkclock)) + xnclock_core_local_shot(sched); + else if (clock->ops.program_local_shot) + clock->ops.program_local_shot(clock, sched); +} + +static inline void xnclock_remote_shot(struct xnclock *clock, + struct xnsched *sched) +{ +#ifdef CONFIG_SMP + if (likely(clock == &nkclock)) + xnclock_core_remote_shot(sched); + else if (clock->ops.program_remote_shot) + clock->ops.program_remote_shot(clock, sched); +#endif +} + +static inline xnticks_t xnclock_read_raw(struct xnclock *clock) +{ + if (likely(clock == &nkclock)) + return xnclock_core_read_raw(); + + return clock->ops.read_raw(clock); +} + +static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock, + xnsticks_t ns) +{ + if (likely(clock == &nkclock)) + return xnclock_core_ns_to_ticks(ns); + + return clock->ops.ns_to_ticks(clock, ns); +} + +static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock, + xnsticks_t ticks) +{ + if (likely(clock == &nkclock)) + return xnclock_core_ticks_to_ns(ticks); + + return clock->ops.ticks_to_ns(clock, ticks); +} + +static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock, + xnsticks_t ticks) +{ + if (likely(clock == &nkclock)) + return xnclock_core_ticks_to_ns_rounded(ticks); + + return clock->ops.ticks_to_ns_rounded(clock, ticks); +} + +static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock) +{ + if (likely(clock == &nkclock)) + return xnclock_core_read_monotonic(); + + return clock->ops.read_monotonic(clock); +} + +static inline int xnclock_set_time(struct xnclock *clock, + const struct timespec64 *ts) +{ + if (likely(clock == &nkclock)) + return -EINVAL; + + return clock->ops.set_time(clock, ts); +} + +#else /* !CONFIG_XENO_OPT_EXTCLOCK */ + +static inline void xnclock_program_shot(struct xnclock *clock, + struct xnsched *sched) +{ + xnclock_core_local_shot(sched); +} + +static inline void xnclock_remote_shot(struct xnclock *clock, + struct xnsched *sched) 
+{ +#ifdef CONFIG_SMP + xnclock_core_remote_shot(sched); +#endif +} + +static inline xnticks_t xnclock_read_raw(struct xnclock *clock) +{ + return xnclock_core_read_raw(); +} + +static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock, + xnsticks_t ns) +{ + return xnclock_core_ns_to_ticks(ns); +} + +static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock, + xnsticks_t ticks) +{ + return xnclock_core_ticks_to_ns(ticks); +} + +static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock, + xnsticks_t ticks) +{ + return xnclock_core_ticks_to_ns_rounded(ticks); +} + +static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock) +{ + return xnclock_core_read_monotonic(); +} + +static inline int xnclock_set_time(struct xnclock *clock, + const struct timespec64 *ts) +{ + /* + * There is no way to change the core clock's idea of time. + */ + return -EINVAL; +} + +#endif /* !CONFIG_XENO_OPT_EXTCLOCK */ + +static inline int xnclock_adjust_time(struct xnclock *clock, + struct __kernel_timex *tx) +{ + if (clock->ops.adjust_time == NULL) + return -EOPNOTSUPP; + + return clock->ops.adjust_time(clock, tx); +} + +static inline xnticks_t xnclock_get_offset(struct xnclock *clock) +{ + return clock->wallclock_offset; +} + +static inline xnticks_t xnclock_get_resolution(struct xnclock *clock) +{ + return clock->resolution; /* ns */ +} + +static inline void xnclock_set_resolution(struct xnclock *clock, + xnticks_t resolution) +{ + clock->resolution = resolution; /* ns */ +} + +static inline int xnclock_set_gravity(struct xnclock *clock, + const struct xnclock_gravity *gravity) +{ + if (clock->ops.set_gravity) + return clock->ops.set_gravity(clock, gravity); + + return -EINVAL; +} + +static inline void xnclock_reset_gravity(struct xnclock *clock) +{ + if (clock->ops.reset_gravity) + clock->ops.reset_gravity(clock); +} + +#define xnclock_get_gravity(__clock, __type) ((__clock)->gravity.__type) + +static inline xnticks_t xnclock_read_realtime(struct xnclock *clock) +{ + if (likely(clock == &nkclock)) + return pipeline_read_wallclock(); + /* + * Return an adjusted value of the monotonic time with the + * translated system wallclock offset. + */ + return xnclock_read_monotonic(clock) + xnclock_get_offset(clock); +} + +void xnclock_apply_offset(struct xnclock *clock, + xnsticks_t delta_ns); + +void xnclock_set_wallclock(xnticks_t epoch_ns); + +unsigned long long xnclock_divrem_billion(unsigned long long value, + unsigned long *rem); + +#ifdef CONFIG_XENO_OPT_VFILE + +void xnclock_init_proc(void); + +void xnclock_cleanup_proc(void); + +static inline void xnclock_print_status(struct xnclock *clock, + struct xnvfile_regular_iterator *it) +{ + if (clock->ops.print_status) + clock->ops.print_status(clock, it); +} + +#else +static inline void xnclock_init_proc(void) { } +static inline void xnclock_cleanup_proc(void) { } +#endif + +int xnclock_init(void); + +void xnclock_cleanup(void); + +/** @} */ + +#endif /* !_COBALT_KERNEL_CLOCK_H */ --- linux/include/xenomai/cobalt/kernel/list.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/xenomai/cobalt/kernel/list.h 2022-03-21 12:58:31.498868396 +0100 @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_KERNEL_LIST_H +#define _COBALT_KERNEL_LIST_H + +#include + +#define __list_add_pri(__new, __head, __member_pri, __member_next, __relop) \ +do { \ + typeof(*__new) *__pos; \ + if (list_empty(__head)) \ + list_add(&(__new)->__member_next, __head); \ + else { \ + list_for_each_entry_reverse(__pos, __head, __member_next) { \ + if ((__new)->__member_pri __relop __pos->__member_pri) \ + break; \ + } \ + list_add(&(__new)->__member_next, &__pos->__member_next); \ + } \ +} while (0) + +#define list_add_priff(__new, __head, __member_pri, __member_next) \ + __list_add_pri(__new, __head, __member_pri, __member_next, <=) + +#define list_add_prilf(__new, __head, __member_pri, __member_next) \ + __list_add_pri(__new, __head, __member_pri, __member_next, <) + +#define list_get_entry(__head, __type, __member) \ + ({ \ + __type *__item; \ + __item = list_first_entry(__head, __type, __member); \ + list_del(&__item->__member); \ + __item; \ + }) + +#define list_get_entry_init(__head, __type, __member) \ + ({ \ + __type *__item; \ + __item = list_first_entry(__head, __type, __member); \ + list_del_init(&__item->__member); \ + __item; \ + }) + +#ifndef list_next_entry +#define list_next_entry(__item, __member) \ + list_entry((__item)->__member.next, typeof(*(__item)), __member) +#endif + +#endif /* !_COBALT_KERNEL_LIST_H_ */ --- linux/include/linux/xenomai/wrappers.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/linux/xenomai/wrappers.h 2022-03-21 12:58:28.942893320 +0100 @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2017 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_LINUX_WRAPPERS_H +#define _COBALT_LINUX_WRAPPERS_H + +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) +#include +typedef siginfo_t kernel_siginfo_t; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) +#include +#include +#else +#include +#include +#include +#include +#include +#include +#include +#endif + +#include + +#endif /* !_COBALT_LINUX_WRAPPERS_H */ --- linux/include/asm-generic/xenomai/wrappers.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/asm-generic/xenomai/wrappers.h 2022-03-21 12:58:28.937893369 +0100 @@ -0,0 +1,253 @@ +/* + * Copyright (C) 2005-2012 Philippe Gerum . 
+ * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ASM_GENERIC_WRAPPERS_H + +#include + +#define COBALT_BACKPORT(__sym) __cobalt_backport_ ##__sym + +/* + * To keep the #ifdefery as readable as possible, please: + * + * - keep the conditional structure flat, no nesting (e.g. do not fold + * the pre-3.11 conditions into the pre-3.14 ones). + * - group all wrappers for a single kernel revision. + * - list conditional blocks in order of kernel release, latest first + * - identify the first kernel release for which the wrapper should + * be defined, instead of testing the existence of a preprocessor + * symbol, so that obsolete wrappers can be spotted. + */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) +#define raw_copy_to_user(__to, __from, __n) __copy_to_user_inatomic(__to, __from, __n) +#define raw_copy_from_user(__to, __from, __n) __copy_from_user_inatomic(__to, __from, __n) +#define raw_put_user(__from, __to) __put_user_inatomic(__from, __to) +#define raw_get_user(__to, __from) __get_user_inatomic(__to, __from) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) +#define in_ia32_syscall() (current_thread_info()->status & TS_COMPAT) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) +#define cobalt_gpiochip_dev(__gc) ((__gc)->dev) +#else +#define cobalt_gpiochip_dev(__gc) ((__gc)->parent) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) +#define cobalt_get_restart_block(p) (&task_thread_info(p)->restart_block) +#else +#define cobalt_get_restart_block(p) (&(p)->restart_block) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) +#define user_msghdr msghdr +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) +#include + +#undef alloc_netdev +#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ + alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) + +#include + +static inline unsigned char * +trace_seq_buffer_ptr(struct trace_seq *s) +{ + return s->buffer + s->len; +} + +#endif /* < 3.17 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) +#define smp_mb__before_atomic() smp_mb() +#define smp_mb__after_atomic() smp_mb() +#endif /* < 3.16 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) +#define raw_cpu_ptr(v) __this_cpu_ptr(v) +#endif /* < 3.15 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) +#include + +#ifdef CONFIG_PCI +#define pci_enable_msix_range COBALT_BACKPORT(pci_enable_msix_range) +#ifdef CONFIG_PCI_MSI +int pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, + int minvec, int maxvec); +#else /* !CONFIG_PCI_MSI */ +static inline +int pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, + int minvec, int maxvec) +{ + return -ENOSYS; +} +#endif /* !CONFIG_PCI_MSI */ +#endif /* CONFIG_PCI */ +#endif /* < 3.14 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) +#include +#include + +#define 
dma_set_mask_and_coherent COBALT_BACKPORT(dma_set_mask_and_coherent) +static inline +int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} + +#ifdef CONFIG_HWMON +#define hwmon_device_register_with_groups \ + COBALT_BACKPORT(hwmon_device_register_with_groups) +struct device * +hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups); + +#define devm_hwmon_device_register_with_groups \ + COBALT_BACKPORT(devm_hwmon_device_register_with_groups) +struct device * +devm_hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups); +#endif /* !CONFIG_HWMON */ + +#define reinit_completion(__x) INIT_COMPLETION(*(__x)) + +#endif /* < 3.13 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) +#define DEVICE_ATTR_RW(_name) __ATTR_RW(_name) +#define DEVICE_ATTR_RO(_name) __ATTR_RO(_name) +#define DEVICE_ATTR_WO(_name) __ATTR_WO(_name) +#endif /* < 3.11 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) +#error "Xenomai/cobalt requires Linux kernel 3.10 or above" +#endif /* < 3.10 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0) +#define __kernel_timex timex +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0) +#define old_timex32 compat_timex +#define SO_RCVTIMEO_OLD SO_RCVTIMEO +#define SO_SNDTIMEO_OLD SO_SNDTIMEO +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0) +#define mmiowb() do { } while (0) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) +#define __kernel_old_timeval timeval +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0) +#define mmap_read_lock(__mm) down_read(&mm->mmap_sem) +#define mmap_read_unlock(__mm) up_read(&mm->mmap_sem) +#define mmap_write_lock(__mm) down_write(&mm->mmap_sem) +#define mmap_write_unlock(__mm) up_write(&mm->mmap_sem) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0) +#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write) \ + struct file_operations __name = { \ + .open = (__open), \ + .release = (__release), \ + .read = (__read), \ + .write = (__write), \ + .llseek = seq_lseek, \ +} +#else +#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write) \ + struct proc_ops __name = { \ + .proc_open = (__open), \ + .proc_release = (__release), \ + .proc_read = (__read), \ + .proc_write = (__write), \ + .proc_lseek = seq_lseek, \ +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0) +#define vmalloc_kernel(__size, __flags) __vmalloc(__size, GFP_KERNEL|__flags, PAGE_KERNEL) +#else +#define vmalloc_kernel(__size, __flags) __vmalloc(__size, GFP_KERNEL|__flags) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0) +#define pci_aer_clear_nonfatal_status pci_cleanup_aer_uncorrect_error_status +#define old_timespec32 compat_timespec +#define old_itimerspec32 compat_itimerspec +#define old_timeval32 compat_timeval +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0) +#define vmalloc_kernel(__size, __flags) __vmalloc(__size, GFP_KERNEL|__flags, PAGE_KERNEL) +#else +#define vmalloc_kernel(__size, __flags) __vmalloc(__size, GFP_KERNEL|__flags) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0) +#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \ + ({ \ + loff_t ___file_size; \ + int __ret; \ + __ret = kernel_read_file(__file, __buf, &___file_size, \ + __buf_size, __id); \ + (*__file_size) = ___file_size; \ + __ret; \ + 
}) +#else +#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \ + kernel_read_file(__file, 0, __buf, __buf_size, __file_size, __id) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0) +#define IRQ_WORK_INIT(_func) (struct irq_work) { \ + .flags = ATOMIC_INIT(0), \ + .func = (_func), \ +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0) +#define close_fd(__ufd) __close_fd(current->files, __ufd) +#endif + +#endif /* _COBALT_ASM_GENERIC_WRAPPERS_H */ --- linux/include/asm-generic/xenomai/syscall.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/asm-generic/xenomai/syscall.h 2022-03-21 12:58:28.933893408 +0100 @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ASM_GENERIC_SYSCALL_H +#define _COBALT_ASM_GENERIC_SYSCALL_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) +#define access_rok(addr, size) access_ok((addr), (size)) +#define access_wok(addr, size) access_ok((addr), (size)) +#else +#define access_rok(addr, size) access_ok(VERIFY_READ, (addr), (size)) +#define access_wok(addr, size) access_ok(VERIFY_WRITE, (addr), (size)) +#endif + +#define __xn_copy_from_user(dstP, srcP, n) raw_copy_from_user(dstP, srcP, n) +#define __xn_copy_to_user(dstP, srcP, n) raw_copy_to_user(dstP, srcP, n) +#define __xn_put_user(src, dstP) __put_user(src, dstP) +#define __xn_get_user(dst, srcP) __get_user(dst, srcP) +#define __xn_strncpy_from_user(dstP, srcP, n) strncpy_from_user(dstP, srcP, n) + +static inline int cobalt_copy_from_user(void *dst, const void __user *src, + size_t size) +{ + size_t remaining = size; + + if (likely(access_rok(src, size))) + remaining = __xn_copy_from_user(dst, src, size); + + if (unlikely(remaining > 0)) { + memset(dst + (size - remaining), 0, remaining); + return -EFAULT; + } + return 0; +} + +static inline int cobalt_copy_to_user(void __user *dst, const void *src, + size_t size) +{ + if (unlikely(!access_wok(dst, size) || + __xn_copy_to_user(dst, src, size))) + return -EFAULT; + return 0; +} + +static inline int cobalt_strncpy_from_user(char *dst, const char __user *src, + size_t count) +{ + if (unlikely(!access_rok(src, 1))) + return -EFAULT; + + return __xn_strncpy_from_user(dst, src, count); +} + + +/* + * NOTE: those copy helpers won't work in compat mode: use + * sys32_get_*(), sys32_put_*() instead. 
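+ * A compat (32-bit) caller passes time structures whose layout
+ * differs from the native one, so the plain copies below would
+ * misread them.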
+ */ + +static inline int cobalt_get_u_timespec(struct timespec64 *dst, + const struct __user_old_timespec __user *src) +{ + struct __user_old_timespec u_ts; + int ret; + + ret = cobalt_copy_from_user(&u_ts, src, sizeof(u_ts)); + if (ret) + return ret; + + dst->tv_sec = u_ts.tv_sec; + dst->tv_nsec = u_ts.tv_nsec; + + return 0; +} + +static inline int cobalt_put_u_timespec( + struct __user_old_timespec __user *dst, + const struct timespec64 *src) +{ + struct __user_old_timespec u_ts; + int ret; + + u_ts.tv_sec = src->tv_sec; + u_ts.tv_nsec = src->tv_nsec; + + ret = cobalt_copy_to_user(dst, &u_ts, sizeof(*dst)); + if (ret) + return ret; + + return 0; +} + +static inline int cobalt_get_u_itimerspec(struct itimerspec64 *dst, + const struct __user_old_itimerspec __user *src) +{ + struct __user_old_itimerspec u_its; + int ret; + + ret = cobalt_copy_from_user(&u_its, src, sizeof(u_its)); + if (ret) + return ret; + + dst->it_interval.tv_sec = u_its.it_interval.tv_sec; + dst->it_interval.tv_nsec = u_its.it_interval.tv_nsec; + dst->it_value.tv_sec = u_its.it_value.tv_sec; + dst->it_value.tv_nsec = u_its.it_value.tv_nsec; + + return 0; +} + +static inline int cobalt_put_u_itimerspec( + struct __user_old_itimerspec __user *dst, + const struct itimerspec64 *src) +{ + struct __user_old_itimerspec u_its; + + u_its.it_interval.tv_sec = src->it_interval.tv_sec; + u_its.it_interval.tv_nsec = src->it_interval.tv_nsec; + u_its.it_value.tv_sec = src->it_value.tv_sec; + u_its.it_value.tv_nsec = src->it_value.tv_nsec; + + return cobalt_copy_to_user(dst, &u_its, sizeof(*dst)); +} + +/* 32bit syscall emulation */ +#define __COBALT_COMPAT_BIT 0x1 +/* 32bit syscall emulation - extended form */ +#define __COBALT_COMPATX_BIT 0x2 + +#endif /* !_COBALT_ASM_GENERIC_SYSCALL_H */ --- linux/include/asm-generic/xenomai/pci_ids.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/asm-generic/xenomai/pci_ids.h 2022-03-21 12:58:28.930893437 +0100 @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2009 Gilles Chanteperdrix . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#ifndef _COBALT_ASM_GENERIC_PCI_IDS_H +#define _COBALT_ASM_GENERIC_PCI_IDS_H + +#include + +/* SMI */ +#ifndef PCI_DEVICE_ID_INTEL_ESB2_0 +#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH7_0 +#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH7_1 +#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH8_4 +#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH9_1 +#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH9_5 +#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919 +#endif +#ifndef PCI_DEVICE_ID_INTEL_ICH10_1 +#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 +#endif +#ifndef PCI_DEVICE_ID_INTEL_PCH_LPC_MIN +#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00 +#endif + +/* RTCAN */ +#ifndef PCI_VENDOR_ID_ESDGMBH +#define PCI_VENDOR_ID_ESDGMBH 0x12fe +#endif +#ifndef PCI_DEVICE_ID_PLX_9030 +#define PCI_DEVICE_ID_PLX_9030 0x9030 +#endif +#ifndef PCI_DEVICE_ID_PLX_9056 +#define PCI_DEVICE_ID_PLX_9056 0x9056 +#endif + +#endif /* _COBALT_ASM_GENERIC_PCI_IDS_H */ --- linux/include/asm-generic/xenomai/ipipe/thread.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/asm-generic/xenomai/ipipe/thread.h 2022-03-21 12:58:28.926893476 +0100 @@ -0,0 +1,39 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ASM_GENERIC_IPIPE_THREAD_H +#define _COBALT_ASM_GENERIC_IPIPE_THREAD_H + +#include +#include + +struct task_struct; + +struct xntcb { + struct task_struct *host_task; + struct thread_struct *tsp; + struct mm_struct *mm; + struct mm_struct *active_mm; + struct thread_struct ts; + struct thread_info *tip; +#ifdef CONFIG_XENO_ARCH_FPU + struct task_struct *user_fpu_owner; +#endif +}; + +#endif /* !_COBALT_ASM_GENERIC_IPIPE_THREAD_H */ --- linux/include/asm-generic/xenomai/machine.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/asm-generic/xenomai/machine.h 2022-03-21 12:58:28.923893505 +0100 @@ -0,0 +1,28 @@ +/** + * Copyright © 2012 Philippe Gerum. + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139, + * USA; either version 2 of the License, or (at your option) any later + * version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_ASM_GENERIC_MACHINE_H +#define _COBALT_ASM_GENERIC_MACHINE_H + +#include + +#ifndef xnarch_cache_aliasing +#define xnarch_cache_aliasing() 0 +#endif + +#endif /* !_COBALT_ASM_GENERIC_MACHINE_H */ --- linux/include/asm-generic/xenomai/dovetail/thread.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/asm-generic/xenomai/dovetail/thread.h 2022-03-21 12:58:28.919893545 +0100 @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2021 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H +#define _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H + +#include + +struct xnarchtcb { + struct dovetail_altsched_context altsched; +}; + +static inline +struct task_struct *xnarch_host_task(struct xnarchtcb *tcb) +{ + return tcb->altsched.task; +} + +#endif /* !_COBALT_ASM_GENERIC_DOVETAIL_THREAD_H */ --- linux/include/asm-generic/xenomai/syscall32.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/asm-generic/xenomai/syscall32.h 2022-03-21 12:58:28.916893574 +0100 @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2014 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#ifndef _COBALT_ASM_GENERIC_SYSCALL32_H +#define _COBALT_ASM_GENERIC_SYSCALL32_H + +#define __COBALT_CALL32_INITHAND(__handler) + +#define __COBALT_CALL32_INITMODE(__mode) + +#define __COBALT_CALL32_ENTRY(__name, __handler) + +#define __COBALT_CALL_COMPAT(__reg) 0 + +#endif /* !_COBALT_ASM_GENERIC_SYSCALL32_H */ --- linux/include/trace/events/cobalt-posix.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/trace/events/cobalt-posix.h 2022-03-21 12:58:28.910893632 +0100 @@ -0,0 +1,1186 @@ +/* + * Copyright (C) 2014 Jan Kiszka . + * Copyright (C) 2014 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cobalt_posix + +#if !defined(_TRACE_COBALT_POSIX_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_COBALT_POSIX_H + +#include +#include +#include +#include +#include + +#define __timespec_fields(__name) \ + __field(time64_t, tv_sec_##__name) \ + __field(long, tv_nsec_##__name) + +#define __assign_timespec(__to, __from) \ + do { \ + __entry->tv_sec_##__to = (__from)->tv_sec; \ + __entry->tv_nsec_##__to = (__from)->tv_nsec; \ + } while (0) + +#define __timespec_args(__name) \ + (long long)__entry->tv_sec_##__name, __entry->tv_nsec_##__name + +#ifdef CONFIG_IA32_EMULATION +#define __sc_compat(__name) , { sc_cobalt_##__name + __COBALT_IA32_BASE, "compat-" #__name } +#else +#define __sc_compat(__name) +#endif + +#define __cobalt_symbolic_syscall(__name) \ + { sc_cobalt_##__name, #__name } \ + __sc_compat(__name) \ + +#define __cobalt_syscall_name(__nr) \ + __print_symbolic((__nr), \ + __cobalt_symbolic_syscall(bind), \ + __cobalt_symbolic_syscall(thread_create), \ + __cobalt_symbolic_syscall(thread_getpid), \ + __cobalt_symbolic_syscall(thread_setmode), \ + __cobalt_symbolic_syscall(thread_setname), \ + __cobalt_symbolic_syscall(thread_join), \ + __cobalt_symbolic_syscall(thread_kill), \ + __cobalt_symbolic_syscall(thread_setschedparam_ex), \ + __cobalt_symbolic_syscall(thread_getschedparam_ex), \ + __cobalt_symbolic_syscall(thread_setschedprio), \ + __cobalt_symbolic_syscall(thread_getstat), \ + __cobalt_symbolic_syscall(sem_init), \ + __cobalt_symbolic_syscall(sem_destroy), \ + __cobalt_symbolic_syscall(sem_post), \ + __cobalt_symbolic_syscall(sem_wait), \ + __cobalt_symbolic_syscall(sem_trywait), \ + __cobalt_symbolic_syscall(sem_getvalue), \ + __cobalt_symbolic_syscall(sem_open), \ + __cobalt_symbolic_syscall(sem_close), \ + __cobalt_symbolic_syscall(sem_unlink), \ + __cobalt_symbolic_syscall(sem_timedwait), \ + __cobalt_symbolic_syscall(sem_inquire), \ + __cobalt_symbolic_syscall(sem_broadcast_np), \ + __cobalt_symbolic_syscall(clock_getres), \ + __cobalt_symbolic_syscall(clock_gettime), \ + __cobalt_symbolic_syscall(clock_settime), \ + __cobalt_symbolic_syscall(clock_nanosleep), \ + __cobalt_symbolic_syscall(mutex_init), \ + __cobalt_symbolic_syscall(mutex_check_init), \ + __cobalt_symbolic_syscall(mutex_destroy), \ + __cobalt_symbolic_syscall(mutex_lock), \ + __cobalt_symbolic_syscall(mutex_timedlock), \ + __cobalt_symbolic_syscall(mutex_trylock), \ + __cobalt_symbolic_syscall(mutex_unlock), \ + __cobalt_symbolic_syscall(cond_init), \ + __cobalt_symbolic_syscall(cond_destroy), \ + __cobalt_symbolic_syscall(cond_wait_prologue), \ + __cobalt_symbolic_syscall(cond_wait_epilogue), \ + __cobalt_symbolic_syscall(mq_open), \ + __cobalt_symbolic_syscall(mq_close), \ + __cobalt_symbolic_syscall(mq_unlink), \ + __cobalt_symbolic_syscall(mq_getattr), \ + __cobalt_symbolic_syscall(mq_timedsend), \ + __cobalt_symbolic_syscall(mq_timedreceive), \ + __cobalt_symbolic_syscall(mq_notify), \ + __cobalt_symbolic_syscall(sched_minprio), \ + __cobalt_symbolic_syscall(sched_maxprio), \ + __cobalt_symbolic_syscall(sched_weightprio), \ + __cobalt_symbolic_syscall(sched_yield), \ + __cobalt_symbolic_syscall(sched_setscheduler_ex), \ + __cobalt_symbolic_syscall(sched_getscheduler_ex), \ + __cobalt_symbolic_syscall(sched_setconfig_np), \ + __cobalt_symbolic_syscall(sched_getconfig_np), \ + __cobalt_symbolic_syscall(timer_create), \ + __cobalt_symbolic_syscall(timer_delete), \ + __cobalt_symbolic_syscall(timer_settime), \ + 
__cobalt_symbolic_syscall(timer_gettime), \ + __cobalt_symbolic_syscall(timer_getoverrun), \ + __cobalt_symbolic_syscall(timerfd_create), \ + __cobalt_symbolic_syscall(timerfd_settime), \ + __cobalt_symbolic_syscall(timerfd_gettime), \ + __cobalt_symbolic_syscall(sigwait), \ + __cobalt_symbolic_syscall(sigwaitinfo), \ + __cobalt_symbolic_syscall(sigtimedwait), \ + __cobalt_symbolic_syscall(sigpending), \ + __cobalt_symbolic_syscall(kill), \ + __cobalt_symbolic_syscall(sigqueue), \ + __cobalt_symbolic_syscall(monitor_init), \ + __cobalt_symbolic_syscall(monitor_destroy), \ + __cobalt_symbolic_syscall(monitor_enter), \ + __cobalt_symbolic_syscall(monitor_wait), \ + __cobalt_symbolic_syscall(monitor_sync), \ + __cobalt_symbolic_syscall(monitor_exit), \ + __cobalt_symbolic_syscall(event_init), \ + __cobalt_symbolic_syscall(event_wait), \ + __cobalt_symbolic_syscall(event_sync), \ + __cobalt_symbolic_syscall(event_destroy), \ + __cobalt_symbolic_syscall(event_inquire), \ + __cobalt_symbolic_syscall(open), \ + __cobalt_symbolic_syscall(socket), \ + __cobalt_symbolic_syscall(close), \ + __cobalt_symbolic_syscall(ioctl), \ + __cobalt_symbolic_syscall(read), \ + __cobalt_symbolic_syscall(write), \ + __cobalt_symbolic_syscall(recvmsg), \ + __cobalt_symbolic_syscall(sendmsg), \ + __cobalt_symbolic_syscall(mmap), \ + __cobalt_symbolic_syscall(select), \ + __cobalt_symbolic_syscall(fcntl), \ + __cobalt_symbolic_syscall(migrate), \ + __cobalt_symbolic_syscall(archcall), \ + __cobalt_symbolic_syscall(trace), \ + __cobalt_symbolic_syscall(corectl), \ + __cobalt_symbolic_syscall(get_current), \ + __cobalt_symbolic_syscall(backtrace), \ + __cobalt_symbolic_syscall(serialdbg), \ + __cobalt_symbolic_syscall(extend), \ + __cobalt_symbolic_syscall(ftrace_puts), \ + __cobalt_symbolic_syscall(recvmmsg), \ + __cobalt_symbolic_syscall(sendmmsg), \ + __cobalt_symbolic_syscall(clock_adjtime), \ + __cobalt_symbolic_syscall(sem_timedwait64), \ + __cobalt_symbolic_syscall(clock_gettime64), \ + __cobalt_symbolic_syscall(clock_settime64), \ + __cobalt_symbolic_syscall(clock_nanosleep64), \ + __cobalt_symbolic_syscall(clock_getres64), \ + __cobalt_symbolic_syscall(clock_adjtime64), \ + __cobalt_symbolic_syscall(mutex_timedlock64), \ + __cobalt_symbolic_syscall(mq_timedsend64), \ + __cobalt_symbolic_syscall(mq_timedreceive64), \ + __cobalt_symbolic_syscall(sigtimedwait64), \ + __cobalt_symbolic_syscall(monitor_wait64), \ + __cobalt_symbolic_syscall(event_wait64), \ + __cobalt_symbolic_syscall(recvmmsg64)) + +DECLARE_EVENT_CLASS(cobalt_syscall_entry, + TP_PROTO(unsigned int nr), + TP_ARGS(nr), + + TP_STRUCT__entry( + __field(unsigned int, nr) + ), + + TP_fast_assign( + __entry->nr = nr; + ), + + TP_printk("syscall=%s", __cobalt_syscall_name(__entry->nr)) +); + +DECLARE_EVENT_CLASS(cobalt_syscall_exit, + TP_PROTO(long result), + TP_ARGS(result), + + TP_STRUCT__entry( + __field(long, result) + ), + + TP_fast_assign( + __entry->result = result; + ), + + TP_printk("result=%ld", __entry->result) +); + +#define cobalt_print_sched_policy(__policy) \ + __print_symbolic(__policy, \ + {SCHED_NORMAL, "normal"}, \ + {SCHED_FIFO, "fifo"}, \ + {SCHED_RR, "rr"}, \ + {SCHED_TP, "tp"}, \ + {SCHED_QUOTA, "quota"}, \ + {SCHED_SPORADIC, "sporadic"}, \ + {SCHED_COBALT, "cobalt"}, \ + {SCHED_WEAK, "weak"}) + +const char *cobalt_trace_parse_sched_params(struct trace_seq *, int, + struct sched_param_ex *); + +#define __parse_sched_params(policy, params) \ + cobalt_trace_parse_sched_params(p, policy, \ + (struct sched_param_ex *)(params)) + 
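+/*
+ * The schedparam/scheduler event classes below snapshot the raw
+ * sched_param_ex blob into a per-event __dynamic_array at trace
+ * time; it is only decoded when the trace buffer is read, by handing
+ * the recorded policy and parameter block to
+ * cobalt_trace_parse_sched_params() through __parse_sched_params().
+ */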
+DECLARE_EVENT_CLASS(cobalt_posix_schedparam, + TP_PROTO(unsigned long pth, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pth, policy, param_ex), + + TP_STRUCT__entry( + __field(unsigned long, pth) + __field(int, policy) + __dynamic_array(char, param_ex, sizeof(struct sched_param_ex)) + ), + + TP_fast_assign( + __entry->pth = pth; + __entry->policy = policy; + memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex)); + ), + + TP_printk("pth=%p policy=%s param={ %s }", + (void *)__entry->pth, + cobalt_print_sched_policy(__entry->policy), + __parse_sched_params(__entry->policy, + __get_dynamic_array(param_ex)) + ) +); + +DECLARE_EVENT_CLASS(cobalt_posix_scheduler, + TP_PROTO(pid_t pid, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pid, policy, param_ex), + + TP_STRUCT__entry( + __field(pid_t, pid) + __field(int, policy) + __dynamic_array(char, param_ex, sizeof(struct sched_param_ex)) + ), + + TP_fast_assign( + __entry->pid = pid; + __entry->policy = policy; + memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex)); + ), + + TP_printk("pid=%d policy=%s param={ %s }", + __entry->pid, + cobalt_print_sched_policy(__entry->policy), + __parse_sched_params(__entry->policy, + __get_dynamic_array(param_ex)) + ) +); + +DECLARE_EVENT_CLASS(cobalt_void, + TP_PROTO(int dummy), + TP_ARGS(dummy), + TP_STRUCT__entry( + __field(int, dummy) + ), + TP_fast_assign( + (void)dummy; + ), + TP_printk("%s", "") +); + +DEFINE_EVENT(cobalt_syscall_entry, cobalt_head_sysentry, + TP_PROTO(unsigned int nr), + TP_ARGS(nr) +); + +DEFINE_EVENT(cobalt_syscall_exit, cobalt_head_sysexit, + TP_PROTO(long result), + TP_ARGS(result) +); + +DEFINE_EVENT(cobalt_syscall_entry, cobalt_root_sysentry, + TP_PROTO(unsigned int nr), + TP_ARGS(nr) +); + +DEFINE_EVENT(cobalt_syscall_exit, cobalt_root_sysexit, + TP_PROTO(long result), + TP_ARGS(result) +); + +DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_create, + TP_PROTO(unsigned long pth, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pth, policy, param_ex) +); + +DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_setschedparam, + TP_PROTO(unsigned long pth, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pth, policy, param_ex) +); + +DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_getschedparam, + TP_PROTO(unsigned long pth, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pth, policy, param_ex) +); + +TRACE_EVENT(cobalt_pthread_setschedprio, + TP_PROTO(unsigned long pth, int prio), + TP_ARGS(pth, prio), + TP_STRUCT__entry( + __field(unsigned long, pth) + __field(int, prio) + ), + TP_fast_assign( + __entry->pth = pth; + __entry->prio = prio; + ), + TP_printk("pth=%p prio=%d", (void *)__entry->pth, __entry->prio) +); + +#define cobalt_print_thread_mode(__mode) \ + __print_flags(__mode, "|", \ + {PTHREAD_WARNSW, "warnsw"}, \ + {PTHREAD_LOCK_SCHED, "lock"}, \ + {PTHREAD_DISABLE_LOCKBREAK, "nolockbreak"}) + +TRACE_EVENT(cobalt_pthread_setmode, + TP_PROTO(int clrmask, int setmask), + TP_ARGS(clrmask, setmask), + TP_STRUCT__entry( + __field(int, clrmask) + __field(int, setmask) + ), + TP_fast_assign( + __entry->clrmask = clrmask; + __entry->setmask = setmask; + ), + TP_printk("clrmask=%#x(%s) setmask=%#x(%s)", + __entry->clrmask, cobalt_print_thread_mode(__entry->clrmask), + __entry->setmask, cobalt_print_thread_mode(__entry->setmask)) +); + +TRACE_EVENT(cobalt_pthread_setname, + TP_PROTO(unsigned long pth, const char *name), + TP_ARGS(pth, name), + TP_STRUCT__entry( + 
__field(unsigned long, pth) + __string(name, name) + ), + TP_fast_assign( + __entry->pth = pth; + __assign_str(name, name); + ), + TP_printk("pth=%p name=%s", (void *)__entry->pth, __get_str(name)) +); + +DECLARE_EVENT_CLASS(cobalt_posix_pid, + TP_PROTO(pid_t pid), + TP_ARGS(pid), + TP_STRUCT__entry( + __field(pid_t, pid) + ), + TP_fast_assign( + __entry->pid = pid; + ), + TP_printk("pid=%d", __entry->pid) +); + +DEFINE_EVENT(cobalt_posix_pid, cobalt_pthread_stat, + TP_PROTO(pid_t pid), + TP_ARGS(pid) +); + +TRACE_EVENT(cobalt_pthread_kill, + TP_PROTO(unsigned long pth, int sig), + TP_ARGS(pth, sig), + TP_STRUCT__entry( + __field(unsigned long, pth) + __field(int, sig) + ), + TP_fast_assign( + __entry->pth = pth; + __entry->sig = sig; + ), + TP_printk("pth=%p sig=%d", (void *)__entry->pth, __entry->sig) +); + +TRACE_EVENT(cobalt_pthread_join, + TP_PROTO(unsigned long pth), + TP_ARGS(pth), + TP_STRUCT__entry( + __field(unsigned long, pth) + ), + TP_fast_assign( + __entry->pth = pth; + ), + TP_printk("pth=%p", (void *)__entry->pth) +); + +TRACE_EVENT(cobalt_pthread_pid, + TP_PROTO(unsigned long pth), + TP_ARGS(pth), + TP_STRUCT__entry( + __field(unsigned long, pth) + ), + TP_fast_assign( + __entry->pth = pth; + ), + TP_printk("pth=%p", (void *)__entry->pth) +); + +TRACE_EVENT(cobalt_pthread_extend, + TP_PROTO(unsigned long pth, const char *name), + TP_ARGS(pth, name), + TP_STRUCT__entry( + __field(unsigned long, pth) + __string(name, name) + ), + TP_fast_assign( + __entry->pth = pth; + __assign_str(name, name); + ), + TP_printk("pth=%p +personality=%s", (void *)__entry->pth, __get_str(name)) +); + +TRACE_EVENT(cobalt_pthread_restrict, + TP_PROTO(unsigned long pth, const char *name), + TP_ARGS(pth, name), + TP_STRUCT__entry( + __field(unsigned long, pth) + __string(name, name) + ), + TP_fast_assign( + __entry->pth = pth; + __assign_str(name, name); + ), + TP_printk("pth=%p -personality=%s", (void *)__entry->pth, __get_str(name)) +); + +DEFINE_EVENT(cobalt_void, cobalt_pthread_yield, + TP_PROTO(int dummy), + TP_ARGS(dummy) +); + +TRACE_EVENT(cobalt_sched_setconfig, + TP_PROTO(int cpu, int policy, size_t len), + TP_ARGS(cpu, policy, len), + TP_STRUCT__entry( + __field(int, cpu) + __field(int, policy) + __field(size_t, len) + ), + TP_fast_assign( + __entry->cpu = cpu; + __entry->policy = policy; + __entry->len = len; + ), + TP_printk("cpu=%d policy=%d(%s) len=%zu", + __entry->cpu, __entry->policy, + cobalt_print_sched_policy(__entry->policy), + __entry->len) +); + +TRACE_EVENT(cobalt_sched_get_config, + TP_PROTO(int cpu, int policy, size_t rlen), + TP_ARGS(cpu, policy, rlen), + TP_STRUCT__entry( + __field(int, cpu) + __field(int, policy) + __field(ssize_t, rlen) + ), + TP_fast_assign( + __entry->cpu = cpu; + __entry->policy = policy; + __entry->rlen = rlen; + ), + TP_printk("cpu=%d policy=%d(%s) rlen=%Zd", + __entry->cpu, __entry->policy, + cobalt_print_sched_policy(__entry->policy), + __entry->rlen) +); + +DEFINE_EVENT(cobalt_posix_scheduler, cobalt_sched_setscheduler, + TP_PROTO(pid_t pid, int policy, + const struct sched_param_ex *param_ex), + TP_ARGS(pid, policy, param_ex) +); + +DEFINE_EVENT(cobalt_posix_pid, cobalt_sched_getscheduler, + TP_PROTO(pid_t pid), + TP_ARGS(pid) +); + +DECLARE_EVENT_CLASS(cobalt_posix_prio_bound, + TP_PROTO(int policy, int prio), + TP_ARGS(policy, prio), + TP_STRUCT__entry( + __field(int, policy) + __field(int, prio) + ), + TP_fast_assign( + __entry->policy = policy; + __entry->prio = prio; + ), + TP_printk("policy=%d(%s) prio=%d", + __entry->policy, + 
cobalt_print_sched_policy(__entry->policy), + __entry->prio) +); + +DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_min_prio, + TP_PROTO(int policy, int prio), + TP_ARGS(policy, prio) +); + +DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_max_prio, + TP_PROTO(int policy, int prio), + TP_ARGS(policy, prio) +); + +DECLARE_EVENT_CLASS(cobalt_posix_sem, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle), + TP_STRUCT__entry( + __field(xnhandle_t, handle) + ), + TP_fast_assign( + __entry->handle = handle; + ), + TP_printk("sem=%#x", __entry->handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_wait, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_trywait, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_timedwait, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_post, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_destroy, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_broadcast, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_inquire, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +TRACE_EVENT(cobalt_psem_getvalue, + TP_PROTO(xnhandle_t handle, int value), + TP_ARGS(handle, value), + TP_STRUCT__entry( + __field(xnhandle_t, handle) + __field(int, value) + ), + TP_fast_assign( + __entry->handle = handle; + __entry->value = value; + ), + TP_printk("sem=%#x value=%d", __entry->handle, __entry->value) +); + +#define cobalt_print_sem_flags(__flags) \ + __print_flags(__flags, "|", \ + {SEM_FIFO, "fifo"}, \ + {SEM_PULSE, "pulse"}, \ + {SEM_PSHARED, "pshared"}, \ + {SEM_REPORT, "report"}, \ + {SEM_WARNDEL, "warndel"}, \ + {SEM_RAWCLOCK, "rawclock"}, \ + {SEM_NOBUSYDEL, "nobusydel"}) + +TRACE_EVENT(cobalt_psem_init, + TP_PROTO(const char *name, xnhandle_t handle, + int flags, unsigned int value), + TP_ARGS(name, handle, flags, value), + TP_STRUCT__entry( + __string(name, name) + __field(xnhandle_t, handle) + __field(int, flags) + __field(unsigned int, value) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->handle = handle; + __entry->flags = flags; + __entry->value = value; + ), + TP_printk("sem=%#x(%s) flags=%#x(%s) value=%u", + __entry->handle, + __get_str(name), + __entry->flags, + cobalt_print_sem_flags(__entry->flags), + __entry->value) +); + +TRACE_EVENT(cobalt_psem_init_failed, + TP_PROTO(const char *name, int flags, unsigned int value, int status), + TP_ARGS(name, flags, value, status), + TP_STRUCT__entry( + __string(name, name) + __field(int, flags) + __field(unsigned int, value) + __field(int, status) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->flags = flags; + __entry->value = value; + __entry->status = status; + ), + TP_printk("name=%s flags=%#x(%s) value=%u error=%d", + __get_str(name), + __entry->flags, + cobalt_print_sem_flags(__entry->flags), + __entry->value, __entry->status) +); + +#define cobalt_print_oflags(__flags) \ + __print_flags(__flags, "|", \ + {O_RDONLY, "rdonly"}, \ + {O_WRONLY, "wronly"}, \ + {O_RDWR, "rdwr"}, \ + {O_CREAT, "creat"}, \ + {O_EXCL, "excl"}, \ + {O_DIRECT, "direct"}, \ + {O_NONBLOCK, "nonblock"}, \ + {O_TRUNC, "trunc"}) + +TRACE_EVENT(cobalt_psem_open, + TP_PROTO(const char *name, xnhandle_t handle, + int oflags, mode_t mode, unsigned int value), + TP_ARGS(name, handle, oflags, mode, value), + TP_STRUCT__entry( + 
__string(name, name) + __field(xnhandle_t, handle) + __field(int, oflags) + __field(mode_t, mode) + __field(unsigned int, value) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->handle = handle; + __entry->oflags = oflags; + if (oflags & O_CREAT) { + __entry->mode = mode; + __entry->value = value; + } else { + __entry->mode = 0; + __entry->value = 0; + } + ), + TP_printk("named_sem=%#x=(%s) oflags=%#x(%s) mode=%o value=%u", + __entry->handle, __get_str(name), + __entry->oflags, cobalt_print_oflags(__entry->oflags), + __entry->mode, __entry->value) +); + +TRACE_EVENT(cobalt_psem_open_failed, + TP_PROTO(const char *name, int oflags, mode_t mode, + unsigned int value, int status), + TP_ARGS(name, oflags, mode, value, status), + TP_STRUCT__entry( + __string(name, name) + __field(int, oflags) + __field(mode_t, mode) + __field(unsigned int, value) + __field(int, status) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->oflags = oflags; + __entry->status = status; + if (oflags & O_CREAT) { + __entry->mode = mode; + __entry->value = value; + } else { + __entry->mode = 0; + __entry->value = 0; + } + ), + TP_printk("named_sem=%s oflags=%#x(%s) mode=%o value=%u error=%d", + __get_str(name), + __entry->oflags, cobalt_print_oflags(__entry->oflags), + __entry->mode, __entry->value, __entry->status) +); + +DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_close, + TP_PROTO(xnhandle_t handle), + TP_ARGS(handle) +); + +TRACE_EVENT(cobalt_psem_unlink, + TP_PROTO(const char *name), + TP_ARGS(name), + TP_STRUCT__entry( + __string(name, name) + ), + TP_fast_assign( + __assign_str(name, name); + ), + TP_printk("name=%s", __get_str(name)) +); + +DECLARE_EVENT_CLASS(cobalt_clock_timespec, + TP_PROTO(clockid_t clk_id, const struct timespec64 *val), + TP_ARGS(clk_id, val), + + TP_STRUCT__entry( + __field(clockid_t, clk_id) + __timespec_fields(val) + ), + + TP_fast_assign( + __entry->clk_id = clk_id; + __assign_timespec(val, val); + ), + + TP_printk("clock_id=%d timeval=(%lld.%09ld)", + __entry->clk_id, + __timespec_args(val) + ) +); + +DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_getres, + TP_PROTO(clockid_t clk_id, const struct timespec64 *res), + TP_ARGS(clk_id, res) +); + +DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_gettime, + TP_PROTO(clockid_t clk_id, const struct timespec64 *time), + TP_ARGS(clk_id, time) +); + +DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_settime, + TP_PROTO(clockid_t clk_id, const struct timespec64 *time), + TP_ARGS(clk_id, time) +); + +TRACE_EVENT(cobalt_clock_adjtime, + TP_PROTO(clockid_t clk_id, struct __kernel_timex *tx), + TP_ARGS(clk_id, tx), + + TP_STRUCT__entry( + __field(clockid_t, clk_id) + __field(struct __kernel_timex *, tx) + ), + + TP_fast_assign( + __entry->clk_id = clk_id; + __entry->tx = tx; + ), + + TP_printk("clock_id=%d timex=%p", + __entry->clk_id, + __entry->tx + ) +); + +#define cobalt_print_timer_flags(__flags) \ + __print_flags(__flags, "|", \ + {TIMER_ABSTIME, "TIMER_ABSTIME"}) + +TRACE_EVENT(cobalt_clock_nanosleep, + TP_PROTO(clockid_t clk_id, int flags, const struct timespec64 *time), + TP_ARGS(clk_id, flags, time), + + TP_STRUCT__entry( + __field(clockid_t, clk_id) + __field(int, flags) + __timespec_fields(time) + ), + + TP_fast_assign( + __entry->clk_id = clk_id; + __entry->flags = flags; + __assign_timespec(time, time); + ), + + TP_printk("clock_id=%d flags=%#x(%s) rqt=(%lld.%09ld)", + __entry->clk_id, + __entry->flags, cobalt_print_timer_flags(__entry->flags), + __timespec_args(time) + ) +); + 
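+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * patch): every TRACE_EVENT/DEFINE_EVENT above generates a trace_<name>()
+ * helper which the Cobalt clock services are expected to call with a clock
+ * id and a struct timespec64, along the lines of:
+ *
+ *	struct timespec64 ts = { .tv_sec = 1, .tv_nsec = 500000000 };
+ *
+ *	trace_cobalt_clock_settime(CLOCK_REALTIME, &ts);
+ *
+ * The call site remains a static-key no-op until the event is enabled at
+ * run time via tracefs, e.g. events/cobalt_posix/cobalt_clock_settime/enable,
+ * assuming the cobalt_posix trace system declared for this header.
+ */
+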
+DECLARE_EVENT_CLASS(cobalt_clock_ident, + TP_PROTO(const char *name, clockid_t clk_id), + TP_ARGS(name, clk_id), + TP_STRUCT__entry( + __string(name, name) + __field(clockid_t, clk_id) + ), + TP_fast_assign( + __assign_str(name, name); + __entry->clk_id = clk_id; + ), + TP_printk("name=%s, id=%#x", __get_str(name), __entry->clk_id) +); + +DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_register, + TP_PROTO(const char *name, clockid_t clk_id), + TP_ARGS(name, clk_id) +); + +DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_deregister, + TP_PROTO(const char *name, clockid_t clk_id), + TP_ARGS(name, clk_id) +); + +#define cobalt_print_clock(__clk_id) \ + __print_symbolic(__clk_id, \ + {CLOCK_MONOTONIC, "CLOCK_MONOTONIC"}, \ + {CLOCK_MONOTONIC_RAW, "CLOCK_MONOTONIC_RAW"}, \ + {CLOCK_REALTIME, "CLOCK_REALTIME"}) + +TRACE_EVENT(cobalt_cond_init, + TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd, + const struct cobalt_condattr *attr), + TP_ARGS(u_cnd, attr), + TP_STRUCT__entry( + __field(const struct cobalt_cond_shadow __user *, u_cnd) + __field(clockid_t, clk_id) + __field(int, pshared) + ), + TP_fast_assign( + __entry->u_cnd = u_cnd; + __entry->clk_id = attr->clock; + __entry->pshared = attr->pshared; + ), + TP_printk("cond=%p attr={ .clock=%s, .pshared=%d }", + __entry->u_cnd, + cobalt_print_clock(__entry->clk_id), + __entry->pshared) +); + +TRACE_EVENT(cobalt_cond_destroy, + TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd), + TP_ARGS(u_cnd), + TP_STRUCT__entry( + __field(const struct cobalt_cond_shadow __user *, u_cnd) + ), + TP_fast_assign( + __entry->u_cnd = u_cnd; + ), + TP_printk("cond=%p", __entry->u_cnd) +); + +TRACE_EVENT(cobalt_cond_timedwait, + TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd, + const struct cobalt_mutex_shadow __user *u_mx, + const struct timespec64 *timeout), + TP_ARGS(u_cnd, u_mx, timeout), + TP_STRUCT__entry( + __field(const struct cobalt_cond_shadow __user *, u_cnd) + __field(const struct cobalt_mutex_shadow __user *, u_mx) + __timespec_fields(timeout) + ), + TP_fast_assign( + __entry->u_cnd = u_cnd; + __entry->u_mx = u_mx; + __assign_timespec(timeout, timeout); + ), + TP_printk("cond=%p, mutex=%p, timeout=(%lld.%09ld)", + __entry->u_cnd, __entry->u_mx, __timespec_args(timeout)) +); + +TRACE_EVENT(cobalt_cond_wait, + TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd, + const struct cobalt_mutex_shadow __user *u_mx), + TP_ARGS(u_cnd, u_mx), + TP_STRUCT__entry( + __field(const struct cobalt_cond_shadow __user *, u_cnd) + __field(const struct cobalt_mutex_shadow __user *, u_mx) + ), + TP_fast_assign( + __entry->u_cnd = u_cnd; + __entry->u_mx = u_mx; + ), + TP_printk("cond=%p, mutex=%p", + __entry->u_cnd, __entry->u_mx) +); + +TRACE_EVENT(cobalt_mq_open, + TP_PROTO(const char *name, int oflags, mode_t mode), + TP_ARGS(name, oflags, mode), + + TP_STRUCT__entry( + __string(name, name) + __field(int, oflags) + __field(mode_t, mode) + ), + + TP_fast_assign( + __assign_str(name, name); + __entry->oflags = oflags; + __entry->mode = (oflags & O_CREAT) ? mode : 0; + ), + + TP_printk("name=%s oflags=%#x(%s) mode=%o", + __get_str(name), + __entry->oflags, cobalt_print_oflags(__entry->oflags), + __entry->mode) +); + +TRACE_EVENT(cobalt_mq_notify, + TP_PROTO(mqd_t mqd, const struct sigevent *sev), + TP_ARGS(mqd, sev), + + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(int, signo) + ), + + TP_fast_assign( + __entry->mqd = mqd; + __entry->signo = sev && sev->sigev_notify != SIGEV_NONE ? 
+ sev->sigev_signo : 0; + ), + + TP_printk("mqd=%d signo=%d", + __entry->mqd, __entry->signo) +); + +TRACE_EVENT(cobalt_mq_close, + TP_PROTO(mqd_t mqd), + TP_ARGS(mqd), + + TP_STRUCT__entry( + __field(mqd_t, mqd) + ), + + TP_fast_assign( + __entry->mqd = mqd; + ), + + TP_printk("mqd=%d", __entry->mqd) +); + +TRACE_EVENT(cobalt_mq_unlink, + TP_PROTO(const char *name), + TP_ARGS(name), + + TP_STRUCT__entry( + __string(name, name) + ), + + TP_fast_assign( + __assign_str(name, name); + ), + + TP_printk("name=%s", __get_str(name)) +); + +TRACE_EVENT(cobalt_mq_send, + TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len, + unsigned int prio), + TP_ARGS(mqd, u_buf, len, prio), + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(const void __user *, u_buf) + __field(size_t, len) + __field(unsigned int, prio) + ), + TP_fast_assign( + __entry->mqd = mqd; + __entry->u_buf = u_buf; + __entry->len = len; + __entry->prio = prio; + ), + TP_printk("mqd=%d buf=%p len=%zu prio=%u", + __entry->mqd, __entry->u_buf, __entry->len, + __entry->prio) +); + +TRACE_EVENT(cobalt_mq_timedreceive, + TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len, + const struct timespec64 *timeout), + TP_ARGS(mqd, u_buf, len, timeout), + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(const void __user *, u_buf) + __field(size_t, len) + __timespec_fields(timeout) + ), + TP_fast_assign( + __entry->mqd = mqd; + __entry->u_buf = u_buf; + __entry->len = len; + __assign_timespec(timeout, timeout); + ), + TP_printk("mqd=%d buf=%p len=%zu timeout=(%lld.%09ld)", + __entry->mqd, __entry->u_buf, __entry->len, + __timespec_args(timeout)) +); + +TRACE_EVENT(cobalt_mq_receive, + TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len), + TP_ARGS(mqd, u_buf, len), + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(const void __user *, u_buf) + __field(size_t, len) + ), + TP_fast_assign( + __entry->mqd = mqd; + __entry->u_buf = u_buf; + __entry->len = len; + ), + TP_printk("mqd=%d buf=%p len=%zu", + __entry->mqd, __entry->u_buf, __entry->len) +); + +DECLARE_EVENT_CLASS(cobalt_posix_mqattr, + TP_PROTO(mqd_t mqd, const struct mq_attr *attr), + TP_ARGS(mqd, attr), + TP_STRUCT__entry( + __field(mqd_t, mqd) + __field(long, flags) + __field(long, curmsgs) + __field(long, msgsize) + __field(long, maxmsg) + ), + TP_fast_assign( + __entry->mqd = mqd; + __entry->flags = attr->mq_flags; + __entry->curmsgs = attr->mq_curmsgs; + __entry->msgsize = attr->mq_msgsize; + __entry->maxmsg = attr->mq_maxmsg; + ), + TP_printk("mqd=%d flags=%#lx(%s) curmsgs=%ld msgsize=%ld maxmsg=%ld", + __entry->mqd, + __entry->flags, cobalt_print_oflags(__entry->flags), + __entry->curmsgs, + __entry->msgsize, + __entry->maxmsg + ) +); + +DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_getattr, + TP_PROTO(mqd_t mqd, const struct mq_attr *attr), + TP_ARGS(mqd, attr) +); + +DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_setattr, + TP_PROTO(mqd_t mqd, const struct mq_attr *attr), + TP_ARGS(mqd, attr) +); + +#define cobalt_print_evflags(__flags) \ + __print_flags(__flags, "|", \ + {COBALT_EVENT_SHARED, "shared"}, \ + {COBALT_EVENT_PRIO, "prio"}) + +TRACE_EVENT(cobalt_event_init, + TP_PROTO(const struct cobalt_event_shadow __user *u_event, + unsigned long value, int flags), + TP_ARGS(u_event, value, flags), + TP_STRUCT__entry( + __field(const struct cobalt_event_shadow __user *, u_event) + __field(unsigned long, value) + __field(int, flags) + ), + TP_fast_assign( + __entry->u_event = u_event; + __entry->value = value; + __entry->flags = flags; + ), + TP_printk("event=%p value=%lu 
flags=%#x(%s)", + __entry->u_event, __entry->value, + __entry->flags, cobalt_print_evflags(__entry->flags)) +); + +#define cobalt_print_evmode(__mode) \ + __print_symbolic(__mode, \ + {COBALT_EVENT_ANY, "any"}, \ + {COBALT_EVENT_ALL, "all"}) + +TRACE_EVENT(cobalt_event_timedwait, + TP_PROTO(const struct cobalt_event_shadow __user *u_event, + unsigned long bits, int mode, + const struct timespec64 *timeout), + TP_ARGS(u_event, bits, mode, timeout), + TP_STRUCT__entry( + __field(const struct cobalt_event_shadow __user *, u_event) + __field(unsigned long, bits) + __field(int, mode) + __timespec_fields(timeout) + ), + TP_fast_assign( + __entry->u_event = u_event; + __entry->bits = bits; + __entry->mode = mode; + __assign_timespec(timeout, timeout); + ), + TP_printk("event=%p bits=%#lx mode=%#x(%s) timeout=(%lld.%09ld)", + __entry->u_event, __entry->bits, __entry->mode, + cobalt_print_evmode(__entry->mode), + __timespec_args(timeout)) +); + +TRACE_EVENT(cobalt_event_wait, + TP_PROTO(const struct cobalt_event_shadow __user *u_event, + unsigned long bits, int mode), + TP_ARGS(u_event, bits, mode), + TP_STRUCT__entry( + __field(const struct cobalt_event_shadow __user *, u_event) + __field(unsigned long, bits) + __field(int, mode) + ), + TP_fast_assign( + __entry->u_event = u_event; + __entry->bits = bits; + __entry->mode = mode; + ), + TP_printk("event=%p bits=%#lx mode=%#x(%s)", + __entry->u_event, __entry->bits, __entry->mode, + cobalt_print_evmode(__entry->mode)) +); + +DECLARE_EVENT_CLASS(cobalt_event_ident, + TP_PROTO(const struct cobalt_event_shadow __user *u_event), + TP_ARGS(u_event), + TP_STRUCT__entry( + __field(const struct cobalt_event_shadow __user *, u_event) + ), + TP_fast_assign( + __entry->u_event = u_event; + ), + TP_printk("event=%p", __entry->u_event) +); + +DEFINE_EVENT(cobalt_event_ident, cobalt_event_destroy, + TP_PROTO(const struct cobalt_event_shadow __user *u_event), + TP_ARGS(u_event) +); + +DEFINE_EVENT(cobalt_event_ident, cobalt_event_sync, + TP_PROTO(const struct cobalt_event_shadow __user *u_event), + TP_ARGS(u_event) +); + +DEFINE_EVENT(cobalt_event_ident, cobalt_event_inquire, + TP_PROTO(const struct cobalt_event_shadow __user *u_event), + TP_ARGS(u_event) +); + +#endif /* _TRACE_COBALT_POSIX_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cobalt-posix +#include --- linux/include/trace/events/cobalt-rtdm.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/trace/events/cobalt-rtdm.h 2022-03-21 12:58:28.904893691 +0100 @@ -0,0 +1,554 @@ +/* + * Copyright (C) 2014 Jan Kiszka . + * Copyright (C) 2014 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cobalt_rtdm + +#if !defined(_TRACE_COBALT_RTDM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_COBALT_RTDM_H + +#include +#include +#include + +struct rtdm_fd; +struct rtdm_event; +struct rtdm_sem; +struct rtdm_mutex; +struct xnthread; +struct rtdm_device; +struct rtdm_dev_context; +struct _rtdm_mmap_request; + +DECLARE_EVENT_CLASS(fd_event, + TP_PROTO(struct rtdm_fd *fd, int ufd), + TP_ARGS(fd, ufd), + + TP_STRUCT__entry( + __field(struct rtdm_device *, dev) + __field(int, ufd) + ), + + TP_fast_assign( + __entry->dev = rtdm_fd_to_context(fd)->device; + __entry->ufd = ufd; + ), + + TP_printk("device=%p fd=%d", + __entry->dev, __entry->ufd) +); + +DECLARE_EVENT_CLASS(fd_request, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, unsigned long arg), + TP_ARGS(task, fd, ufd, arg), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(struct rtdm_device *, dev) + __field(int, ufd) + __field(unsigned long, arg) + ), + + TP_fast_assign( + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->pid = task_pid_nr(task); + __entry->dev = rtdm_fd_to_context(fd)->device; + __entry->ufd = ufd; + __entry->arg = arg; + ), + + TP_printk("device=%p fd=%d arg=%#lx pid=%d comm=%s", + __entry->dev, __entry->ufd, __entry->arg, + __entry->pid, __entry->comm) +); + +DECLARE_EVENT_CLASS(fd_request_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, int status), + TP_ARGS(task, fd, ufd, status), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(struct rtdm_device *, dev) + __field(int, ufd) + ), + + TP_fast_assign( + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->pid = task_pid_nr(task); + __entry->dev = + !IS_ERR(fd) ? 
rtdm_fd_to_context(fd)->device : NULL; + __entry->ufd = ufd; + ), + + TP_printk("device=%p fd=%d pid=%d comm=%s", + __entry->dev, __entry->ufd, __entry->pid, __entry->comm) +); + +DECLARE_EVENT_CLASS(task_op, + TP_PROTO(struct xnthread *task), + TP_ARGS(task), + + TP_STRUCT__entry( + __field(struct xnthread *, task) + __string(task_name, task->name) + ), + + TP_fast_assign( + __entry->task = task; + __assign_str(task_name, task->name); + ), + + TP_printk("task %p(%s)", __entry->task, __get_str(task_name)) +); + +DECLARE_EVENT_CLASS(event_op, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev), + + TP_STRUCT__entry( + __field(struct rtdm_event *, ev) + ), + + TP_fast_assign( + __entry->ev = ev; + ), + + TP_printk("event=%p", __entry->ev) +); + +DECLARE_EVENT_CLASS(sem_op, + TP_PROTO(struct rtdm_sem *sem), + TP_ARGS(sem), + + TP_STRUCT__entry( + __field(struct rtdm_sem *, sem) + ), + + TP_fast_assign( + __entry->sem = sem; + ), + + TP_printk("sem=%p", __entry->sem) +); + +DECLARE_EVENT_CLASS(mutex_op, + TP_PROTO(struct rtdm_mutex *mutex), + TP_ARGS(mutex), + + TP_STRUCT__entry( + __field(struct rtdm_mutex *, mutex) + ), + + TP_fast_assign( + __entry->mutex = mutex; + ), + + TP_printk("mutex=%p", __entry->mutex) +); + +TRACE_EVENT(cobalt_device_register, + TP_PROTO(struct rtdm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(struct rtdm_device *, dev) + __string(device_name, dev->name) + __field(int, flags) + __field(int, class_id) + __field(int, subclass_id) + __field(int, profile_version) + ), + + TP_fast_assign( + __entry->dev = dev; + __assign_str(device_name, dev->name); + __entry->flags = dev->driver->device_flags; + __entry->class_id = dev->driver->profile_info.class_id; + __entry->subclass_id = dev->driver->profile_info.subclass_id; + __entry->profile_version = dev->driver->profile_info.version; + ), + + TP_printk("%s device %s=%p flags=0x%x, class=%d.%d profile=%d", + (__entry->flags & RTDM_DEVICE_TYPE_MASK) + == RTDM_NAMED_DEVICE ? 
"named" : "protocol", + __get_str(device_name), __entry->dev, + __entry->flags, __entry->class_id, __entry->subclass_id, + __entry->profile_version) +); + +TRACE_EVENT(cobalt_device_unregister, + TP_PROTO(struct rtdm_device *dev), + TP_ARGS(dev), + + TP_STRUCT__entry( + __field(struct rtdm_device *, dev) + __string(device_name, dev->name) + ), + + TP_fast_assign( + __entry->dev = dev; + __assign_str(device_name, dev->name); + ), + + TP_printk("device %s=%p", + __get_str(device_name), __entry->dev) +); + +DEFINE_EVENT(fd_event, cobalt_fd_created, + TP_PROTO(struct rtdm_fd *fd, int ufd), + TP_ARGS(fd, ufd) +); + +DEFINE_EVENT(fd_request, cobalt_fd_open, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long oflags), + TP_ARGS(task, fd, ufd, oflags) +); + +DEFINE_EVENT(fd_request, cobalt_fd_close, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long lock_count), + TP_ARGS(task, fd, ufd, lock_count) +); + +DEFINE_EVENT(fd_request, cobalt_fd_socket, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long protocol_family), + TP_ARGS(task, fd, ufd, protocol_family) +); + +DEFINE_EVENT(fd_request, cobalt_fd_read, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long len), + TP_ARGS(task, fd, ufd, len) +); + +DEFINE_EVENT(fd_request, cobalt_fd_write, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long len), + TP_ARGS(task, fd, ufd, len) +); + +DEFINE_EVENT(fd_request, cobalt_fd_ioctl, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long request), + TP_ARGS(task, fd, ufd, request) +); + +DEFINE_EVENT(fd_request, cobalt_fd_sendmsg, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long flags), + TP_ARGS(task, fd, ufd, flags) +); + +DEFINE_EVENT(fd_request, cobalt_fd_sendmmsg, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long flags), + TP_ARGS(task, fd, ufd, flags) +); + +DEFINE_EVENT(fd_request, cobalt_fd_recvmsg, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long flags), + TP_ARGS(task, fd, ufd, flags) +); + +DEFINE_EVENT(fd_request, cobalt_fd_recvmmsg, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + unsigned long flags), + TP_ARGS(task, fd, ufd, flags) +); + +#define cobalt_print_protbits(__prot) \ + __print_flags(__prot, "|", \ + {PROT_EXEC, "exec"}, \ + {PROT_READ, "read"}, \ + {PROT_WRITE, "write"}) + +#define cobalt_print_mapbits(__flags) \ + __print_flags(__flags, "|", \ + {MAP_SHARED, "shared"}, \ + {MAP_PRIVATE, "private"}, \ + {MAP_ANONYMOUS, "anon"}, \ + {MAP_FIXED, "fixed"}, \ + {MAP_HUGETLB, "huge"}, \ + {MAP_NONBLOCK, "nonblock"}, \ + {MAP_NORESERVE, "noreserve"}, \ + {MAP_POPULATE, "populate"}, \ + {MAP_UNINITIALIZED, "uninit"}) + +TRACE_EVENT(cobalt_fd_mmap, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, struct _rtdm_mmap_request *rma), + TP_ARGS(task, fd, ufd, rma), + + TP_STRUCT__entry( + __array(char, comm, TASK_COMM_LEN) + __field(pid_t, pid) + __field(struct rtdm_device *, dev) + __field(int, ufd) + __field(size_t, length) + __field(off_t, offset) + __field(int, prot) + __field(int, flags) + ), + + TP_fast_assign( + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + __entry->pid = task_pid_nr(task); + __entry->dev = rtdm_fd_to_context(fd)->device; + __entry->ufd = ufd; + __entry->length = rma->length; + __entry->offset = rma->offset; + __entry->prot = rma->prot; + 
__entry->flags = rma->flags; + ), + + TP_printk("device=%p fd=%d area={ len:%zu, off:%Lu }" + " prot=%#x(%s) flags=%#x(%s) pid=%d comm=%s", + __entry->dev, __entry->ufd, __entry->length, + (unsigned long long)__entry->offset, + __entry->prot, cobalt_print_protbits(__entry->prot), + __entry->flags, cobalt_print_mapbits(__entry->flags), + __entry->pid, __entry->comm) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_ioctl_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_read_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_write_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_recvmsg_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_recvmmsg_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_sendmsg_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_sendmmsg_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(fd_request_status, cobalt_fd_mmap_status, + TP_PROTO(struct task_struct *task, + struct rtdm_fd *fd, int ufd, + int status), + TP_ARGS(task, fd, ufd, status) +); + +DEFINE_EVENT(task_op, cobalt_driver_task_join, + TP_PROTO(struct xnthread *task), + TP_ARGS(task) +); + +TRACE_EVENT(cobalt_driver_event_init, + TP_PROTO(struct rtdm_event *ev, unsigned long pending), + TP_ARGS(ev, pending), + + TP_STRUCT__entry( + __field(struct rtdm_event *, ev) + __field(unsigned long, pending) + ), + + TP_fast_assign( + __entry->ev = ev; + __entry->pending = pending; + ), + + TP_printk("event=%p pending=%#lx", + __entry->ev, __entry->pending) +); + +TRACE_EVENT(cobalt_driver_event_wait, + TP_PROTO(struct rtdm_event *ev, struct xnthread *task), + TP_ARGS(ev, task), + + TP_STRUCT__entry( + __field(struct xnthread *, task) + __string(task_name, task->name) + __field(struct rtdm_event *, ev) + ), + + TP_fast_assign( + __entry->task = task; + __assign_str(task_name, task->name); + __entry->ev = ev; + ), + + TP_printk("event=%p task=%p(%s)", + __entry->ev, __entry->task, __get_str(task_name)) +); + +DEFINE_EVENT(event_op, cobalt_driver_event_signal, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev) +); + +DEFINE_EVENT(event_op, cobalt_driver_event_clear, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev) +); + +DEFINE_EVENT(event_op, cobalt_driver_event_pulse, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev) +); + +DEFINE_EVENT(event_op, cobalt_driver_event_destroy, + TP_PROTO(struct rtdm_event *ev), + TP_ARGS(ev) +); + +TRACE_EVENT(cobalt_driver_sem_init, + TP_PROTO(struct rtdm_sem *sem, unsigned long value), + TP_ARGS(sem, value), + + TP_STRUCT__entry( + __field(struct rtdm_sem *, sem) + __field(unsigned long, value) + ), + + TP_fast_assign( + __entry->sem = sem; + __entry->value = value; + ), + + TP_printk("sem=%p value=%lu", + __entry->sem, __entry->value) +); + 
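+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * patch): the RTDM core is expected to emit the driver events above from
+ * the matching service entry points, e.g. a semaphore initializer roughly
+ * shaped like:
+ *
+ *	void rtdm_sem_init(struct rtdm_sem *sem, unsigned long value)
+ *	{
+ *		trace_cobalt_driver_sem_init(sem, value);
+ *		... set up the underlying synchronization object here ...
+ *	}
+ *
+ * Only the tracepoint name and its (sem, value) signature come from the
+ * definition above; the surrounding function body is hypothetical.
+ */
+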
+TRACE_EVENT(cobalt_driver_sem_wait, + TP_PROTO(struct rtdm_sem *sem, struct xnthread *task), + TP_ARGS(sem, task), + + TP_STRUCT__entry( + __field(struct xnthread *, task) + __string(task_name, task->name) + __field(struct rtdm_sem *, sem) + ), + + TP_fast_assign( + __entry->task = task; + __assign_str(task_name, task->name); + __entry->sem = sem; + ), + + TP_printk("sem=%p task=%p(%s)", + __entry->sem, __entry->task, __get_str(task_name)) +); + +DEFINE_EVENT(sem_op, cobalt_driver_sem_up, + TP_PROTO(struct rtdm_sem *sem), + TP_ARGS(sem) +); + +DEFINE_EVENT(sem_op, cobalt_driver_sem_destroy, + TP_PROTO(struct rtdm_sem *sem), + TP_ARGS(sem) +); + +DEFINE_EVENT(mutex_op, cobalt_driver_mutex_init, + TP_PROTO(struct rtdm_mutex *mutex), + TP_ARGS(mutex) +); + +DEFINE_EVENT(mutex_op, cobalt_driver_mutex_release, + TP_PROTO(struct rtdm_mutex *mutex), + TP_ARGS(mutex) +); + +DEFINE_EVENT(mutex_op, cobalt_driver_mutex_destroy, + TP_PROTO(struct rtdm_mutex *mutex), + TP_ARGS(mutex) +); + +TRACE_EVENT(cobalt_driver_mutex_wait, + TP_PROTO(struct rtdm_mutex *mutex, struct xnthread *task), + TP_ARGS(mutex, task), + + TP_STRUCT__entry( + __field(struct xnthread *, task) + __string(task_name, task->name) + __field(struct rtdm_mutex *, mutex) + ), + + TP_fast_assign( + __entry->task = task; + __assign_str(task_name, task->name); + __entry->mutex = mutex; + ), + + TP_printk("mutex=%p task=%p(%s)", + __entry->mutex, __entry->task, __get_str(task_name)) +); + +#endif /* _TRACE_COBALT_RTDM_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cobalt-rtdm +#include --- linux/include/trace/events/cobalt-core.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/include/trace/events/cobalt-core.h 2022-03-21 12:58:28.896893769 +0100 @@ -0,0 +1,908 @@ +/* + * Copyright (C) 2014 Jan Kiszka . + * Copyright (C) 2014 Philippe Gerum . + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cobalt_core + +#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_COBALT_CORE_H + +#include +#include +#include +#include +#include + +struct xnsched; +struct xnthread; +struct xnsynch; +struct xnsched_class; +struct xnsched_quota_group; +struct xnthread_init_attr; + +DECLARE_EVENT_CLASS(thread_event, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread), + + TP_STRUCT__entry( + __field(pid_t, pid) + __field(unsigned long, state) + __field(unsigned long, info) + ), + + TP_fast_assign( + __entry->state = thread->state; + __entry->info = thread->info; + __entry->pid = xnthread_host_pid(thread); + ), + + TP_printk("pid=%d state=0x%lx info=0x%lx", + __entry->pid, __entry->state, __entry->info) +); + +DECLARE_EVENT_CLASS(curr_thread_event, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __field(unsigned long, state) + __field(unsigned long, info) + ), + + TP_fast_assign( + __entry->state = thread->state; + __entry->info = thread->info; + ), + + TP_printk("state=0x%lx info=0x%lx", + __entry->state, __entry->info) +); + +DECLARE_EVENT_CLASS(synch_wait_event, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch), + + TP_STRUCT__entry( + __field(struct xnsynch *, synch) + ), + + TP_fast_assign( + __entry->synch = synch; + ), + + TP_printk("synch=%p", __entry->synch) +); + +DECLARE_EVENT_CLASS(synch_post_event, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch), + + TP_STRUCT__entry( + __field(struct xnsynch *, synch) + ), + + TP_fast_assign( + __entry->synch = synch; + ), + + TP_printk("synch=%p", __entry->synch) +); + +DECLARE_EVENT_CLASS(irq_event, + TP_PROTO(unsigned int irq), + TP_ARGS(irq), + + TP_STRUCT__entry( + __field(unsigned int, irq) + ), + + TP_fast_assign( + __entry->irq = irq; + ), + + TP_printk("irq=%u", __entry->irq) +); + +DECLARE_EVENT_CLASS(clock_event, + TP_PROTO(unsigned int irq), + TP_ARGS(irq), + + TP_STRUCT__entry( + __field(unsigned int, irq) + ), + + TP_fast_assign( + __entry->irq = irq; + ), + + TP_printk("clock_irq=%u", __entry->irq) +); + +DECLARE_EVENT_CLASS(timer_event, + TP_PROTO(struct xntimer *timer), + TP_ARGS(timer), + + TP_STRUCT__entry( + __field(struct xntimer *, timer) + ), + + TP_fast_assign( + __entry->timer = timer; + ), + + TP_printk("timer=%p", __entry->timer) +); + +DECLARE_EVENT_CLASS(registry_event, + TP_PROTO(const char *key, void *addr), + TP_ARGS(key, addr), + + TP_STRUCT__entry( + __string(key, key ?: "(anon)") + __field(void *, addr) + ), + + TP_fast_assign( + __assign_str(key, key ?: "(anon)"); + __entry->addr = addr; + ), + + TP_printk("key=%s, addr=%p", __get_str(key), __entry->addr) +); + +TRACE_EVENT(cobalt_schedule, + TP_PROTO(struct xnsched *sched), + TP_ARGS(sched), + + TP_STRUCT__entry( + __field(unsigned long, status) + ), + + TP_fast_assign( + __entry->status = sched->status; + ), + + TP_printk("status=0x%lx", __entry->status) +); + +TRACE_EVENT(cobalt_schedule_remote, + TP_PROTO(struct xnsched *sched), + TP_ARGS(sched), + + TP_STRUCT__entry( + __field(unsigned long, status) + ), + + TP_fast_assign( + __entry->status = sched->status; + ), + + TP_printk("status=0x%lx", __entry->status) +); + +TRACE_EVENT(cobalt_switch_context, + TP_PROTO(struct xnthread *prev, struct xnthread *next), + TP_ARGS(prev, next), + + TP_STRUCT__entry( + __field(struct xnthread *, prev) + __string(prev_name, prev->name) + __field(pid_t, prev_pid) + __field(int, prev_prio) + __field(unsigned long, prev_state) + 
__field(struct xnthread *, next) + __string(next_name, next->name) + __field(pid_t, next_pid) + __field(int, next_prio) + ), + + TP_fast_assign( + __entry->prev = prev; + __assign_str(prev_name, prev->name); + __entry->prev_pid = xnthread_host_pid(prev); + __entry->prev_prio = xnthread_current_priority(prev); + __entry->prev_state = prev->state; + __entry->next = next; + __assign_str(next_name, next->name); + __entry->next_pid = xnthread_host_pid(next); + __entry->next_prio = xnthread_current_priority(next); + ), + + TP_printk("prev_name=%s prev_pid=%d prev_prio=%d prev_state=0x%lx ==> next_name=%s next_pid=%d next_prio=%d", + __get_str(prev_name), __entry->prev_pid, + __entry->prev_prio, __entry->prev_state, + __get_str(next_name), __entry->next_pid, __entry->next_prio) +); + +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + +TRACE_EVENT(cobalt_schedquota_refill, + TP_PROTO(int dummy), + TP_ARGS(dummy), + + TP_STRUCT__entry( + __field(int, dummy) + ), + + TP_fast_assign( + (void)dummy; + ), + + TP_printk("%s", "") +); + +DECLARE_EVENT_CLASS(schedquota_group_event, + TP_PROTO(struct xnsched_quota_group *tg), + TP_ARGS(tg), + + TP_STRUCT__entry( + __field(int, tgid) + ), + + TP_fast_assign( + __entry->tgid = tg->tgid; + ), + + TP_printk("tgid=%d", + __entry->tgid) +); + +DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_create_group, + TP_PROTO(struct xnsched_quota_group *tg), + TP_ARGS(tg) +); + +DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_destroy_group, + TP_PROTO(struct xnsched_quota_group *tg), + TP_ARGS(tg) +); + +TRACE_EVENT(cobalt_schedquota_set_limit, + TP_PROTO(struct xnsched_quota_group *tg, + int percent, + int peak_percent), + TP_ARGS(tg, percent, peak_percent), + + TP_STRUCT__entry( + __field(int, tgid) + __field(int, percent) + __field(int, peak_percent) + ), + + TP_fast_assign( + __entry->tgid = tg->tgid; + __entry->percent = percent; + __entry->peak_percent = peak_percent; + ), + + TP_printk("tgid=%d percent=%d peak_percent=%d", + __entry->tgid, __entry->percent, __entry->peak_percent) +); + +DECLARE_EVENT_CLASS(schedquota_thread_event, + TP_PROTO(struct xnsched_quota_group *tg, + struct xnthread *thread), + TP_ARGS(tg, thread), + + TP_STRUCT__entry( + __field(int, tgid) + __field(struct xnthread *, thread) + __field(pid_t, pid) + ), + + TP_fast_assign( + __entry->tgid = tg->tgid; + __entry->thread = thread; + __entry->pid = xnthread_host_pid(thread); + ), + + TP_printk("tgid=%d thread=%p pid=%d", + __entry->tgid, __entry->thread, __entry->pid) +); + +DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_add_thread, + TP_PROTO(struct xnsched_quota_group *tg, + struct xnthread *thread), + TP_ARGS(tg, thread) +); + +DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_remove_thread, + TP_PROTO(struct xnsched_quota_group *tg, + struct xnthread *thread), + TP_ARGS(tg, thread) +); + +#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */ + +TRACE_EVENT(cobalt_thread_init, + TP_PROTO(struct xnthread *thread, + const struct xnthread_init_attr *attr, + struct xnsched_class *sched_class), + TP_ARGS(thread, attr, sched_class), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __string(thread_name, thread->name) + __string(class_name, sched_class->name) + __field(unsigned long, flags) + __field(int, cprio) + ), + + TP_fast_assign( + __entry->thread = thread; + __assign_str(thread_name, thread->name); + __entry->flags = attr->flags; + __assign_str(class_name, sched_class->name); + __entry->cprio = thread->cprio; + ), + + TP_printk("thread=%p name=%s flags=0x%lx class=%s prio=%d", 
+ __entry->thread, __get_str(thread_name), __entry->flags, + __get_str(class_name), __entry->cprio) +); + +TRACE_EVENT(cobalt_thread_suspend, + TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout, + xntmode_t timeout_mode, struct xnsynch *wchan), + TP_ARGS(thread, mask, timeout, timeout_mode, wchan), + + TP_STRUCT__entry( + __field(pid_t, pid) + __field(unsigned long, mask) + __field(xnticks_t, timeout) + __field(xntmode_t, timeout_mode) + __field(struct xnsynch *, wchan) + ), + + TP_fast_assign( + __entry->pid = xnthread_host_pid(thread); + __entry->mask = mask; + __entry->timeout = timeout; + __entry->timeout_mode = timeout_mode; + __entry->wchan = wchan; + ), + + TP_printk("pid=%d mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p", + __entry->pid, __entry->mask, + __entry->timeout, __entry->timeout_mode, __entry->wchan) +); + +TRACE_EVENT(cobalt_thread_resume, + TP_PROTO(struct xnthread *thread, unsigned long mask), + TP_ARGS(thread, mask), + + TP_STRUCT__entry( + __string(name, thread->name) + __field(pid_t, pid) + __field(unsigned long, mask) + ), + + TP_fast_assign( + __assign_str(name, thread->name); + __entry->pid = xnthread_host_pid(thread); + __entry->mask = mask; + ), + + TP_printk("name=%s pid=%d mask=0x%lx", + __get_str(name), __entry->pid, __entry->mask) +); + +TRACE_EVENT(cobalt_thread_fault, + TP_PROTO(unsigned long ip, unsigned int type), + TP_ARGS(ip, type), + + TP_STRUCT__entry( + __field(unsigned long, ip) + __field(unsigned int, type) + ), + + TP_fast_assign( + __entry->ip = ip; + __entry->type = type; + ), + + TP_printk("ip=%#lx type=%#x", + __entry->ip, __entry->type) +); + +TRACE_EVENT(cobalt_thread_set_current_prio, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __field(pid_t, pid) + __field(int, cprio) + ), + + TP_fast_assign( + __entry->thread = thread; + __entry->pid = xnthread_host_pid(thread); + __entry->cprio = xnthread_current_priority(thread); + ), + + TP_printk("thread=%p pid=%d prio=%d", + __entry->thread, __entry->pid, __entry->cprio) +); + +DEFINE_EVENT(thread_event, cobalt_thread_start, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(thread_event, cobalt_thread_cancel, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(thread_event, cobalt_thread_join, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(thread_event, cobalt_thread_unblock, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_thread_wait_period, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_thread_missed_period, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_thread_set_mode, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +TRACE_EVENT(cobalt_thread_migrate, + TP_PROTO(unsigned int cpu), + TP_ARGS(cpu), + + TP_STRUCT__entry( + __field(unsigned int, cpu) + ), + + TP_fast_assign( + __entry->cpu = cpu; + ), + + TP_printk("cpu=%u", __entry->cpu) +); + +TRACE_EVENT(cobalt_thread_migrate_passive, + TP_PROTO(struct xnthread *thread, unsigned int cpu), + TP_ARGS(thread, cpu), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __field(pid_t, pid) + __field(unsigned int, cpu) + ), + + TP_fast_assign( + __entry->thread = thread; + __entry->pid = xnthread_host_pid(thread); + __entry->cpu = cpu; + ), + + TP_printk("thread=%p pid=%d cpu=%u", + __entry->thread, 
__entry->pid, __entry->cpu) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_gohard, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_watchdog_signal, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_hardened, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +#define cobalt_print_relax_reason(reason) \ + __print_symbolic(reason, \ + { SIGDEBUG_UNDEFINED, "undefined" }, \ + { SIGDEBUG_MIGRATE_SIGNAL, "signal" }, \ + { SIGDEBUG_MIGRATE_SYSCALL, "syscall" }, \ + { SIGDEBUG_MIGRATE_FAULT, "fault" }) + +TRACE_EVENT(cobalt_shadow_gorelax, + TP_PROTO(int reason), + TP_ARGS(reason), + + TP_STRUCT__entry( + __field(int, reason) + ), + + TP_fast_assign( + __entry->reason = reason; + ), + + TP_printk("reason=%s", cobalt_print_relax_reason(__entry->reason)) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_relaxed, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_entry, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +TRACE_EVENT(cobalt_shadow_map, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread), + + TP_STRUCT__entry( + __field(struct xnthread *, thread) + __field(pid_t, pid) + __field(int, prio) + ), + + TP_fast_assign( + __entry->thread = thread; + __entry->pid = xnthread_host_pid(thread); + __entry->prio = xnthread_base_priority(thread); + ), + + TP_printk("thread=%p pid=%d prio=%d", + __entry->thread, __entry->pid, __entry->prio) +); + +DEFINE_EVENT(curr_thread_event, cobalt_shadow_unmap, + TP_PROTO(struct xnthread *thread), + TP_ARGS(thread) +); + +TRACE_EVENT(cobalt_lostage_request, + TP_PROTO(const char *type, struct task_struct *task), + TP_ARGS(type, task), + + TP_STRUCT__entry( + __field(pid_t, pid) + __array(char, comm, TASK_COMM_LEN) + __field(const char *, type) + ), + + TP_fast_assign( + __entry->type = type; + __entry->pid = task_pid_nr(task); + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + ), + + TP_printk("request=%s pid=%d comm=%s", + __entry->type, __entry->pid, __entry->comm) +); + +TRACE_EVENT(cobalt_lostage_wakeup, + TP_PROTO(struct task_struct *task), + TP_ARGS(task), + + TP_STRUCT__entry( + __field(pid_t, pid) + __array(char, comm, TASK_COMM_LEN) + ), + + TP_fast_assign( + __entry->pid = task_pid_nr(task); + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + ), + + TP_printk("pid=%d comm=%s", + __entry->pid, __entry->comm) +); + +TRACE_EVENT(cobalt_lostage_signal, + TP_PROTO(struct task_struct *task, int sig), + TP_ARGS(task, sig), + + TP_STRUCT__entry( + __field(pid_t, pid) + __array(char, comm, TASK_COMM_LEN) + __field(int, sig) + ), + + TP_fast_assign( + __entry->pid = task_pid_nr(task); + __entry->sig = sig; + memcpy(__entry->comm, task->comm, TASK_COMM_LEN); + ), + + TP_printk("pid=%d comm=%s sig=%d", + __entry->pid, __entry->comm, __entry->sig) +); + +DEFINE_EVENT(irq_event, cobalt_irq_entry, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(irq_event, cobalt_irq_exit, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(irq_event, cobalt_irq_attach, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(irq_event, cobalt_irq_detach, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(irq_event, cobalt_irq_enable, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(irq_event, cobalt_irq_disable, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(clock_event, cobalt_clock_entry, + 
TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(clock_event, cobalt_clock_exit, + TP_PROTO(unsigned int irq), + TP_ARGS(irq) +); + +DEFINE_EVENT(timer_event, cobalt_timer_stop, + TP_PROTO(struct xntimer *timer), + TP_ARGS(timer) +); + +DEFINE_EVENT(timer_event, cobalt_timer_expire, + TP_PROTO(struct xntimer *timer), + TP_ARGS(timer) +); + +#define cobalt_print_timer_mode(mode) \ + __print_symbolic(mode, \ + { XN_RELATIVE, "rel" }, \ + { XN_ABSOLUTE, "abs" }, \ + { XN_REALTIME, "rt" }) + +TRACE_EVENT(cobalt_timer_start, + TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval, + xntmode_t mode), + TP_ARGS(timer, value, interval, mode), + + TP_STRUCT__entry( + __field(struct xntimer *, timer) +#ifdef CONFIG_XENO_OPT_STATS + __string(name, timer->name) +#endif + __field(xnticks_t, value) + __field(xnticks_t, interval) + __field(xntmode_t, mode) + ), + + TP_fast_assign( + __entry->timer = timer; +#ifdef CONFIG_XENO_OPT_STATS + __assign_str(name, timer->name); +#endif + __entry->value = value; + __entry->interval = interval; + __entry->mode = mode; + ), + + TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s", + __entry->timer, +#ifdef CONFIG_XENO_OPT_STATS + __get_str(name), +#else + "(anon)", +#endif + __entry->value, __entry->interval, + cobalt_print_timer_mode(__entry->mode)) +); + +#ifdef CONFIG_SMP + +TRACE_EVENT(cobalt_timer_migrate, + TP_PROTO(struct xntimer *timer, unsigned int cpu), + TP_ARGS(timer, cpu), + + TP_STRUCT__entry( + __field(struct xntimer *, timer) + __field(unsigned int, cpu) + ), + + TP_fast_assign( + __entry->timer = timer; + __entry->cpu = cpu; + ), + + TP_printk("timer=%p cpu=%u", + __entry->timer, __entry->cpu) +); + +#endif /* CONFIG_SMP */ + +DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_release, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_flush, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(synch_post_event, cobalt_synch_forget, + TP_PROTO(struct xnsynch *synch), + TP_ARGS(synch) +); + +DEFINE_EVENT(registry_event, cobalt_registry_enter, + TP_PROTO(const char *key, void *addr), + TP_ARGS(key, addr) +); + +DEFINE_EVENT(registry_event, cobalt_registry_remove, + TP_PROTO(const char *key, void *addr), + TP_ARGS(key, addr) +); + +DEFINE_EVENT(registry_event, cobalt_registry_unlink, + TP_PROTO(const char *key, void *addr), + TP_ARGS(key, addr) +); + +TRACE_EVENT(cobalt_tick_shot, + TP_PROTO(s64 delta), + TP_ARGS(delta), + + TP_STRUCT__entry( + __field(u64, secs) + __field(u32, nsecs) + __field(s64, delta) + ), + + TP_fast_assign( + __entry->delta = delta; + __entry->secs = div_u64_rem(trace_clock_local() + delta, + NSEC_PER_SEC, &__entry->nsecs); + ), + + TP_printk("next tick at %Lu.%06u (delay: %Ld us)", + (unsigned long long)__entry->secs, + __entry->nsecs / 1000, div_s64(__entry->delta, 1000)) +); + +TRACE_EVENT(cobalt_trace, + TP_PROTO(const char *msg), + TP_ARGS(msg), + TP_STRUCT__entry( + 
__string(msg, msg) + ), + TP_fast_assign( + __assign_str(msg, msg); + ), + TP_printk("%s", __get_str(msg)) +); + +TRACE_EVENT(cobalt_trace_longval, + TP_PROTO(int id, u64 val), + TP_ARGS(id, val), + TP_STRUCT__entry( + __field(int, id) + __field(u64, val) + ), + TP_fast_assign( + __entry->id = id; + __entry->val = val; + ), + TP_printk("id=%#x, v=%llu", __entry->id, __entry->val) +); + +TRACE_EVENT(cobalt_trace_pid, + TP_PROTO(pid_t pid, int prio), + TP_ARGS(pid, prio), + TP_STRUCT__entry( + __field(pid_t, pid) + __field(int, prio) + ), + TP_fast_assign( + __entry->pid = pid; + __entry->prio = prio; + ), + TP_printk("pid=%d, prio=%d", __entry->pid, __entry->prio) +); + +TRACE_EVENT(cobalt_latpeak, + TP_PROTO(int latmax_ns), + TP_ARGS(latmax_ns), + TP_STRUCT__entry( + __field(int, latmax_ns) + ), + TP_fast_assign( + __entry->latmax_ns = latmax_ns; + ), + TP_printk("** latency peak: %d.%.3d us **", + __entry->latmax_ns / 1000, + __entry->latmax_ns % 1000) +); + +/* Basically cobalt_trace() + trigger point */ +TRACE_EVENT(cobalt_trigger, + TP_PROTO(const char *issuer), + TP_ARGS(issuer), + TP_STRUCT__entry( + __string(issuer, issuer) + ), + TP_fast_assign( + __assign_str(issuer, issuer); + ), + TP_printk("%s", __get_str(issuer)) +); + +#endif /* _TRACE_COBALT_CORE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cobalt-core +#include --- linux/kernel/xenomai/pipeline/init.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/pipeline/init.c 2022-03-21 12:58:29.144891350 +0100 @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Philippe Gerum + */ + +#include +#include +#include +#include + +int __init pipeline_init(void) +{ + int ret; + + if (cobalt_machine.init) { + ret = cobalt_machine.init(); + if (ret) + return ret; + } + + /* Enable the Xenomai out-of-band stage */ + enable_oob_stage("Xenomai"); + + ret = xnclock_init(); + if (ret) + goto fail_clock; + + return 0; + +fail_clock: + if (cobalt_machine.cleanup) + cobalt_machine.cleanup(); + + return ret; +} + +int __init pipeline_late_init(void) +{ + if (cobalt_machine.late_init) + return cobalt_machine.late_init(); + + return 0; +} + +__init void pipeline_cleanup(void) +{ + /* Disable the Xenomai stage */ + disable_oob_stage(); + + xnclock_cleanup(); +} --- linux/kernel/xenomai/pipeline/sched.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/pipeline/sched.c 2022-03-21 12:58:29.141891380 +0100 @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2001-2020 Philippe Gerum . + */ + +#include +#include +#include +#include +#include + +/* in-band stage, hard_irqs_disabled() */ +bool irq_cpuidle_control(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + /* + * Deny entering sleep state if this entails stopping the + * timer (i.e. C3STOP misfeature). + */ + if (state && (state->flags & CPUIDLE_FLAG_TIMER_STOP)) + return false; + + return true; +} + +bool pipeline_switch_to(struct xnthread *prev, struct xnthread *next, + bool leaving_inband) +{ + return dovetail_context_switch(&xnthread_archtcb(prev)->altsched, + &xnthread_archtcb(next)->altsched, leaving_inband); +} + +void pipeline_init_shadow_tcb(struct xnthread *thread) +{ + /* + * Initialize the alternate scheduling control block. 
+ */ + dovetail_init_altsched(&xnthread_archtcb(thread)->altsched); + + trace_cobalt_shadow_map(thread); +} + +void pipeline_init_root_tcb(struct xnthread *thread) +{ + /* + * Initialize the alternate scheduling control block. + */ + dovetail_init_altsched(&xnthread_archtcb(thread)->altsched); +} + +int pipeline_leave_inband(void) +{ + return dovetail_leave_inband(); +} + +int pipeline_leave_oob_prepare(void) +{ + int suspmask = XNRELAX; + struct xnthread *curr = xnthread_current(); + + dovetail_leave_oob(); + /* + * If current is being debugged, record that it should migrate + * back in case it resumes in userspace. If it resumes in + * kernel space, i.e. over a restarting syscall, the + * associated hardening will clear XNCONTHI. + */ + if (xnthread_test_state(curr, XNSSTEP)) { + xnthread_set_info(curr, XNCONTHI); + dovetail_request_ucall(current); + suspmask |= XNDBGSTOP; + } + return suspmask; +} + +void pipeline_leave_oob_finish(void) +{ + dovetail_resume_inband(); +} + +void pipeline_raise_mayday(struct task_struct *tsk) +{ + dovetail_send_mayday(tsk); +} + +void pipeline_clear_mayday(void) /* May solely affect current. */ +{ + clear_thread_flag(TIF_MAYDAY); +} + +irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id) +{ + trace_cobalt_schedule_remote(xnsched_current()); + + /* Will reschedule from irq_exit_pipeline(). */ + + return IRQ_HANDLED; +} --- linux/kernel/xenomai/pipeline/intr.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/pipeline/intr.c 2022-03-21 12:58:29.137891419 +0100 @@ -0,0 +1,142 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + */ + +#include +#include +#include +#include +#include +#include + +void xnintr_host_tick(struct xnsched *sched) /* hard irqs off */ +{ + sched->lflags &= ~XNHTICK; + tick_notify_proxy(); +} + +/* + * Low-level core clock irq handler. This one forwards ticks from the + * Xenomai platform timer to nkclock exclusively. + */ +void xnintr_core_clock_handler(void) +{ + struct xnsched *sched; + + xnlock_get(&nklock); + xnclock_tick(&nkclock); + xnlock_put(&nklock); + + /* + * If the core clock interrupt preempted a real-time thread, + * any transition to the root thread has already triggered a + * host tick propagation from xnsched_run(), so at this point, + * we only need to propagate the host tick in case the + * interrupt preempted the root thread. + */ + sched = xnsched_current(); + if ((sched->lflags & XNHTICK) && + xnthread_test_state(sched->curr, XNROOT)) + xnintr_host_tick(sched); +} + +static irqreturn_t xnintr_irq_handler(int irq, void *dev_id) +{ + struct xnintr *intr = dev_id; + int ret; + + ret = intr->isr(intr); + XENO_WARN_ON_ONCE(USER, (ret & XN_IRQ_STATMASK) == 0); + + if (ret & XN_IRQ_DISABLE) + disable_irq(irq); + else if (ret & XN_IRQ_PROPAGATE) + irq_post_inband(irq); + + return ret & XN_IRQ_NONE ? IRQ_NONE : IRQ_HANDLED; +} + +int xnintr_init(struct xnintr *intr, const char *name, + unsigned int irq, xnisr_t isr, xniack_t iack, + int flags) +{ + secondary_mode_only(); + + intr->irq = irq; + intr->isr = isr; + intr->iack = NULL; /* unused */ + intr->cookie = NULL; + intr->name = name ? 
: ""; + intr->flags = flags; + intr->status = 0; + intr->unhandled = 0; /* unused */ + raw_spin_lock_init(&intr->lock); /* unused */ + + return 0; +} +EXPORT_SYMBOL_GPL(xnintr_init); + +void xnintr_destroy(struct xnintr *intr) +{ + secondary_mode_only(); + xnintr_detach(intr); +} +EXPORT_SYMBOL_GPL(xnintr_destroy); + +int xnintr_attach(struct xnintr *intr, void *cookie, const cpumask_t *cpumask) +{ + cpumask_t tmp_mask, *effective_mask; + int ret; + + secondary_mode_only(); + + intr->cookie = cookie; + + if (!cpumask) { + effective_mask = &xnsched_realtime_cpus; + } else { + effective_mask = &tmp_mask; + cpumask_and(effective_mask, &xnsched_realtime_cpus, cpumask); + if (cpumask_empty(effective_mask)) + return -EINVAL; + } + ret = irq_set_affinity_hint(intr->irq, effective_mask); + if (ret) + return ret; + + return request_irq(intr->irq, xnintr_irq_handler, IRQF_OOB, + intr->name, intr); +} +EXPORT_SYMBOL_GPL(xnintr_attach); + +void xnintr_detach(struct xnintr *intr) +{ + secondary_mode_only(); + irq_set_affinity_hint(intr->irq, NULL); + free_irq(intr->irq, intr); +} +EXPORT_SYMBOL_GPL(xnintr_detach); + +void xnintr_enable(struct xnintr *intr) +{ +} +EXPORT_SYMBOL_GPL(xnintr_enable); + +void xnintr_disable(struct xnintr *intr) +{ +} +EXPORT_SYMBOL_GPL(xnintr_disable); + +int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask) +{ + cpumask_t effective_mask; + + secondary_mode_only(); + + cpumask_and(&effective_mask, &xnsched_realtime_cpus, cpumask); + if (cpumask_empty(&effective_mask)) + return -EINVAL; + + return irq_set_affinity_hint(intr->irq, &effective_mask); +} +EXPORT_SYMBOL_GPL(xnintr_affinity); --- linux/kernel/xenomai/pipeline/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/pipeline/Makefile 2022-03-21 12:58:29.134891448 +0100 @@ -0,0 +1,5 @@ +ccflags-y += -I$(srctree)/kernel + +obj-y += pipeline.o + +pipeline-y := init.o kevents.o sched.o tick.o syscall.o intr.o --- linux/kernel/xenomai/pipeline/syscall.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/pipeline/syscall.c 2022-03-21 12:58:29.130891487 +0100 @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2005 Philippe Gerum + * Copyright (C) 2005 Gilles Chanteperdrix + */ + +#include +#include +#include +#include +#include + +int handle_pipelined_syscall(struct irq_stage *stage, struct pt_regs *regs) +{ + if (unlikely(running_inband())) + return handle_root_syscall(regs); + + return handle_head_syscall(stage == &inband_stage, regs); +} + +int handle_oob_syscall(struct pt_regs *regs) +{ + return handle_head_syscall(false, regs); +} --- linux/kernel/xenomai/pipeline/tick.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/pipeline/tick.c 2022-03-21 12:58:29.127891516 +0100 @@ -0,0 +1,190 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum . + * Copyright (C) 2004 Gilles Chanteperdrix + */ + +#include +#include +#include +#include +#include +#include + +static DEFINE_PER_CPU(struct clock_proxy_device *, proxy_device); + +const char *pipeline_timer_name(void) +{ + struct clock_proxy_device *dev = per_cpu(proxy_device, 0); + struct clock_event_device *real_dev = dev->real_device; + + /* + * Return the name of the current clock event chip, which is + * the real device controlled by the proxy tick device. 
+ */ + return real_dev->name; +} + +void pipeline_set_timer_shot(unsigned long delay) /* ns */ +{ + struct clock_proxy_device *dev = __this_cpu_read(proxy_device); + struct clock_event_device *real_dev = dev->real_device; + u64 cycles; + ktime_t t; + int ret; + + if (real_dev->features & CLOCK_EVT_FEAT_KTIME) { + t = ktime_add(delay, xnclock_core_read_raw()); + real_dev->set_next_ktime(t, real_dev); + } else { + if (delay <= 0) { + delay = real_dev->min_delta_ns; + } else { + delay = min_t(int64_t, delay, + real_dev->max_delta_ns); + delay = max_t(int64_t, delay, + real_dev->min_delta_ns); + } + cycles = ((u64)delay * real_dev->mult) >> real_dev->shift; + ret = real_dev->set_next_event(cycles, real_dev); + if (ret) + real_dev->set_next_event(real_dev->min_delta_ticks, + real_dev); + } +} + +static int proxy_set_next_ktime(ktime_t expires, + struct clock_event_device *proxy_dev) /* hard irqs on/off */ +{ + struct xnsched *sched; + unsigned long flags; + ktime_t delta; + int ret; + + /* + * Expiration dates of in-band timers are based on the common + * monotonic time base. If the timeout date has already + * elapsed, make sure xntimer_start() does not fail with + * -ETIMEDOUT but programs the hardware for ticking + * immediately instead. + */ + delta = ktime_sub(expires, ktime_get()); + if (delta < 0) + delta = 0; + + xnlock_get_irqsave(&nklock, flags); + sched = xnsched_current(); + ret = xntimer_start(&sched->htimer, delta, XN_INFINITE, XN_RELATIVE); + xnlock_put_irqrestore(&nklock, flags); + + return ret ? -ETIME : 0; +} + +bool pipeline_must_force_program_tick(struct xnsched *sched) +{ + return sched->lflags & XNTSTOP; +} + +static int proxy_set_oneshot_stopped(struct clock_event_device *proxy_dev) +{ + struct clock_event_device *real_dev; + struct clock_proxy_device *dev; + struct xnsched *sched; + spl_t s; + + dev = container_of(proxy_dev, struct clock_proxy_device, proxy_device); + + /* + * In-band wants to disable the clock hardware on entering a + * tickless state, so we have to stop our in-band tick + * emulation. Propagate the request for shutting down the + * hardware to the real device only if we have no outstanding + * OOB timers. CAUTION: the in-band timer is counted when + * assessing the RQ_IDLE condition, so we need to stop it + * prior to testing the latter. + */ + xnlock_get_irqsave(&nklock, s); + sched = xnsched_current(); + xntimer_stop(&sched->htimer); + sched->lflags |= XNTSTOP; + + if (sched->lflags & XNIDLE) { + real_dev = dev->real_device; + real_dev->set_state_oneshot_stopped(real_dev); + } + + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static void setup_proxy(struct clock_proxy_device *dev) +{ + struct clock_event_device *proxy_dev = &dev->proxy_device; + + dev->handle_oob_event = (typeof(dev->handle_oob_event)) + xnintr_core_clock_handler; + proxy_dev->features |= CLOCK_EVT_FEAT_KTIME; + proxy_dev->set_next_ktime = proxy_set_next_ktime; + if (proxy_dev->set_state_oneshot_stopped) + proxy_dev->set_state_oneshot_stopped = proxy_set_oneshot_stopped; + __this_cpu_write(proxy_device, dev); +} + +#ifdef CONFIG_SMP +static irqreturn_t tick_ipi_handler(int irq, void *dev_id) +{ + xnintr_core_clock_handler(); + + return IRQ_HANDLED; +} +#endif + +int pipeline_install_tick_proxy(void) +{ + int ret; + +#ifdef CONFIG_SMP + /* + * We may be running a SMP kernel on a uniprocessor machine + * whose interrupt controller provides no IPI: attempt to hook + * the timer IPI only if the hardware can support multiple + * CPUs. 
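+	 * The same guard is applied to the matching free_percpu_irq()
+	 * calls in the error path below and in
+	 * pipeline_uninstall_tick_proxy().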
+ */ + if (num_possible_cpus() > 1) { + ret = __request_percpu_irq(TIMER_OOB_IPI, + tick_ipi_handler, + IRQF_OOB, "Xenomai timer IPI", + &cobalt_machine_cpudata); + if (ret) + return ret; + } +#endif + + /* Install the proxy tick device */ + ret = tick_install_proxy(setup_proxy, &xnsched_realtime_cpus); + if (ret) + goto fail_proxy; + + return 0; + +fail_proxy: +#ifdef CONFIG_SMP + if (num_possible_cpus() > 1) + free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata); +#endif + + return ret; +} + +void pipeline_uninstall_tick_proxy(void) +{ + /* Uninstall the proxy tick device. */ + tick_uninstall_proxy(&xnsched_realtime_cpus); + +#ifdef CONFIG_SMP + if (num_possible_cpus() > 1) + free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata); +#endif +} --- linux/kernel/xenomai/pipeline/kevents.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/pipeline/kevents.c 2022-03-21 12:58:29.123891555 +0100 @@ -0,0 +1,353 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2001-2014 Philippe Gerum . + * Copyright (C) 2001-2014 The Xenomai project + * Copyright (C) 2006 Gilles Chanteperdrix + * + * SMP support Copyright (C) 2004 The HYADES project + * RTAI/fusion Copyright (C) 2004 The RTAI project + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../posix/process.h" +#include "../posix/thread.h" +#include "../posix/memory.h" + +void arch_inband_task_init(struct task_struct *tsk) +{ + struct cobalt_threadinfo *p = dovetail_task_state(tsk); + + p->thread = NULL; + p->process = NULL; +} + +void handle_oob_trap_entry(unsigned int trapnr, struct pt_regs *regs) +{ + struct xnthread *thread; + struct xnsched *sched; + spl_t s; + + sched = xnsched_current(); + thread = sched->curr; + + /* + * Enable back tracing. + */ + trace_cobalt_thread_fault(xnarch_fault_pc(regs), trapnr); + + if (xnthread_test_state(thread, XNROOT)) + return; + + if (xnarch_fault_bp_p(trapnr) && user_mode(regs)) { + XENO_WARN_ON(CORE, xnthread_test_state(thread, XNRELAX)); + xnlock_get_irqsave(&nklock, s); + xnthread_set_info(thread, XNCONTHI); + dovetail_request_ucall(current); + cobalt_stop_debugged_process(thread); + xnlock_put_irqrestore(&nklock, s); + xnsched_run(); + } + + /* + * If we experienced a trap on behalf of a shadow thread + * running in primary mode, move it to the Linux domain, + * leaving the kernel process the exception. + */ +#if defined(CONFIG_XENO_OPT_DEBUG_COBALT) || defined(CONFIG_XENO_OPT_DEBUG_USER) + if (!user_mode(regs)) { + xntrace_panic_freeze(); + printk(XENO_WARNING + "switching %s to secondary mode after exception #%u in " + "kernel-space at 0x%lx (pid %d)\n", thread->name, + trapnr, + xnarch_fault_pc(regs), + xnthread_host_pid(thread)); + xntrace_panic_dump(); + } else if (xnarch_fault_notify(trapnr)) /* Don't report debug traps */ + printk(XENO_WARNING + "switching %s to secondary mode after exception #%u from " + "user-space at 0x%lx (pid %d)\n", thread->name, + trapnr, + xnarch_fault_pc(regs), + xnthread_host_pid(thread)); +#endif + + if (xnarch_fault_pf_p(trapnr)) + /* + * The page fault counter is not SMP-safe, but it's a + * simple indicator that something went wrong wrt + * memory locking anyway. 
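+		 * A rough count is enough to hint at missing memory
+		 * locking (e.g. no mlockall()) in the faulting process.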
+ */ + xnstat_counter_inc(&thread->stat.pf); + + xnthread_relax(xnarch_fault_notify(trapnr), SIGDEBUG_MIGRATE_FAULT); +} + +static inline int handle_setaffinity_event(struct dovetail_migration_data *d) +{ + return cobalt_handle_setaffinity_event(d->task); +} + +static inline int handle_taskexit_event(struct task_struct *p) +{ + return cobalt_handle_taskexit_event(p); +} + +static inline int handle_user_return(struct task_struct *task) +{ + return cobalt_handle_user_return(task); +} + +void handle_oob_mayday(struct pt_regs *regs) +{ + XENO_BUG_ON(COBALT, !xnthread_test_state(xnthread_current(), XNUSER)); + + xnthread_relax(0, 0); +} + +static int handle_sigwake_event(struct task_struct *p) +{ + struct xnthread *thread; + sigset_t pending; + spl_t s; + + thread = xnthread_from_task(p); + if (thread == NULL) + return KEVENT_PROPAGATE; + + xnlock_get_irqsave(&nklock, s); + + /* + * CAUTION: __TASK_TRACED is not set in p->state yet. This + * state bit will be set right after we return, when the task + * is woken up. + */ + if ((p->ptrace & PT_PTRACED) && !xnthread_test_state(thread, XNSSTEP)) { + /* We already own the siglock. */ + sigorsets(&pending, + &p->pending.signal, + &p->signal->shared_pending.signal); + + if (sigismember(&pending, SIGTRAP) || + sigismember(&pending, SIGSTOP) + || sigismember(&pending, SIGINT)) + cobalt_register_debugged_thread(thread); + } + + if (xnthread_test_state(thread, XNRELAX)) + goto out; + + /* + * Allow a thread stopped for debugging to resume briefly in order to + * migrate to secondary mode. xnthread_relax will reapply XNDBGSTOP. + */ + if (xnthread_test_state(thread, XNDBGSTOP)) + xnthread_resume(thread, XNDBGSTOP); + + __xnthread_kick(thread); +out: + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); + + return KEVENT_PROPAGATE; +} + +static inline int handle_cleanup_event(struct mm_struct *mm) +{ + return cobalt_handle_cleanup_event(mm); +} + +void pipeline_cleanup_process(void) +{ + dovetail_stop_altsched(); +} + +int handle_ptrace_resume(struct task_struct *tracee) +{ + struct xnthread *thread; + spl_t s; + + thread = xnthread_from_task(tracee); + if (thread == NULL) + return KEVENT_PROPAGATE; + + if (xnthread_test_state(thread, XNSSTEP)) { + xnlock_get_irqsave(&nklock, s); + + xnthread_resume(thread, XNDBGSTOP); + cobalt_unregister_debugged_thread(thread); + + xnlock_put_irqrestore(&nklock, s); + } + + return KEVENT_PROPAGATE; +} + +static void handle_ptrace_cont(void) +{ + struct xnthread *curr = xnthread_current(); + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_state(curr, XNSSTEP)) { + if (!xnthread_test_info(curr, XNCONTHI)) + cobalt_unregister_debugged_thread(curr); + + xnthread_set_localinfo(curr, XNHICCUP); + + dovetail_request_ucall(current); + } + + xnlock_put_irqrestore(&nklock, s); +} + +void handle_inband_event(enum inband_event_type event, void *data) +{ + switch (event) { + case INBAND_TASK_SIGNAL: + handle_sigwake_event(data); + break; + case INBAND_TASK_MIGRATION: + handle_setaffinity_event(data); + break; + case INBAND_TASK_EXIT: + if (xnthread_current()) + handle_taskexit_event(current); + break; + case INBAND_TASK_RETUSER: + handle_user_return(data); + break; + case INBAND_TASK_PTSTEP: + handle_ptrace_resume(data); + break; + case INBAND_TASK_PTCONT: + handle_ptrace_cont(); + break; + case INBAND_TASK_PTSTOP: + break; + case INBAND_PROCESS_CLEANUP: + handle_cleanup_event(data); + break; + } +} + +/* + * Called by the in-band kernel when the CLOCK_REALTIME epoch changes. 
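+ * This typically follows clock_settime(CLOCK_REALTIME) or
+ * settimeofday(); the new epoch is propagated to the Cobalt core so
+ * that CLOCK_REALTIME-based Cobalt timers remain consistent.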
+ */ +void inband_clock_was_set(void) +{ + if (realtime_core_enabled()) + xnclock_set_wallclock(ktime_get_real_fast_ns()); +} + +#ifdef CONFIG_MMU + +int pipeline_prepare_current(void) +{ + struct task_struct *p = current; + kernel_siginfo_t si; + + if ((p->mm->def_flags & VM_LOCKED) == 0) { + memset(&si, 0, sizeof(si)); + si.si_signo = SIGDEBUG; + si.si_code = SI_QUEUE; + si.si_int = SIGDEBUG_NOMLOCK | sigdebug_marker; + send_sig_info(SIGDEBUG, &si, p); + } + + return 0; +} + +static inline int get_mayday_prot(void) +{ + return PROT_READ|PROT_EXEC; +} + +#else /* !CONFIG_MMU */ + +int pipeline_prepare_current(void) +{ + return 0; +} + +static inline int get_mayday_prot(void) +{ + /* + * Until we stop backing /dev/mem with the mayday page, we + * can't ask for PROT_EXEC since the former does not define + * mmap capabilities, and default ones won't allow an + * executable mapping with MAP_SHARED. In the NOMMU case, this + * is (currently) not an issue. + */ + return PROT_READ; +} + +#endif /* !CONFIG_MMU */ + +void resume_oob_task(struct task_struct *p) /* inband, oob stage stalled */ +{ + struct xnthread *thread = xnthread_from_task(p); + + xnlock_get(&nklock); + + /* + * We fire the handler before the thread is migrated, so that + * thread->sched does not change between paired invocations of + * relax_thread/harden_thread handlers. + */ + xnthread_run_handler_stack(thread, harden_thread); + + cobalt_adjust_affinity(p); + + xnthread_resume(thread, XNRELAX); + + /* + * In case we migrated independently of the user return notifier, clear + * XNCONTHI here and also disable the notifier - we are already done. + */ + if (unlikely(xnthread_test_info(thread, XNCONTHI))) { + xnthread_clear_info(thread, XNCONTHI); + dovetail_clear_ucall(); + } + + /* Unregister as debugged thread in case we postponed this. */ + if (unlikely(xnthread_test_state(thread, XNSSTEP))) + cobalt_unregister_debugged_thread(thread); + + xnlock_put(&nklock); + + xnsched_run(); + +} + +void pipeline_attach_current(struct xnthread *thread) +{ + struct cobalt_threadinfo *p; + + p = pipeline_current(); + p->thread = thread; + p->process = cobalt_search_process(current->mm); + dovetail_init_altsched(&xnthread_archtcb(thread)->altsched); +} + +int pipeline_trap_kevents(void) +{ + dovetail_start(); + return 0; +} + +void pipeline_enable_kevents(void) +{ + dovetail_start_altsched(); +} --- linux/kernel/xenomai/rtdm/fd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/rtdm/fd.c 2022-03-21 12:58:29.118891604 +0100 @@ -0,0 +1,1070 @@ +/* + * Copyright (C) 2005 Jan Kiszka + * Copyright (C) 2005 Joerg Langenberg + * Copyright (C) 2013,2014 Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" +#include "posix/process.h" +#include "posix/syscall.h" +#include "posix/clock.h" + +#define RTDM_SETFL_MASK (O_NONBLOCK) + +DEFINE_PRIVATE_XNLOCK(fdtree_lock); +static LIST_HEAD(rtdm_fd_cleanup_queue); +static struct semaphore rtdm_fd_cleanup_sem; + +struct rtdm_fd_index { + struct xnid id; + struct rtdm_fd *fd; +}; + +static int enosys(void) +{ + return -ENOSYS; +} + +static int enotty(void) +{ + return -ENOTTY; +} + +static int ebadf(void) +{ + return -EBADF; +} + +static int enodev(void) +{ + return -ENODEV; +} + +static inline struct rtdm_fd_index * +fetch_fd_index(struct cobalt_ppd *p, int ufd) +{ + struct xnid *id = xnid_fetch(&p->fds, ufd); + if (id == NULL) + return NULL; + + return container_of(id, struct rtdm_fd_index, id); +} + +static struct rtdm_fd *fetch_fd(struct cobalt_ppd *p, int ufd) +{ + struct rtdm_fd_index *idx = fetch_fd_index(p, ufd); + if (idx == NULL) + return NULL; + + return idx->fd; +} + +#define assign_invalid_handler(__handler, __invalid) \ + do \ + (__handler) = (typeof(__handler))__invalid; \ + while (0) + +/* Calling this handler should beget ENOSYS if not implemented. */ +#define assign_switch_handler(__handler) \ + do \ + if ((__handler) == NULL) \ + assign_invalid_handler(__handler, enosys); \ + while (0) + +#define assign_default_handler(__handler, __invalid) \ + do \ + if ((__handler) == NULL) \ + assign_invalid_handler(__handler, __invalid); \ + while (0) + +#define __rt(__handler) __handler ## _rt +#define __nrt(__handler) __handler ## _nrt + +/* + * Install a placeholder returning EADV if none of the dual handlers + * are implemented, ENOSYS otherwise for NULL handlers to trigger the + * adaptive switch. 
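+ * As a result, a driver providing only one of the _rt/_nrt variants
+ * gets the missing one stubbed with enosys(), which makes a call
+ * issued from the "wrong" mode bounce back to the syscall dispatcher
+ * for a retry from the converse mode.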
+ */ +#define assign_default_dual_handlers(__handler, __invalid_handler) \ + do \ + if (__rt(__handler) || __nrt(__handler)) { \ + assign_switch_handler(__rt(__handler)); \ + assign_switch_handler(__nrt(__handler)); \ + } else { \ + assign_invalid_handler(__rt(__handler), \ + __invalid_handler); \ + assign_invalid_handler(__nrt(__handler), \ + __invalid_handler); \ + } \ + while (0) + +#ifdef CONFIG_XENO_ARCH_SYS3264 + +static inline void set_compat_bit(struct rtdm_fd *fd) +{ + struct pt_regs *regs; + + if (cobalt_ppd_get(0) == &cobalt_kernel_ppd) + fd->compat = 0; + else { + regs = task_pt_regs(current); + XENO_BUG_ON(COBALT, !__xn_syscall_p(regs)); + fd->compat = __COBALT_CALL_COMPAT(__xn_reg_sys(regs)); + } +} + +#else /* !CONFIG_XENO_ARCH_SYS3264 */ + +static inline void set_compat_bit(struct rtdm_fd *fd) +{ +} + +#endif /* !CONFIG_XENO_ARCH_SYS3264 */ + +int rtdm_fd_enter(struct rtdm_fd *fd, int ufd, unsigned int magic, + struct rtdm_fd_ops *ops) +{ + struct cobalt_ppd *ppd; + + secondary_mode_only(); + + if (magic == 0) + return -EINVAL; + + assign_default_dual_handlers(ops->ioctl, enotty); + assign_default_dual_handlers(ops->read, ebadf); + assign_default_dual_handlers(ops->write, ebadf); + assign_default_dual_handlers(ops->recvmsg, ebadf); + assign_default_dual_handlers(ops->sendmsg, ebadf); + assign_default_handler(ops->select, ebadf); + assign_default_handler(ops->mmap, enodev); + + ppd = cobalt_ppd_get(0); + fd->magic = magic; + fd->ops = ops; + fd->owner = ppd; + fd->ufd = ufd; + fd->refs = 1; + fd->stale = false; + set_compat_bit(fd); + INIT_LIST_HEAD(&fd->next); + + return 0; +} + +int rtdm_fd_register(struct rtdm_fd *fd, int ufd) +{ + struct rtdm_fd_index *idx; + struct cobalt_ppd *ppd; + spl_t s; + int ret = 0; + + ppd = cobalt_ppd_get(0); + idx = kmalloc(sizeof(*idx), GFP_KERNEL); + if (idx == NULL) + return -ENOMEM; + + idx->fd = fd; + + xnlock_get_irqsave(&fdtree_lock, s); + ret = xnid_enter(&ppd->fds, &idx->id, ufd); + xnlock_put_irqrestore(&fdtree_lock, s); + if (ret < 0) { + kfree(idx); + ret = -EBUSY; + } + + return ret; +} + +int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd, + struct rtdm_device *device) +{ + spl_t s; + int ret; + + ret = rtdm_fd_register(fd, ufd); + if (ret < 0) + return ret; + + trace_cobalt_fd_created(fd, ufd); + xnlock_get_irqsave(&fdtree_lock, s); + list_add(&fd->next, &device->openfd_list); + xnlock_put_irqrestore(&fdtree_lock, s); + + return 0; +} + +/** + * @brief Retrieve and lock a RTDM file descriptor + * + * @param[in] ufd User-side file descriptor + * @param[in] magic Magic word for lookup validation + * + * @return Pointer to the RTDM file descriptor matching @a + * ufd. Otherwise: + * + * - ERR_PTR(-EADV) if the use-space handle is either invalid, or not + * managed by RTDM. + * + * - ERR_PTR(-EBADF) if the underlying device is being torn down at + * the time of the call. + * + * @note The file descriptor returned must be later released by a call + * to rtdm_fd_put(). 
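+ *
+ * A minimal usage sketch (illustrative only, error handling trimmed):
+ * @code
+ * struct rtdm_fd *fd = rtdm_fd_get(ufd, RTDM_FD_MAGIC);
+ *
+ * if (IS_ERR(fd))
+ *         return PTR_ERR(fd);
+ * // ... operate on the descriptor ...
+ * rtdm_fd_put(fd);
+ * @endcode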
+ * + * @coretags{unrestricted} + */ +struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic) +{ + struct cobalt_ppd *p = cobalt_ppd_get(0); + struct rtdm_fd *fd; + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + fd = fetch_fd(p, ufd); + if (fd == NULL || (magic != 0 && fd->magic != magic)) { + fd = ERR_PTR(-EADV); + goto out; + } + + if (fd->stale) { + fd = ERR_PTR(-EBADF); + goto out; + } + + ++fd->refs; +out: + xnlock_put_irqrestore(&fdtree_lock, s); + + return fd; +} +EXPORT_SYMBOL_GPL(rtdm_fd_get); + +static int fd_cleanup_thread(void *data) +{ + struct rtdm_fd *fd; + int err; + spl_t s; + + for (;;) { + set_cpus_allowed_ptr(current, cpu_online_mask); + + do { + err = down_interruptible(&rtdm_fd_cleanup_sem); + if (kthread_should_stop()) + return 0; + } while (err); + + xnlock_get_irqsave(&fdtree_lock, s); + fd = list_first_entry(&rtdm_fd_cleanup_queue, + struct rtdm_fd, cleanup); + list_del(&fd->cleanup); + xnlock_put_irqrestore(&fdtree_lock, s); + + fd->ops->close(fd); + } + + return 0; +} + +static void lostage_trigger_close(struct pipeline_inband_work *inband_work) +{ + up(&rtdm_fd_cleanup_sem); +} + +static struct lostage_trigger_close { + struct pipeline_inband_work inband_work; /* Must be first. */ +} fd_closework = { + .inband_work = PIPELINE_INBAND_WORK_INITIALIZER(fd_closework, + lostage_trigger_close), +}; + +static void __put_fd(struct rtdm_fd *fd, spl_t s) +{ + bool destroy, trigger; + + XENO_WARN_ON(COBALT, fd->refs <= 0); + destroy = --fd->refs == 0; + if (destroy && !list_empty(&fd->next)) + list_del_init(&fd->next); + + xnlock_put_irqrestore(&fdtree_lock, s); + + if (!destroy) + return; + + if (is_secondary_domain()) + fd->ops->close(fd); + else { + xnlock_get_irqsave(&fdtree_lock, s); + trigger = list_empty(&rtdm_fd_cleanup_queue); + list_add_tail(&fd->cleanup, &rtdm_fd_cleanup_queue); + xnlock_put_irqrestore(&fdtree_lock, s); + + if (trigger) + pipeline_post_inband_work(&fd_closework); + } +} + +void rtdm_device_flush_fds(struct rtdm_device *dev) +{ + struct rtdm_driver *drv = dev->driver; + struct rtdm_fd *fd; + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + + while (!list_empty(&dev->openfd_list)) { + fd = list_get_entry_init(&dev->openfd_list, struct rtdm_fd, next); + fd->stale = true; + if (drv->ops.close) { + rtdm_fd_get_light(fd); + xnlock_put_irqrestore(&fdtree_lock, s); + drv->ops.close(fd); + rtdm_fd_put(fd); + xnlock_get_irqsave(&fdtree_lock, s); + } + } + + xnlock_put_irqrestore(&fdtree_lock, s); +} + +/** + * @brief Release a RTDM file descriptor obtained via rtdm_fd_get() + * + * @param[in] fd RTDM file descriptor to release + * + * @note Every call to rtdm_fd_get() must be matched by a call to + * rtdm_fd_put(). + * + * @coretags{unrestricted} + */ +void rtdm_fd_put(struct rtdm_fd *fd) +{ + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + __put_fd(fd, s); +} +EXPORT_SYMBOL_GPL(rtdm_fd_put); + +/** + * @brief Hold a reference on a RTDM file descriptor + * + * @param[in] fd Target file descriptor + * + * @note rtdm_fd_lock() increments the reference counter of @a fd. You + * only need to call this function in special scenarios, e.g. when + * keeping additional references to the file descriptor that have + * different lifetimes. Only use rtdm_fd_lock() on descriptors that + * are currently locked via an earlier rtdm_fd_get()/rtdm_fd_lock() or + * while running a device operation handler. 
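+ *
+ * A minimal pairing sketch (illustrative only; the asynchronous user
+ * of @a fd is driver-defined):
+ * @code
+ * ret = rtdm_fd_lock(fd);    // take an extra reference for async use
+ * if (ret)
+ *         return ret;
+ * // ... later, once the asynchronous user is done with fd:
+ * rtdm_fd_unlock(fd);
+ * @endcode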
+ * + * @coretags{unrestricted} + */ +int rtdm_fd_lock(struct rtdm_fd *fd) +{ + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + if (fd->refs == 0) { + xnlock_put_irqrestore(&fdtree_lock, s); + return -EIDRM; + } + ++fd->refs; + xnlock_put_irqrestore(&fdtree_lock, s); + + return 0; +} +EXPORT_SYMBOL_GPL(rtdm_fd_lock); + +/** + * @brief Drop a reference on a RTDM file descriptor + * + * @param[in] fd Target file descriptor + * + * @note Every call to rtdm_fd_lock() must be matched by a call to + * rtdm_fd_unlock(). + * + * @coretags{unrestricted} + */ +void rtdm_fd_unlock(struct rtdm_fd *fd) +{ + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + __put_fd(fd, s); +} +EXPORT_SYMBOL_GPL(rtdm_fd_unlock); + +int rtdm_fd_fcntl(int ufd, int cmd, ...) +{ + struct rtdm_fd *fd; + va_list ap; + long arg; + int ret; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) + return PTR_ERR(fd); + + va_start(ap, cmd); + arg = va_arg(ap, long); + va_end(ap); + + switch (cmd) { + case F_GETFL: + ret = fd->oflags; + break; + case F_SETFL: + fd->oflags = (fd->oflags & ~RTDM_SETFL_MASK) | + (arg & RTDM_SETFL_MASK); + ret = 0; + break; + default: + ret = -EINVAL; + } + + rtdm_fd_put(fd); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_fcntl); + +static struct rtdm_fd *get_fd_fixup_mode(int ufd) +{ + struct xnthread *thread; + struct rtdm_fd *fd; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) + return fd; + + /* + * Mode is selected according to the following convention: + * + * - Cobalt threads must try running the syscall from primary + * mode as a first attempt, regardless of their scheduling + * class. The driver handler may ask for demoting the caller + * to secondary mode by returning -ENOSYS. + * + * - Regular threads (i.e. not bound to Cobalt) may only run + * the syscall from secondary mode. + */ + thread = xnthread_current(); + if (unlikely(is_secondary_domain())) { + if (thread == NULL || + xnthread_test_localinfo(thread, XNDESCENT)) + return fd; + } else if (likely(thread)) + return fd; + + /* + * We need to switch to the converse mode. Since all callers + * bear the "adaptive" tag, we just pass -ENOSYS back to the + * syscall dispatcher to get switched to the next mode. + */ + rtdm_fd_put(fd); + + return ERR_PTR(-ENOSYS); +} + +int rtdm_fd_ioctl(int ufd, unsigned int request, ...) 
+{ + struct rtdm_fd *fd; + void __user *arg; + va_list args; + int err, ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + err = PTR_ERR(fd); + goto out; + } + + va_start(args, request); + arg = va_arg(args, void __user *); + va_end(args); + + set_compat_bit(fd); + + trace_cobalt_fd_ioctl(current, fd, ufd, request); + + if (is_secondary_domain()) + err = fd->ops->ioctl_nrt(fd, request, arg); + else + err = fd->ops->ioctl_rt(fd, request, arg); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + if (err < 0) { + ret = __rtdm_dev_ioctl_core(fd, request, arg); + if (ret != -EADV) + err = ret; + } + + rtdm_fd_put(fd); + out: + if (err < 0) + trace_cobalt_fd_ioctl_status(current, fd, ufd, err); + + return err; +} +EXPORT_SYMBOL_GPL(rtdm_fd_ioctl); + +ssize_t +rtdm_fd_read(int ufd, void __user *buf, size_t size) +{ + struct rtdm_fd *fd; + ssize_t ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_read(current, fd, ufd, size); + + if (is_secondary_domain()) + ret = fd->ops->read_nrt(fd, buf, size); + else + ret = fd->ops->read_rt(fd, buf, size); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); + + out: + if (ret < 0) + trace_cobalt_fd_read_status(current, fd, ufd, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_read); + +ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size) +{ + struct rtdm_fd *fd; + ssize_t ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_write(current, fd, ufd, size); + + if (is_secondary_domain()) + ret = fd->ops->write_nrt(fd, buf, size); + else + ret = fd->ops->write_rt(fd, buf, size); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); + + out: + if (ret < 0) + trace_cobalt_fd_write_status(current, fd, ufd, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_write); + +ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags) +{ + struct rtdm_fd *fd; + ssize_t ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_recvmsg(current, fd, ufd, flags); + + if (fd->oflags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + + if (is_secondary_domain()) + ret = fd->ops->recvmsg_nrt(fd, msg, flags); + else + ret = fd->ops->recvmsg_rt(fd, msg, flags); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); +out: + if (ret < 0) + trace_cobalt_fd_recvmsg_status(current, fd, ufd, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_recvmsg); + +struct cobalt_recvmmsg_timer { + struct xntimer timer; + struct xnthread *waiter; +}; + +static void recvmmsg_timeout_handler(struct xntimer *timer) +{ + struct cobalt_recvmmsg_timer *rq; + + rq = container_of(timer, struct cobalt_recvmmsg_timer, timer); + xnthread_set_info(rq->waiter, XNTIMEO); + xnthread_resume(rq->waiter, XNDELAY); +} + +int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, void __user *u_timeout, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg), + int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts)) +{ + struct cobalt_recvmmsg_timer rq; + xntmode_t tmode = XN_RELATIVE; + struct timespec64 ts = { 0 }; + int ret = 0, datagrams = 0; + xnticks_t timeout = 0; + struct mmsghdr mmsg; + struct rtdm_fd *fd; + void __user *u_p; + ssize_t 
len; + spl_t s; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_recvmmsg(current, fd, ufd, flags); + + if (u_timeout) { + ret = get_timespec(&ts, u_timeout); + if (ret) + goto fail; + + if (!timespec64_valid(&ts)) { + ret = -EINVAL; + goto fail; + } + + tmode = XN_ABSOLUTE; + timeout = ts2ns(&ts); + if (timeout == 0) + flags |= MSG_DONTWAIT; + else { + timeout += xnclock_read_monotonic(&nkclock); + rq.waiter = xnthread_current(); + xntimer_init(&rq.timer, &nkclock, + recvmmsg_timeout_handler, + NULL, XNTIMER_IGRAVITY); + xnlock_get_irqsave(&nklock, s); + ret = xntimer_start(&rq.timer, timeout, + XN_INFINITE, tmode); + xnlock_put_irqrestore(&nklock, s); + } + } + + if (fd->oflags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + + for (u_p = u_msgvec; vlen > 0; vlen--) { + ret = get_mmsg(&mmsg, u_p); + if (ret) + break; + len = fd->ops->recvmsg_rt(fd, &mmsg.msg_hdr, flags); + if (len < 0) { + ret = len; + break; + } + mmsg.msg_len = (unsigned int)len; + ret = put_mmsg(&u_p, &mmsg); + if (ret) + break; + datagrams++; + /* OOB data requires immediate handling. */ + if (mmsg.msg_hdr.msg_flags & MSG_OOB) + break; + if (flags & MSG_WAITFORONE) + flags |= MSG_DONTWAIT; + } + + if (timeout) { + xnlock_get_irqsave(&nklock, s); + xntimer_destroy(&rq.timer); + xnlock_put_irqrestore(&nklock, s); + } + +fail: + rtdm_fd_put(fd); + + if (datagrams > 0) + ret = datagrams; + +out: + trace_cobalt_fd_recvmmsg_status(current, fd, ufd, ret); + + return ret; +} + +static inline int __rtdm_fetch_timeout64(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts); +} + +int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, void __user *u_timeout, + int (*get_mmsg)(struct mmsghdr *mmsg, + void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, + const struct mmsghdr *mmsg)) +{ + return __rtdm_fd_recvmmsg(ufd, u_msgvec, vlen, flags, u_timeout, + get_mmsg, put_mmsg, __rtdm_fetch_timeout64); +} + + +ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg, int flags) +{ + struct rtdm_fd *fd; + ssize_t ret; + + fd = get_fd_fixup_mode(ufd); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_sendmsg(current, fd, ufd, flags); + + if (fd->oflags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + + if (is_secondary_domain()) + ret = fd->ops->sendmsg_nrt(fd, msg, flags); + else + ret = fd->ops->sendmsg_rt(fd, msg, flags); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); +out: + if (ret < 0) + trace_cobalt_fd_sendmsg_status(current, fd, ufd, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_fd_sendmsg); + +int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen, + unsigned int flags, + int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg), + int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg)) +{ + int ret = 0, datagrams = 0; + struct mmsghdr mmsg; + struct rtdm_fd *fd; + void __user *u_p; + ssize_t len; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_sendmmsg(current, fd, ufd, flags); + + if (fd->oflags & O_NONBLOCK) + flags |= MSG_DONTWAIT; + + for (u_p = u_msgvec; vlen > 0; vlen--) { + ret = get_mmsg(&mmsg, u_p); + if (ret) + break; + len = fd->ops->sendmsg_rt(fd, &mmsg.msg_hdr, flags); + if (len < 0) { + ret = len; + break; + } + mmsg.msg_len = 
(unsigned int)len; + ret = put_mmsg(&u_p, &mmsg); + if (ret) + break; + datagrams++; + } + + rtdm_fd_put(fd); + + if (datagrams > 0) + ret = datagrams; + +out: + trace_cobalt_fd_sendmmsg_status(current, fd, ufd, ret); + + return ret; +} + +static void +__fd_close(struct cobalt_ppd *p, struct rtdm_fd_index *idx, spl_t s) +{ + xnid_remove(&p->fds, &idx->id); + __put_fd(idx->fd, s); + + kfree(idx); +} + +int rtdm_fd_close(int ufd, unsigned int magic) +{ + struct rtdm_fd_index *idx; + struct cobalt_ppd *ppd; + struct rtdm_fd *fd; + spl_t s; + + secondary_mode_only(); + + ppd = cobalt_ppd_get(0); + + xnlock_get_irqsave(&fdtree_lock, s); + idx = fetch_fd_index(ppd, ufd); + if (idx == NULL) + goto eadv; + + fd = idx->fd; + if (magic != 0 && fd->magic != magic) { +eadv: + xnlock_put_irqrestore(&fdtree_lock, s); + return -EADV; + } + + set_compat_bit(fd); + + trace_cobalt_fd_close(current, fd, ufd, fd->refs); + + /* + * In dual kernel mode, the linux-side fdtable and the RTDM + * ->close() handler are asynchronously managed, i.e. the + * handler execution may be deferred after the regular file + * descriptor was removed from the fdtable if some refs on + * rtdm_fd are still pending. + */ + __fd_close(ppd, idx, s); + close_fd(ufd); + + return 0; +} +EXPORT_SYMBOL_GPL(rtdm_fd_close); + +int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma, + void **u_addrp) +{ + struct rtdm_fd *fd; + int ret; + + secondary_mode_only(); + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) { + ret = PTR_ERR(fd); + goto out; + } + + set_compat_bit(fd); + + trace_cobalt_fd_mmap(current, fd, ufd, rma); + + if (rma->flags & (MAP_FIXED|MAP_ANONYMOUS)) { + ret = -EADV; + goto unlock; + } + + ret = __rtdm_mmap_from_fdop(fd, rma->length, rma->offset, + rma->prot, rma->flags, u_addrp); +unlock: + rtdm_fd_put(fd); +out: + if (ret) + trace_cobalt_fd_mmap_status(current, fd, ufd, ret); + + return ret; +} + +int rtdm_fd_valid_p(int ufd) +{ + struct rtdm_fd *fd; + spl_t s; + + xnlock_get_irqsave(&fdtree_lock, s); + fd = fetch_fd(cobalt_ppd_get(0), ufd); + xnlock_put_irqrestore(&fdtree_lock, s); + + return fd != NULL; +} + +/** + * @brief Bind a selector to specified event types of a given file descriptor + * @internal + * + * This function is invoked by higher RTOS layers implementing select-like + * services. It shall not be called directly by RTDM drivers. + * + * @param[in] ufd User-side file descriptor to bind to + * @param[in,out] selector Selector object that shall be bound to the given + * event + * @param[in] type Event type the caller is interested in + * + * @return 0 on success, otherwise: + * + * - -EADV is returned if the file descriptor @a ufd cannot be resolved. + * + * - -EBADF is returned if the underlying device is being torn down at the time + * of the call. + * + * - -EINVAL is returned if @a type is invalid. 
+ * + * @coretags{task-unrestricted} + */ +int rtdm_fd_select(int ufd, struct xnselector *selector, + unsigned int type) +{ + struct rtdm_fd *fd; + int ret; + + fd = rtdm_fd_get(ufd, 0); + if (IS_ERR(fd)) + return PTR_ERR(fd); + + set_compat_bit(fd); + + ret = fd->ops->select(fd, selector, type, ufd); + + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + + rtdm_fd_put(fd); + + return ret; +} + +static void destroy_fd(void *cookie, struct xnid *id) +{ + struct cobalt_ppd *p = cookie; + struct rtdm_fd_index *idx; + spl_t s; + + idx = container_of(id, struct rtdm_fd_index, id); + xnlock_get_irqsave(&fdtree_lock, s); + __fd_close(p, idx, 0); +} + +void rtdm_fd_cleanup(struct cobalt_ppd *p) +{ + /* + * This is called on behalf of a (userland) task exit handler, + * so we don't have to deal with the regular file descriptors, + * we only have to empty our own index. + */ + xntree_cleanup(&p->fds, p, destroy_fd); +} + +void rtdm_fd_init(void) +{ + sema_init(&rtdm_fd_cleanup_sem, 0); + kthread_run(fd_cleanup_thread, NULL, "rtdm_fd"); +} + +static inline void warn_user(struct file *file, const char *call) +{ + struct dentry *dentry = file->f_path.dentry; + + printk(XENO_WARNING + "%s[%d] called regular %s() on /dev/rtdm/%s\n", + current->comm, task_pid_nr(current), call + 5, dentry->d_name.name); +} + +static ssize_t dumb_read(struct file *file, char __user *buf, + size_t count, loff_t __user *ppos) +{ + warn_user(file, __func__); + return -EINVAL; +} + +static ssize_t dumb_write(struct file *file, const char __user *buf, + size_t count, loff_t __user *ppos) +{ + warn_user(file, __func__); + return -EINVAL; +} + +static unsigned int dumb_poll(struct file *file, poll_table *pt) +{ + warn_user(file, __func__); + return -EINVAL; +} + +static long dumb_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + warn_user(file, __func__); + return -EINVAL; +} + +const struct file_operations rtdm_dumb_fops = { + .read = dumb_read, + .write = dumb_write, + .poll = dumb_poll, + .unlocked_ioctl = dumb_ioctl, +}; --- linux/kernel/xenomai/rtdm/internal.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/rtdm/internal.h 2022-03-21 12:58:29.114891643 +0100 @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2005-2007 Jan Kiszka . + * Copyright (C) 2005 Joerg Langenberg . + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _RTDM_INTERNAL_H +#define _RTDM_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include + +static inline void __rtdm_get_device(struct rtdm_device *device) +{ + atomic_inc(&device->refcount); +} + +void __rtdm_put_device(struct rtdm_device *device); + +struct rtdm_device *__rtdm_get_namedev(const char *path); + +struct rtdm_device *__rtdm_get_protodev(int protocol_family, + int socket_type); + +void __rtdm_dev_close(struct rtdm_fd *fd); + +int __rtdm_dev_ioctl_core(struct rtdm_fd *fd, + unsigned int request, void __user *arg); + +int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset, + int prot, int flags, void **pptr); + +/* nklock held, irqs off. */ +static inline void rtdm_fd_get_light(struct rtdm_fd *fd) +{ + ++fd->refs; +} + +int rtdm_init(void); + +void rtdm_cleanup(void); + +extern const struct file_operations rtdm_dumb_fops; + +#endif /* _RTDM_INTERNAL_H */ --- linux/kernel/xenomai/rtdm/core.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/rtdm/core.c 2022-03-21 12:58:29.111891672 +0100 @@ -0,0 +1,1373 @@ +/* + * Copyright (C) 2005 Jan Kiszka + * Copyright (C) 2005 Joerg Langenberg + * Copyright (C) 2014 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rtdm/internal.h" +#define CREATE_TRACE_POINTS +#include +#include "posix/process.h" + +/** + * @ingroup rtdm + * @defgroup rtdm_driver_interface Driver programming interface + * RTDM driver programming interface + * @{ + */ + +static void cleanup_instance(struct rtdm_device *dev, + struct rtdm_dev_context *context) +{ + if (context) + kfree(context); + + __rtdm_put_device(dev); +} + +void __rtdm_dev_close(struct rtdm_fd *fd) +{ + struct rtdm_dev_context *context = rtdm_fd_to_context(fd); + struct rtdm_device *dev = context->device; + struct rtdm_driver *drv = dev->driver; + + if (!fd->stale && drv->ops.close) + drv->ops.close(fd); + + cleanup_instance(dev, context); +} + +int __rtdm_anon_getfd(const char *name, int flags) +{ + return anon_inode_getfd(name, &rtdm_dumb_fops, NULL, flags); +} + +void __rtdm_anon_putfd(int ufd) +{ + close_fd(ufd); +} + +static int create_instance(int ufd, struct rtdm_device *dev, + struct rtdm_dev_context **context_ptr) +{ + struct rtdm_driver *drv = dev->driver; + struct rtdm_dev_context *context; + + /* + * Reset to NULL so that we can always use cleanup_files/instance to + * revert also partially successful allocations. 
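+	 * cleanup_instance() accepts a NULL context pointer, so the
+	 * error paths may revert a partially initialized state
+	 * unconditionally.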
+ */ + *context_ptr = NULL; + + if ((drv->device_flags & RTDM_EXCLUSIVE) != 0 && + atomic_read(&dev->refcount) > 1) + return -EBUSY; + + context = kzalloc(sizeof(struct rtdm_dev_context) + + drv->context_size, GFP_KERNEL); + if (unlikely(context == NULL)) + return -ENOMEM; + + context->device = dev; + *context_ptr = context; + + return rtdm_fd_enter(&context->fd, ufd, RTDM_FD_MAGIC, &dev->ops); +} + +#ifdef CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE + +static inline struct file * +open_devnode(struct rtdm_device *dev, const char *path, int oflag) +{ + struct file *filp; + char *filename; + + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY) && + strncmp(path, "/dev/rtdm/", 10)) + printk(XENO_WARNING + "%s[%d] opens obsolete device path: %s\n", + current->comm, task_pid_nr(current), path); + + filename = kasprintf(GFP_KERNEL, "/dev/rtdm/%s", dev->name); + if (filename == NULL) + return ERR_PTR(-ENOMEM); + + filp = filp_open(filename, oflag, 0); + kfree(filename); + + return filp; +} + +#else /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */ + +static inline struct file * +open_devnode(struct rtdm_device *dev, const char *path, int oflag) +{ + return filp_open(path, oflag, 0); +} + +#endif /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */ + +int __rtdm_dev_open(const char *path, int oflag) +{ + struct rtdm_dev_context *context; + struct rtdm_device *dev; + struct file *filp; + int ufd, ret; + + secondary_mode_only(); + + /* + * CAUTION: we do want a lookup into the registry to happen + * before any attempt is made to open the devnode, so that we + * don't inadvertently open a regular (i.e. non-RTDM) device. + * Reason is that opening, then closing a device - because we + * don't manage it - may incur side-effects we don't want, + * e.g. opening then closing one end of a pipe would cause the + * other side to read the EOF condition. This is basically + * why we keep a RTDM registry for named devices, so that we + * can figure out whether an open() request is going to be + * valid, without having to open the devnode yet. 
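+	 * The lookup also takes a reference on the device, which the
+	 * error paths below drop again via __rtdm_put_device().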
+ */ + dev = __rtdm_get_namedev(path); + if (dev == NULL) + return -EADV; + + ufd = get_unused_fd_flags(oflag); + if (ufd < 0) { + ret = ufd; + goto fail_fd; + } + + filp = open_devnode(dev, path, oflag); + if (IS_ERR(filp)) { + ret = PTR_ERR(filp); + goto fail_fopen; + } + + ret = create_instance(ufd, dev, &context); + if (ret < 0) + goto fail_create; + + context->fd.minor = dev->minor; + context->fd.oflags = oflag; + + trace_cobalt_fd_open(current, &context->fd, ufd, oflag); + + if (dev->ops.open) { + ret = dev->ops.open(&context->fd, oflag); + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + if (ret < 0) + goto fail_open; + } + + ret = rtdm_device_new_fd(&context->fd, ufd, context->device); + if (ret < 0) + goto fail_open; + + fd_install(ufd, filp); + + return ufd; + +fail_open: + cleanup_instance(dev, context); +fail_create: + filp_close(filp, current->files); +fail_fopen: + put_unused_fd(ufd); +fail_fd: + __rtdm_put_device(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(__rtdm_dev_open); + +int __rtdm_dev_socket(int protocol_family, int socket_type, + int protocol) +{ + struct rtdm_dev_context *context; + struct rtdm_device *dev; + int ufd, ret; + + secondary_mode_only(); + + dev = __rtdm_get_protodev(protocol_family, socket_type); + if (dev == NULL) + return -EAFNOSUPPORT; + + ufd = __rtdm_anon_getfd("[rtdm-socket]", O_RDWR); + if (ufd < 0) { + ret = ufd; + goto fail_getfd; + } + + ret = create_instance(ufd, dev, &context); + if (ret < 0) + goto fail_create; + + trace_cobalt_fd_socket(current, &context->fd, ufd, protocol_family); + + if (dev->ops.socket) { + ret = dev->ops.socket(&context->fd, protocol); + if (!XENO_ASSERT(COBALT, !spltest())) + splnone(); + if (ret < 0) + goto fail_socket; + } + + ret = rtdm_device_new_fd(&context->fd, ufd, context->device); + if (ret < 0) + goto fail_socket; + + return ufd; + +fail_socket: + cleanup_instance(dev, context); +fail_create: + close_fd(ufd); +fail_getfd: + __rtdm_put_device(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(__rtdm_dev_socket); + +int __rtdm_dev_ioctl_core(struct rtdm_fd *fd, unsigned int request, + void __user *arg) +{ + struct rtdm_device *dev = rtdm_fd_device(fd); + struct rtdm_driver *drv; + struct rtdm_device_info dev_info; + + if (fd->magic != RTDM_FD_MAGIC || request != RTIOC_DEVICE_INFO) + return -EADV; + + drv = dev->driver; + dev_info.device_flags = drv->device_flags; + dev_info.device_class = drv->profile_info.class_id; + dev_info.device_sub_class = drv->profile_info.subclass_id; + dev_info.profile_version = drv->profile_info.version; + + return rtdm_safe_copy_to_user(fd, arg, &dev_info, sizeof(dev_info)); +} + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ + +/** + * @addtogroup rtdm_sync + *@{ + */ + +/** + * @fn void rtdm_waitqueue_init(struct rtdm_waitqueue *wq) + * @brief Initialize a RTDM wait queue + * + * Sets up a wait queue structure for further use. + * + * @param wq waitqueue to initialize. + * + * @coretags{task-unrestricted} + */ +void rtdm_waitqueue_init(struct rtdm_waitqueue *wq); + +/** + * @fn void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq) + * @brief Deletes a RTDM wait queue + * + * Dismantles a wait queue structure, releasing all resources attached + * to it. + * + * @param wq waitqueue to delete. 
+ * + * @coretags{task-unrestricted} + */ +void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq); + +/** + * @fn rtdm_timedwait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq) + * @brief Timed sleep on a locked waitqueue until a condition gets true + * + * The calling task is put to sleep until @a condition evaluates to + * true or a timeout occurs. The condition is checked each time the + * waitqueue @a wq is signaled. + * + * The waitqueue must have been locked by a call to + * rtdm_waitqueue_lock() prior to calling this service. + * + * @param wq locked waitqueue to wait on. The waitqueue lock is + * dropped when sleeping, then reacquired before this service returns + * to the caller. + * + * @param condition C expression for the event to wait for. + * + * @param timeout relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values. + * + * @param[in,out] toseq handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has received a Linux signal or + * has been forcibly unblocked by a call to rtdm_task_unblock(). + * + * - -ETIMEDOUT is returned if the if the request has not been satisfied + * within the specified amount of time. + * + * @note rtdm_waitqueue_signal() has to be called after changing any + * variable that could change the result of the wait condition. + * + * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for + * such service, and might cause unexpected behavior. + * + * @coretags{primary-only, might-switch} + */ +rtdm_timedwait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq); + +/** + * @fn rtdm_wait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition) + * @brief Sleep on a locked waitqueue until a condition gets true + * + * The calling task is put to sleep until @a condition evaluates to + * true. The condition is checked each time the waitqueue @a wq is + * signaled. + * + * The waitqueue must have been locked by a call to + * rtdm_waitqueue_lock() prior to calling this service. + * + * @param wq locked waitqueue to wait on. The waitqueue lock is + * dropped when sleeping, then reacquired before this service returns + * to the caller. + * + * @param condition C expression for the event to wait for. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has received a Linux signal or + * has been forcibly unblocked by a call to rtdm_task_unblock(). + * + * @note rtdm_waitqueue_signal() has to be called after changing any + * variable that could change the result of the wait condition. + * + * @coretags{primary-only, might-switch} + */ +rtdm_wait_condition_locked(struct rtdm_wait_queue *wq, C_expr condition); + +/** + * @fn rtdm_timedwait_condition(struct rtdm_wait_queue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq) + * @brief Timed sleep on a waitqueue until a condition gets true + * + * The calling task is put to sleep until @a condition evaluates to + * true or a timeout occurs. The condition is checked each time the + * waitqueue @a wq is signaled. + * + * @param wq waitqueue to wait on. + * + * @param condition C expression for the event to wait for. + * + * @param timeout relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values. 
+ * + * @param[in,out] toseq handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has received a Linux signal or + * has been forcibly unblocked by a call to rtdm_task_unblock(). + * + * - -ETIMEDOUT is returned if the if the request has not been satisfied + * within the specified amount of time. + * + * @note rtdm_waitqueue_signal() has to be called after changing any + * variable that could change the result of the wait condition. + * + * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for + * such service, and might cause unexpected behavior. + * + * @coretags{primary-only, might-switch} + */ +rtdm_timedwait_condition(struct rtdm_wait_queue *wq, C_expr condition, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq); + +/** + * @fn void rtdm_timedwait(struct rtdm_wait_queue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq) + * @brief Timed sleep on a waitqueue unconditionally + * + * The calling task is put to sleep until the waitqueue is signaled by + * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or + * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs. + * + * @param wq waitqueue to wait on. + * + * @param timeout relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values. + * + * @param[in,out] toseq handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if the waitqueue has been flushed, or the + * calling task has received a Linux signal or has been forcibly + * unblocked by a call to rtdm_task_unblock(). + * + * - -ETIMEDOUT is returned if the if the request has not been satisfied + * within the specified amount of time. + * + * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for + * such service, and might cause unexpected behavior. + * + * @coretags{primary-only, might-switch} + */ +void rtdm_timedwait(struct rtdm_wait_queue *wq, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq); + +/** + * @fn void rtdm_timedwait_locked(struct rtdm_wait_queue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq) + * @brief Timed sleep on a locked waitqueue unconditionally + * + * The calling task is put to sleep until the waitqueue is signaled by + * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or + * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs. + * + * The waitqueue must have been locked by a call to + * rtdm_waitqueue_lock() prior to calling this service. + * + * @param wq locked waitqueue to wait on. The waitqueue lock is + * dropped when sleeping, then reacquired before this service returns + * to the caller. + * + * @param timeout relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values. + * + * @param[in,out] toseq handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if the waitqueue has been flushed, or the + * calling task has received a Linux signal or has been forcibly + * unblocked by a call to rtdm_task_unblock(). + * + * - -ETIMEDOUT is returned if the if the request has not been satisfied + * within the specified amount of time. + * + * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for + * such service, and might cause unexpected behavior. 
+ * + * @coretags{primary-only, might-switch} + */ +void rtdm_timedwait_locked(struct rtdm_wait_queue *wq, + nanosecs_rel_t timeout, rtdm_toseq_t *toseq); + +/** + * @fn rtdm_wait_condition(struct rtdm_wait_queue *wq, C_expr condition) + * @brief Sleep on a waitqueue until a condition gets true + * + * The calling task is put to sleep until @a condition evaluates to + * true. The condition is checked each time the waitqueue @a wq is + * signaled. + * + * @param wq waitqueue to wait on + * + * @param condition C expression for the event to wait for. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has received a Linux signal or + * has been forcibly unblocked by a call to rtdm_task_unblock(). + * + * @note rtdm_waitqueue_signal() has to be called after changing any + * variable that could change the result of the wait condition. + * + * @coretags{primary-only, might-switch} + */ +rtdm_wait_condition(struct rtdm_wait_queue *wq, C_expr condition); + +/** + * @fn void rtdm_wait(struct rtdm_wait_queue *wq) + * @brief Sleep on a waitqueue unconditionally + * + * The calling task is put to sleep until the waitqueue is signaled by + * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or + * flushed by a call to rtdm_waitqueue_flush(). + * + * @param wq waitqueue to wait on. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if the waitqueue has been flushed, or the + * calling task has received a Linux signal or has been forcibly + * unblocked by a call to rtdm_task_unblock(). + * + * @coretags{primary-only, might-switch} + */ +void rtdm_wait(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_wait_locked(struct rtdm_wait_queue *wq) + * @brief Sleep on a locked waitqueue unconditionally + * + * The calling task is put to sleep until the waitqueue is signaled by + * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or + * flushed by a call to rtdm_waitqueue_flush(). + * + * The waitqueue must have been locked by a call to + * rtdm_waitqueue_lock() prior to calling this service. + * + * @param wq locked waitqueue to wait on. The waitqueue lock is + * dropped when sleeping, then reacquired before this service returns + * to the caller. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if the waitqueue has been flushed, or the + * calling task has received a Linux signal or has been forcibly + * unblocked by a call to rtdm_task_unblock(). + * + * @coretags{primary-only, might-switch} + */ +void rtdm_wait_locked(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_waitqueue_lock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context) + * @brief Lock a waitqueue + * + * Acquires the lock on the waitqueue @a wq. + * + * @param wq waitqueue to lock. + * + * @param context name of local variable to store the context in. + * + * @note Recursive locking might lead to unexpected behavior, + * including lock up. + * + * @coretags{unrestricted} + */ +void rtdm_waitqueue_lock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context); + +/** + * @fn void rtdm_waitqueue_unlock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context) + * @brief Unlock a waitqueue + * + * Releases the lock on the waitqueue @a wq. + * + * @param wq waitqueue to unlock. + * + * @param context name of local variable to retrieve the context from. 
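+ *
+ * A minimal sketch of the lock/unlock pairing (illustrative only;
+ * @a wq names a driver-owned waitqueue):
+ * @code
+ * rtdm_lockctx_t ctx;
+ *
+ * rtdm_waitqueue_lock(&wq, ctx);
+ * // ... update the shared wait condition under the lock ...
+ * rtdm_waitqueue_unlock(&wq, ctx);
+ * @endcode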
+ * + * @coretags{unrestricted} + */ +void rtdm_waitqueue_unlock(struct rtdm_wait_queue *wq, rtdm_lockctx_t context); + +/** + * @fn void rtdm_waitqueue_signal(struct rtdm_wait_queue *wq) + * @brief Signal a waitqueue + * + * Signals the waitqueue @a wq, waking up a single waiter (if + * any). + * + * @param wq waitqueue to signal. + * + * @return non-zero if a task has been readied as a result of this + * call, zero otherwise. + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_waitqueue_signal(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_waitqueue_broadcast(struct rtdm_wait_queue *wq) + * @brief Broadcast a waitqueue + * + * Broadcast the waitqueue @a wq, waking up all waiters. Each + * readied task may assume to have received the wake up event. + * + * @param wq waitqueue to broadcast. + * + * @return non-zero if at least one task has been readied as a result + * of this call, zero otherwise. + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_waitqueue_broadcast(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_waitqueue_flush(struct rtdm_wait_queue *wq) + * @brief Flush a waitqueue + * + * Flushes the waitqueue @a wq, unblocking all waiters with an error + * status (-EINTR). + * + * @param wq waitqueue to flush. + * + * @return non-zero if at least one task has been readied as a result + * of this call, zero otherwise. + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_waitqueue_flush(struct rtdm_wait_queue *wq); + +/** + * @fn void rtdm_waitqueue_wakeup(struct rtdm_wait_queue *wq, rtdm_task_t waiter) + * @brief Signal a particular waiter on a waitqueue + * + * Signals the waitqueue @a wq, waking up waiter @a waiter only, + * which must be currently sleeping on the waitqueue. + * + * @param wq waitqueue to signal. + * + * @param waiter RTDM task to wake up. + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_waitqueue_wakeup(struct rtdm_wait_queue *wq, rtdm_task_t waiter); + +/** + * @fn rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_wait_queue *wq) + * @brief Simple iterator for waitqueues + * + * This construct traverses the wait list of a given waitqueue + * @a wq, assigning each RTDM task pointer to the cursor variable + * @a pos, which must be of type rtdm_task_t. + * + * @a wq must have been locked by a call to rtdm_waitqueue_lock() + * prior to traversing its wait list. + * + * @param pos cursor variable holding a pointer to the RTDM task + * being fetched. + * + * @param wq waitqueue to scan. + * + * @note The waitqueue should not be signaled, broadcast or flushed + * during the traversal, unless the loop is aborted immediately + * after. Should multiple waiters be readied while iterating, the safe + * form rtdm_for_each_waiter_safe() must be used for traversal + * instead. + * + * @coretags{unrestricted} + */ +rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_wait_queue *wq); + +/** + * @fn rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_wait_queue *wq) + * @brief Safe iterator for waitqueues + * + * This construct traverses the wait list of a given waitqueue + * @a wq, assigning each RTDM task pointer to the cursor variable + * @a pos, which must be of type rtdm_task_t. + * + * Unlike with rtdm_for_each_waiter(), the waitqueue may be signaled, + * broadcast or flushed during the traversal. + * + * @a wq must have been locked by a call to rtdm_waitqueue_lock() + * prior to traversing its wait list. + * + * @param pos cursor variable holding a pointer to the RTDM task + * being fetched. 
+ * + * @param tmp temporary cursor variable. + * + * @param wq waitqueue to scan. + * + * @coretags{unrestricted} + */ +rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_wait_queue *wq); + +/** @} rtdm_sync */ + +/** + * @defgroup rtdm_interdriver_api Driver to driver services + * Inter-driver interface + *@{ + */ + +/** + * @brief Open a device + * + * Refer to rtdm_open() for parameters and return values + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_open(const char *path, int oflag, ...); + +/** + * @brief Create a socket + * + * Refer to rtdm_socket() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_socket(int protocol_family, int socket_type, int protocol); + +/** + * @brief Close a device or socket + * + * Refer to rtdm_close() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_close(int fd); + +/** + * @brief Issue an IOCTL + * + * Refer to rtdm_ioctl() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_ioctl(int fd, int request, ...); + +/** + * @brief Read from device + * + * Refer to rtdm_read() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_read(int fd, void *buf, size_t nbyte); + +/** + * @brief Write to device + * + * Refer to rtdm_write() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_write(int fd, const void *buf, size_t nbyte); + +/** + * @brief Receive message from socket + * + * Refer to rtdm_recvmsg() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags); + +/** + * @brief Receive message from socket + * + * Refer to rtdm_recvfrom() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen); + +/** + * @brief Receive message from socket + * + * Refer to rtdm_recv() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags); + +/** + * @brief Transmit message to socket + * + * Refer to rtdm_sendmsg() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags); + +/** + * @brief Transmit message to socket + * + * Refer to rtdm_sendto() for parameters and return values. 
Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags, + const struct sockaddr *to, socklen_t tolen); + +/** + * @brief Transmit message to socket + * + * Refer to rtdm_send() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags); + +/** + * @brief Bind to local address + * + * Refer to rtdm_bind() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen); + +/** + * @brief Connect to remote address + * + * Refer to rtdm_connect() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_connect(int fd, const struct sockaddr *serv_addr, socklen_t addrlen); + +/** + * @brief Listen to incoming connection requests + * + * Refer to rtdm_listen() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_listen(int fd, int backlog); + +/** + * @brief Accept a connection request + * + * Refer to rtdm_accept() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen); + +/** + * @brief Shut down parts of a connection + * + * Refer to rtdm_shutdown() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_shutdown(int fd, int how); + +/** + * @brief Get socket option + * + * Refer to rtdm_getsockopt() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getsockopt(int fd, int level, int optname, void *optval, + socklen_t *optlen); + +/** + * @brief Set socket option + * + * Refer to rtdm_setsockopt() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_setsockopt(int fd, int level, int optname, const void *optval, + socklen_t optlen); + +/** + * @brief Get local socket address + * + * Refer to rtdm_getsockname() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen); + +/** + * @brief Get socket destination address + * + * Refer to rtdm_getpeername() for parameters and return values. Action + * depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". 
+ * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen); + +/** @} Inter-driver calls */ + +/** @} */ + +/*! + * @addtogroup rtdm_user_api + * @{ + */ + +/** + * @brief Open a device + * + * @param[in] path Device name + * @param[in] oflag Open flags + * @param ... Further parameters will be ignored. + * + * @return Positive file descriptor value on success, otherwise a negative + * error code. + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c open() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_open(const char *path, int oflag, ...); + +/** + * @brief Create a socket + * + * @param[in] protocol_family Protocol family (@c PF_xxx) + * @param[in] socket_type Socket type (@c SOCK_xxx) + * @param[in] protocol Protocol ID, 0 for default + * + * @return Positive file descriptor value on success, otherwise a negative + * error code. + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c socket() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_socket(int protocol_family, int socket_type, int protocol); + +/** + * @brief Close a device or socket + * + * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket() + * + * @return 0 on success, otherwise a negative error code. + * + * @note If the matching rtdm_open() or rtdm_socket() call took place in + * non-real-time context, rtdm_close() must be issued within non-real-time + * as well. Otherwise, the call will fail. + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c close() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_close(int fd); + +/** + * @brief Issue an IOCTL + * + * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket() + * @param[in] request IOCTL code + * @param ... Optional third argument, depending on IOCTL function + * (@c void @c * or @c unsigned @c long) + * + * @return Positiv value on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c ioctl() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_ioctl(int fd, int request, ...); + +/** + * @brief Read from device + * + * @param[in] fd File descriptor as returned by rtdm_open() + * @param[out] buf Input buffer + * @param[in] nbyte Number of bytes to read + * + * @return Number of bytes read, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c read() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_read(int fd, void *buf, size_t nbyte); + +/** + * @brief Write to device + * + * @param[in] fd File descriptor as returned by rtdm_open() + * @param[in] buf Output buffer + * @param[in] nbyte Number of bytes to write + * + * @return Number of bytes written, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". 
+ * + * @see @c write() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_write(int fd, const void *buf, size_t nbyte); + +/** + * @brief Receive message from socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in,out] msg Message descriptor + * @param[in] flags Message flags + * + * @return Number of bytes received, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c recvmsg() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags); + +/** + * @brief Receive message from socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] buf Message buffer + * @param[in] len Message buffer size + * @param[in] flags Message flags + * @param[out] from Buffer for message sender address + * @param[in,out] fromlen Address buffer size + * + * @return Number of bytes received, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c recvfrom() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags, + struct sockaddr *from, socklen_t *fromlen); + +/** + * @brief Receive message from socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] buf Message buffer + * @param[in] len Message buffer size + * @param[in] flags Message flags + * + * @return Number of bytes received, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c recv() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags); + +/** + * @brief Transmit message to socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] msg Message descriptor + * @param[in] flags Message flags + * + * @return Number of bytes sent, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c sendmsg() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags); + +/** + * @brief Transmit message to socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] buf Message buffer + * @param[in] len Message buffer size + * @param[in] flags Message flags + * @param[in] to Buffer for message destination address + * @param[in] tolen Address buffer size + * + * @return Number of bytes sent, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". 
+ * + * @see @c sendto() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags, + const struct sockaddr *to, socklen_t tolen); + +/** + * @brief Transmit message to socket + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] buf Message buffer + * @param[in] len Message buffer size + * @param[in] flags Message flags + * + * @return Number of bytes sent, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c send() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags); + +/** + * @brief Bind to local address + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] my_addr Address buffer + * @param[in] addrlen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c bind() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen); + +/** + * @brief Connect to remote address + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] serv_addr Address buffer + * @param[in] addrlen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c connect() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_connect(int fd, const struct sockaddr *serv_addr, + socklen_t addrlen); + +/** + * @brief Listen for incomming connection requests + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] backlog Maximum queue length + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c listen() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_listen(int fd, int backlog); + +/** + * @brief Accept connection requests + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] addr Buffer for remote address + * @param[in,out] addrlen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c accept() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{mode-unrestricted, might-switch} + */ +int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen); + +/** + * @brief Shut down parts of a connection + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] how Specifies the part to be shut down (@c SHUT_xxx) +* + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". 
+ * + * @see @c shutdown() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_shutdown(int fd, int how); + +/** + * @brief Get socket option + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] level Addressed stack level + * @param[in] optname Option name ID + * @param[out] optval Value buffer + * @param[in,out] optlen Value buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c getsockopt() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getsockopt(int fd, int level, int optname, void *optval, + socklen_t *optlen); + +/** + * @brief Set socket option + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[in] level Addressed stack level + * @param[in] optname Option name ID + * @param[in] optval Value buffer + * @param[in] optlen Value buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c setsockopt() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_setsockopt(int fd, int level, int optname, const void *optval, + socklen_t optlen); + +/** + * @brief Get local socket address + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] name Address buffer + * @param[in,out] namelen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c getsockname() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen); + +/** + * @brief Get socket destination address + * + * @param[in] fd File descriptor as returned by rtdm_socket() + * @param[out] name Address buffer + * @param[in,out] namelen Address buffer size + * + * @return 0 on success, otherwise negative error code + * + * Action depends on driver implementation, see @ref rtdm_profiles + * "Device Profiles". + * + * @see @c getpeername() in IEEE Std 1003.1, + * http://www.opengroup.org/onlinepubs/009695399 + * + * @coretags{task-unrestricted, might-switch} + */ +int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen); + +#endif /* DOXYGEN_CPP */ + +/** @} */ --- linux/kernel/xenomai/rtdm/drvlib.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/rtdm/drvlib.c 2022-03-21 12:58:29.107891711 +0100 @@ -0,0 +1,2502 @@ +/* + * Real-Time Driver Model for Xenomai, driver library + * + * Copyright (C) 2005-2007 Jan Kiszka + * Copyright (C) 2005 Joerg Langenberg + * Copyright (C) 2008 Gilles Chanteperdrix + * Copyright (C) 2014 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" +#include + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_clock Clock Services + * @{ + */ + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ + +/** + * @brief Get system time + * + * @return The system time in nanoseconds is returned + * + * @note The resolution of this service depends on the system timer. In + * particular, if the system timer is running in periodic mode, the return + * value will be limited to multiples of the timer tick period. + * + * @note The system timer may have to be started to obtain valid results. + * Whether this happens automatically (as on Xenomai) or is controlled by the + * application depends on the RTDM host environment. + * + * @coretags{unrestricted} + */ +nanosecs_abs_t rtdm_clock_read(void); + +/** + * @brief Get monotonic time + * + * @return The monotonic time in nanoseconds is returned + * + * @note The resolution of this service depends on the system timer. In + * particular, if the system timer is running in periodic mode, the return + * value will be limited to multiples of the timer tick period. + * + * @note The system timer may have to be started to obtain valid results. + * Whether this happens automatically (as on Xenomai) or is controlled by the + * application depends on the RTDM host environment. + * + * @coretags{unrestricted} + */ +nanosecs_abs_t rtdm_clock_read_monotonic(void); +#endif /* DOXYGEN_CPP */ +/** @} */ + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_task Task Services + * @{ + */ + +/** + * @brief Initialise and start a real-time task + * + * After initialising a task, the task handle remains valid and can be + * passed to RTDM services until either rtdm_task_destroy() or + * rtdm_task_join() was invoked. + * + * @param[in,out] task Task handle + * @param[in] name Optional task name + * @param[in] task_proc Procedure to be executed by the task + * @param[in] arg Custom argument passed to @c task_proc() on entry + * @param[in] priority Priority of the task, see also + * @ref rtdmtaskprio "Task Priority Range" + * @param[in] period Period in nanoseconds of a cyclic task, 0 for non-cyclic + * mode. Waiting for the first and subsequent periodic events is + * done using rtdm_task_wait_period(). 
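+ *
+ * A minimal sketch of spawning a driver worker task is shown below
+ * (hypothetical driver code; names prefixed with mydrv_ are illustrative
+ * only):
+ *
+ * @code
+ * static void mydrv_worker(void *arg)
+ * {
+ *	struct mydrv_context *ctx = arg;
+ *
+ *	while (!rtdm_task_should_stop()) {
+ *		// wait for and process the next unit of work
+ *		...
+ *	}
+ * }
+ *
+ * // e.g. from the device open handler (secondary mode):
+ * ret = rtdm_task_init(&ctx->worker, "mydrv worker", mydrv_worker, ctx,
+ *			RTDM_TASK_LOWEST_PRIORITY, 0);
+ * @endcode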
+ * + * @return 0 on success, otherwise negative error code + * + * @coretags{secondary-only, might-switch} + */ +int rtdm_task_init(rtdm_task_t *task, const char *name, + rtdm_task_proc_t task_proc, void *arg, + int priority, nanosecs_rel_t period) +{ + union xnsched_policy_param param; + struct xnthread_start_attr sattr; + struct xnthread_init_attr iattr; + int err; + + if (!realtime_core_enabled()) + return -ENOSYS; + + iattr.name = name; + iattr.flags = 0; + iattr.personality = &xenomai_personality; + iattr.affinity = CPU_MASK_ALL; + param.rt.prio = priority; + + err = xnthread_init(task, &iattr, &xnsched_class_rt, ¶m); + if (err) + return err; + + /* We need an anonymous registry entry to obtain a handle for fast + mutex locking. */ + err = xnthread_register(task, ""); + if (err) + goto cleanup_out; + + if (period > 0) { + err = xnthread_set_periodic(task, XN_INFINITE, + XN_RELATIVE, period); + if (err) + goto cleanup_out; + } + + sattr.mode = 0; + sattr.entry = task_proc; + sattr.cookie = arg; + err = xnthread_start(task, &sattr); + if (err) + goto cleanup_out; + + return 0; + + cleanup_out: + xnthread_cancel(task); + return err; +} + +EXPORT_SYMBOL_GPL(rtdm_task_init); + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ +/** + * @brief Destroy a real-time task + * + * This call sends a termination request to @a task, then waits for it + * to exit. All RTDM task should check for pending termination + * requests by calling rtdm_task_should_stop() from their work loop. + * + * If @a task is current, rtdm_task_destroy() terminates the current + * context, and does not return to the caller. + * + * @param[in,out] task Task handle as returned by rtdm_task_init() + * + * @note Passing the same task handle to RTDM services after the completion of + * this function is not allowed. + * + * @coretags{secondary-only, might-switch} + */ +void rtdm_task_destroy(rtdm_task_t *task); + +/** + * @brief Check for pending termination request + * + * Check whether a termination request was received by the current + * RTDM task. Termination requests are sent by calling + * rtdm_task_destroy(). + * + * @return Non-zero indicates that a termination request is pending, + * in which case the caller should wrap up and exit. + * + * @coretags{rtdm-task, might-switch} + */ +int rtdm_task_should_stop(void); + +/** + * @brief Adjust real-time task priority + * + * @param[in,out] task Task handle as returned by rtdm_task_init() + * @param[in] priority New priority of the task, see also + * @ref rtdmtaskprio "Task Priority Range" + * + * @coretags{task-unrestricted, might-switch} + */ +void rtdm_task_set_priority(rtdm_task_t *task, int priority); + +/** + * @brief Adjust real-time task period + * + * @param[in,out] task Task handle as returned by rtdm_task_init(), or + * NULL for referring to the current RTDM task or Cobalt thread. + * + * @param[in] start_date The initial (absolute) date of the first + * release point, expressed in nanoseconds. @a task will be delayed + * by the first call to rtdm_task_wait_period() until this point is + * reached. If @a start_date is zero, the first release point is set + * to @a period nanoseconds after the current date. + + * @param[in] period New period in nanoseconds of a cyclic task, zero + * to disable cyclic mode for @a task. 
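+ *
+ * A typical periodic work loop might look as follows (sketch only; the
+ * 1 ms period is an arbitrary example value):
+ *
+ * @code
+ * // switch the current RTDM task to a 1 ms period, starting one
+ * // period from now
+ * ret = rtdm_task_set_period(NULL, 0, 1000000);
+ * if (ret)
+ *	return ret;
+ *
+ * while (!rtdm_task_should_stop()) {
+ *	ret = rtdm_task_wait_period(NULL);
+ *	if (ret && ret != -ETIMEDOUT)
+ *		break;	// e.g. -EINTR after rtdm_task_unblock()
+ *	// do one cycle of periodic work
+ *	...
+ * }
+ * @endcode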
+ * + * @coretags{task-unrestricted} + */ +int rtdm_task_set_period(rtdm_task_t *task, nanosecs_abs_t start_date, + nanosecs_rel_t period); + +/** + * @brief Wait on next real-time task period + * + * @param[in] overruns_r Address of a long word receiving the count of + * overruns if -ETIMEDOUT is returned, or NULL if the caller don't + * need that information. + * + * @return 0 on success, otherwise: + * + * - -EINVAL is returned if calling task is not in periodic mode. + * + * - -ETIMEDOUT is returned if a timer overrun occurred, which indicates + * that a previous release point has been missed by the calling task. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_wait_period(unsigned long *overruns_r); + +/** + * @brief Activate a blocked real-time task + * + * @return Non-zero is returned if the task was actually unblocked from a + * pending wait state, 0 otherwise. + * + * @coretags{unrestricted, might-switch} + */ +int rtdm_task_unblock(rtdm_task_t *task); + +/** + * @brief Get current real-time task + * + * @return Pointer to task handle + * + * @coretags{mode-unrestricted} + */ +rtdm_task_t *rtdm_task_current(void); + +/** + * @brief Sleep a specified amount of time + * + * @param[in] delay Delay in nanoseconds, see @ref RTDM_TIMEOUT_xxx for + * special values. + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has been unblock by a signal or + * explicitly via rtdm_task_unblock(). + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_sleep(nanosecs_rel_t delay); + +/** + * @brief Sleep until a specified absolute time + * + * @deprecated Use rtdm_task_sleep_abs instead! + * + * @param[in] wakeup_time Absolute timeout in nanoseconds + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has been unblock by a signal or + * explicitly via rtdm_task_unblock(). + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_sleep_until(nanosecs_abs_t wakeup_time); + +/** + * @brief Sleep until a specified absolute time + * + * @param[in] wakeup_time Absolute timeout in nanoseconds + * @param[in] mode Selects the timer mode, see RTDM_TIMERMODE_xxx for details + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has been unblock by a signal or + * explicitly via rtdm_task_unblock(). + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * - -EINVAL is returned if an invalid parameter was passed. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_sleep_abs(nanosecs_abs_t wakeup_time, enum rtdm_timer_mode mode); + +/** + * @brief Safe busy waiting + * + * This service alternates active spinning and sleeping within a wait + * loop, until a condition is satisfied. While sleeping, a task is + * scheduled out and does not consume any CPU time. + * + * rtdm_task_busy_wait() is particularly useful for waiting for a + * state change reading an I/O register, which usually happens shortly + * after the wait starts, without incurring the adverse effects of + * long busy waiting if it doesn't. + * + * @param[in] condition The C expression to be tested for detecting + * completion. + * @param[in] spin_ns The time to spin on @a condition before + * sleeping, expressed as a count of nanoseconds. 
+ * @param[in] sleep_ns The time to sleep for before spinning again, + * expressed as a count of nanoseconds. + * + * @return 0 on success if @a condition is satisfied, otherwise: + * + * - -EINTR is returned if the calling task has been unblocked by a + * Linux signal or explicitly via rtdm_task_unblock(). + * + * - -EPERM may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_task_busy_wait(bool condition, nanosecs_rel_t spin_ns, + nanosecs_rel_t sleep_ns); + +/** + * @brief Register wait context + * + * rtdm_wait_prepare() registers a wait context structure for the + * caller, which can be later retrieved by a call to + * rtdm_wait_get_context(). This call is normally issued before the + * current task blocks on a wait object, waiting for some (producer) + * code to wake it up. Arbitrary data can be exchanged between both + * sites via the wait context structure, which is allocated by the + * waiter (consumer) side. + * + * @a wc is the address of an anchor object which is commonly embedded + * into a larger structure with arbitrary contents, which needs to be + * shared between the consumer (waiter) and the producer for + * implementing the wait code. + * + * A typical implementation pattern for the wait side is: + * + * @code + * struct rtdm_waitqueue wq; + * struct some_wait_context { + * int input_value; + * int output_value; + * struct rtdm_wait_context wc; + * } wait_context; + * + * wait_context.input_value = 42; + * rtdm_wait_prepare(&wait_context); + * ret = rtdm_wait_condition(&wq, rtdm_wait_is_completed(&wait_context)); + * if (ret) + * goto wait_failed; + * handle_event(wait_context.output_value); + * @endcode + * + * On the producer side, the implementation would look like: + * + * @code + * struct rtdm_waitqueue wq; + * struct some_wait_context { + * int input_value; + * int output_value; + * struct rtdm_wait_context wc; + * } *wait_context_ptr; + * struct rtdm_wait_context *wc; + * rtdm_task_t *task; + * + * rtdm_for_each_waiter(task, &wq) { + * wc = rtdm_wait_get_context(task); + * wait_context_ptr = container_of(wc, struct some_wait_context, wc); + * wait_context_ptr->output_value = 12; + * } + * rtdm_waitqueue_broadcast(&wq); + * @endcode + * + * @param wc Wait context to register. + */ +void rtdm_wait_prepare(struct rtdm_wait_context *wc); + +/** + * @brief Mark completion for a wait context + * + * rtdm_complete_wait() marks a wait context as completed, so that + * rtdm_wait_is_completed() returns true for such context. + * + * @param wc Wait context to complete. + */ +void rtdm_wait_complete(struct rtdm_wait_context *wc); + +/** + * @brief Test completion of a wait context + * + * rtdm_wait_is_completed() returns true if rtdm_complete_wait() was + * called for @a wc. The completion mark is reset each time + * rtdm_wait_prepare() is called for a wait context. + * + * @param wc Wait context to check for completion. + * + * @return non-zero/true if rtdm_wait_complete() was called for @a wc, + * zero otherwise. + */ +int rtdm_wait_is_completed(struct rtdm_wait_context *wc); + +#endif /* DOXYGEN_CPP */ + +int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode) +{ + struct xnthread *thread; + + if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p())) + return -EPERM; + + thread = xnthread_current(); + xnthread_suspend(thread, XNDELAY, timeout, mode, NULL); + + return xnthread_test_info(thread, XNBREAK) ? 
-EINTR : 0; +} + +EXPORT_SYMBOL_GPL(__rtdm_task_sleep); + +/** + * @brief Wait on a real-time task to terminate + * + * @param[in,out] task Task handle as returned by rtdm_task_init() + * + * @note Passing the same task handle to RTDM services after the + * completion of this function is not allowed. + * + * @note This service does not trigger the termination of the targeted + * task. The user has to take of this, otherwise rtdm_task_join() + * will never return. + * + * @coretags{mode-unrestricted} + */ +void rtdm_task_join(rtdm_task_t *task) +{ + trace_cobalt_driver_task_join(task); + + xnthread_join(task, true); +} + +EXPORT_SYMBOL_GPL(rtdm_task_join); + +/** + * @brief Busy-wait a specified amount of time + * + * This service does not schedule out the caller, but rather spins in + * a tight loop, burning CPU cycles until the timeout elapses. + * + * @param[in] delay Delay in nanoseconds. Note that a zero delay does @b not + * have the meaning of @c RTDM_TIMEOUT_INFINITE here. + * + * @note The caller must not be migratable to different CPUs while executing + * this service. Otherwise, the actual delay will be undefined. + * + * @coretags{unrestricted} + */ +void rtdm_task_busy_sleep(nanosecs_rel_t delay) +{ + xnticks_t wakeup; + + wakeup = xnclock_read_raw(&nkclock) + + xnclock_ns_to_ticks(&nkclock, delay); + + while ((xnsticks_t)(xnclock_read_raw(&nkclock) - wakeup) < 0) + cpu_relax(); +} + +EXPORT_SYMBOL_GPL(rtdm_task_busy_sleep); +/** @} */ + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_timer Timer Services + * @{ + */ + +/** + * @brief Initialise a timer + * + * @param[in,out] timer Timer handle + * @param[in] handler Handler to be called on timer expiry + * @param[in] name Optional timer name + * + * @return 0 on success, otherwise negative error code + * + * @coretags{task-unrestricted} + */ +int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler, + const char *name) +{ + if (!realtime_core_enabled()) + return -ENOSYS; + + xntimer_init((timer), &nkclock, handler, NULL, XNTIMER_IGRAVITY); + xntimer_set_name((timer), (name)); + return 0; +} + +EXPORT_SYMBOL_GPL(rtdm_timer_init); + +/** + * @brief Destroy a timer + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * + * @coretags{task-unrestricted} + */ +void rtdm_timer_destroy(rtdm_timer_t *timer) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + xntimer_destroy(timer); + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_timer_destroy); + +/** + * @brief Start a timer + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * @param[in] expiry Firing time of the timer, @c mode defines if relative or + * absolute + * @param[in] interval Relative reload value, > 0 if the timer shall work in + * periodic mode with the specific interval, 0 for one-shot timers + * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for + * possible values + * + * @return 0 on success, otherwise: + * + * - -ETIMEDOUT is returned if @c expiry describes an absolute date in + * the past. In such an event, the timer is nevertheless armed for the + * next shot in the timeline if @a interval is non-zero. 
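+ *
+ * A minimal sketch follows (hypothetical driver code; the 1 ms values are
+ * illustrative only):
+ *
+ * @code
+ * static void mydrv_timer_handler(rtdm_timer_t *timer)
+ * {
+ *	// runs in real-time timer context, keep it short
+ *	...
+ * }
+ *
+ * ret = rtdm_timer_init(&ctx->timer, mydrv_timer_handler, "mydrv timer");
+ * if (ret)
+ *	return ret;
+ *
+ * // first expiry in 1 ms, then every 1 ms
+ * ret = rtdm_timer_start(&ctx->timer, 1000000, 1000000,
+ *			  RTDM_TIMERMODE_RELATIVE);
+ * @endcode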
+ * + * @coretags{unrestricted} + */ +int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry, + nanosecs_rel_t interval, enum rtdm_timer_mode mode) +{ + spl_t s; + int err; + + xnlock_get_irqsave(&nklock, s); + err = xntimer_start(timer, expiry, interval, (xntmode_t)mode); + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +EXPORT_SYMBOL_GPL(rtdm_timer_start); + +/** + * @brief Stop a timer + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * + * @coretags{unrestricted} + */ +void rtdm_timer_stop(rtdm_timer_t *timer) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + xntimer_stop(timer); + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_timer_stop); + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ +/** + * @brief Start a timer from inside a timer handler + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * @param[in] expiry Firing time of the timer, @c mode defines if relative or + * absolute + * @param[in] interval Relative reload value, > 0 if the timer shall work in + * periodic mode with the specific interval, 0 for one-shot timers + * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for + * possible values + * + * @return 0 on success, otherwise: + * + * - -ETIMEDOUT is returned if @c expiry describes an absolute date in the + * past. + * + * @coretags{coreirq-only} + */ +int rtdm_timer_start_in_handler(rtdm_timer_t *timer, nanosecs_abs_t expiry, + nanosecs_rel_t interval, + enum rtdm_timer_mode mode); + +/** + * @brief Stop a timer from inside a timer handler + * + * @param[in,out] timer Timer handle as returned by rtdm_timer_init() + * + * @coretags{coreirq-only} + */ +void rtdm_timer_stop_in_handler(rtdm_timer_t *timer); +#endif /* DOXYGEN_CPP */ +/** @} */ + +/* --- IPC cleanup helper --- */ + +#define RTDM_SYNCH_DELETED XNSYNCH_SPARE0 + +void __rtdm_synch_flush(struct xnsynch *synch, unsigned long reason) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (reason == XNRMID) + xnsynch_set_status(synch, RTDM_SYNCH_DELETED); + + if (likely(xnsynch_flush(synch, reason) == XNSYNCH_RESCHED)) + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(__rtdm_synch_flush); + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_sync Synchronisation Services + * @{ + */ + +/*! + * @name Timeout Sequence Management + * @{ + */ + +/** + * @brief Initialise a timeout sequence + * + * This service initialises a timeout sequence handle according to the given + * timeout value. Timeout sequences allow to maintain a continuous @a timeout + * across multiple calls of blocking synchronisation services. A typical + * application scenario is given below. + * + * @param[in,out] timeout_seq Timeout sequence handle + * @param[in] timeout Relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values + * + * Application Scenario: + * @code +int device_service_routine(...) +{ + rtdm_toseq_t timeout_seq; + ... + + rtdm_toseq_init(&timeout_seq, timeout); + ... + while (received < requested) { + ret = rtdm_event_timedwait(&data_available, timeout, &timeout_seq); + if (ret < 0) // including -ETIMEDOUT + break; + + // receive some data + ... + } + ... +} + * @endcode + * Using a timeout sequence in such a scenario avoids that the user-provided + * relative @c timeout is restarted on every call to rtdm_event_timedwait(), + * potentially causing an overall delay that is larger than specified by + * @c timeout. 
Moreover, all functions supporting timeout sequences also + * interpret special timeout values (infinite and non-blocking), + * disburdening the driver developer from handling them separately. + * + * @coretags{task-unrestricted} + */ +void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout) +{ + XENO_WARN_ON(COBALT, xnsched_unblockable_p()); /* only warn here */ + + *timeout_seq = xnclock_read_monotonic(&nkclock) + timeout; +} + +EXPORT_SYMBOL_GPL(rtdm_toseq_init); + +/** @} */ + +/** + * @ingroup rtdm_sync + * @defgroup rtdm_sync_event Event Services + * @{ + */ + +/** + * @brief Initialise an event + * + * @param[in,out] event Event handle + * @param[in] pending Non-zero if event shall be initialised as set, 0 otherwise + * + * @coretags{task-unrestricted} + */ +void rtdm_event_init(rtdm_event_t *event, unsigned long pending) +{ + spl_t s; + + trace_cobalt_driver_event_init(event, pending); + + /* Make atomic for re-initialisation support */ + xnlock_get_irqsave(&nklock, s); + + xnsynch_init(&event->synch_base, XNSYNCH_PRIO, NULL); + if (pending) + xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING); + xnselect_init(&event->select_block); + + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_event_init); + +/** + * @brief Destroy an event + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * + * @coretags{task-unrestricted, might-switch} + */ +void rtdm_event_destroy(rtdm_event_t *event) +{ + trace_cobalt_driver_event_destroy(event); + if (realtime_core_enabled()) { + __rtdm_synch_flush(&event->synch_base, XNRMID); + xnselect_destroy(&event->select_block); + } +} +EXPORT_SYMBOL_GPL(rtdm_event_destroy); + +/** + * @brief Signal an event occurrence to currently listening waiters + * + * This function wakes up all current waiters of the given event, but it does + * not change the event state. Subsequently callers of rtdm_event_wait() or + * rtdm_event_timedwait() will therefore be blocked first. + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_event_pulse(rtdm_event_t *event) +{ + trace_cobalt_driver_event_pulse(event); + __rtdm_synch_flush(&event->synch_base, 0); +} +EXPORT_SYMBOL_GPL(rtdm_event_pulse); + +/** + * @brief Signal an event occurrence + * + * This function sets the given event and wakes up all current waiters. If no + * waiter is presently registered, the next call to rtdm_event_wait() or + * rtdm_event_timedwait() will return immediately. + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * + * @coretags{unrestricted, might-switch} + */ +void rtdm_event_signal(rtdm_event_t *event) +{ + int resched = 0; + spl_t s; + + trace_cobalt_driver_event_signal(event); + + xnlock_get_irqsave(&nklock, s); + + xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING); + if (xnsynch_flush(&event->synch_base, 0)) + resched = 1; + if (xnselect_signal(&event->select_block, 1)) + resched = 1; + if (resched) + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); +} + +EXPORT_SYMBOL_GPL(rtdm_event_signal); + +/** + * @brief Wait on event occurrence + * + * This is the light-weight version of rtdm_event_timedwait(), implying an + * infinite timeout. + * + * @param[in,out] event Event handle as returned by rtdm_event_init() + * + * @return 0 on success, otherwise: + * + * - -EINTR is returned if calling task has been unblock by a signal or + * explicitly via rtdm_task_unblock(). 
+ *
+ * - -EIDRM is returned if @a event has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_event_wait(rtdm_event_t *event)
+{
+	return rtdm_event_timedwait(event, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_wait);
+
+/**
+ * @brief Wait on event occurrence with timeout
+ *
+ * This function waits or tests for the occurrence of the given event, taking
+ * the provided timeout into account. On successful return, the event is
+ * reset.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EINTR is returned if calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a event has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * - -EWOULDBLOCK is returned if a negative @a timeout (i.e., non-blocking
+ * operation) has been specified.
+ *
+ * @coretags{primary-timed, might-switch}
+ */
+int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *thread;
+	int err = 0, ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p()))
+		return -EPERM;
+
+	trace_cobalt_driver_event_wait(event, xnthread_current());
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(event->synch_base.status & RTDM_SYNCH_DELETED))
+		err = -EIDRM;
+	else if (likely(event->synch_base.status & RTDM_EVENT_PENDING)) {
+		xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING);
+		xnselect_signal(&event->select_block, 0);
+	} else {
+		/* non-blocking mode */
+		if (timeout < 0) {
+			err = -EWOULDBLOCK;
+			goto unlock_out;
+		}
+
+		thread = xnthread_current();
+
+		if (timeout_seq && (timeout > 0))
+			/* timeout sequence */
+			ret = xnsynch_sleep_on(&event->synch_base, *timeout_seq,
+					       XN_ABSOLUTE);
+		else
+			/* infinite or relative timeout */
+			ret = xnsynch_sleep_on(&event->synch_base, timeout, XN_RELATIVE);
+
+		if (likely(ret == 0)) {
+			xnsynch_clear_status(&event->synch_base,
+					     RTDM_EVENT_PENDING);
+			xnselect_signal(&event->select_block, 0);
+		} else if (ret & XNTIMEO)
+			err = -ETIMEDOUT;
+		else if (ret & XNRMID)
+			err = -EIDRM;
+		else /* XNBREAK */
+			err = -EINTR;
+	}
+
+unlock_out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_timedwait);
+
+/**
+ * @brief Clear event state
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_event_clear(rtdm_event_t *event)
+{
+	spl_t s;
+
+	trace_cobalt_driver_event_clear(event);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING);
+	xnselect_signal(&event->select_block, 0);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_clear);
+
+/**
+ * @brief Bind a selector to an event
+ *
+ * This function binds the given selector to an event so that the former is
+ * notified when the event state changes. Typically the select binding handler
+ * will invoke this service.
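+ *
+ * A sketch of such a binding handler is given below (hypothetical driver
+ * code; the mydrv_ names are made up, and the handler prototype is assumed
+ * to follow the select operation of struct rtdm_fd_ops, with @a selector,
+ * @a type and @a fd_index handed over by the RTDM core):
+ *
+ * @code
+ * static int mydrv_select(struct rtdm_fd *fd, struct xnselector *selector,
+ *			   unsigned int type, unsigned int fd_index)
+ * {
+ *	struct mydrv_context *ctx = rtdm_fd_to_private(fd);
+ *
+ *	return rtdm_event_select(&ctx->rx_event, selector, type, fd_index);
+ * }
+ * @endcode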
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ * @param[in,out] selector Selector as passed to the select binding handler
+ * @param[in] type Type of the bound event as passed to the select binding handler
+ * @param[in] fd_index File descriptor index as passed to the select binding
+ * handler
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ENOMEM is returned if there is insufficient memory to establish the
+ * dynamic binding.
+ *
+ * - -EINVAL is returned if @a type or @a fd_index are invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector,
+		      enum rtdm_selecttype type, unsigned int fd_index)
+{
+	struct xnselect_binding *binding;
+	int err;
+	spl_t s;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (!binding)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xnselect_bind(&event->select_block,
+			    binding, selector, type, fd_index,
+			    event->synch_base.status & (RTDM_SYNCH_DELETED |
+							RTDM_EVENT_PENDING));
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err)
+		xnfree(binding);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_event_select);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_sem Semaphore Services
+ * @{
+ */
+
+/**
+ * @brief Initialise a semaphore
+ *
+ * @param[in,out] sem Semaphore handle
+ * @param[in] value Initial value of the semaphore
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value)
+{
+	spl_t s;
+
+	trace_cobalt_driver_sem_init(sem, value);
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+
+	sem->value = value;
+	xnsynch_init(&sem->synch_base, XNSYNCH_PRIO, NULL);
+	xnselect_init(&sem->select_block);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_init);
+
+/**
+ * @brief Destroy a semaphore
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_sem_destroy(rtdm_sem_t *sem)
+{
+	trace_cobalt_driver_sem_destroy(sem);
+	if (realtime_core_enabled()) {
+		__rtdm_synch_flush(&sem->synch_base, XNRMID);
+		xnselect_destroy(&sem->select_block);
+	}
+}
+EXPORT_SYMBOL_GPL(rtdm_sem_destroy);
+
+/**
+ * @brief Decrement a semaphore
+ *
+ * This is the light-weight version of rtdm_sem_timeddown(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a sem has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_sem_down(rtdm_sem_t *sem)
+{
+	return rtdm_sem_timeddown(sem, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_down);
+
+/**
+ * @brief Decrement a semaphore with timeout
+ *
+ * This function tries to decrement the given semaphore's value if it is
+ * positive on entry. If not, the caller is blocked unless non-blocking
+ * operation was selected.
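+ *
+ * A common pattern is to post the semaphore from interrupt context and
+ * consume it from a blocking read handler, as sketched below (hypothetical
+ * driver code; mydrv_ names are illustrative only):
+ *
+ * @code
+ * // interrupt handler: one rtdm_sem_up() per data item made available
+ * static int mydrv_interrupt(rtdm_irq_t *irq_handle)
+ * {
+ *	struct mydrv_context *ctx =
+ *		rtdm_irq_get_arg(irq_handle, struct mydrv_context);
+ *
+ *	// fetch data from the device, then:
+ *	rtdm_sem_up(&ctx->rx_sem);
+ *
+ *	return RTDM_IRQ_HANDLED;
+ * }
+ *
+ * // read handler side:
+ * ret = rtdm_sem_timeddown(&ctx->rx_sem, timeout, &timeout_seq);
+ * if (ret)	// -ETIMEDOUT, -EINTR, -EIDRM, ...
+ *	return ret;
+ * // consume one data item
+ * @endcode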
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is negative and the semaphore
+ * value is currently not positive.
+ *
+ * - -EINTR is returned if calling task has been unblocked by a signal or
+ * explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a sem has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-timed, might-switch}
+ */
+int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
+		       rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *thread;
+	int err = 0, ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p()))
+		return -EPERM;
+
+	trace_cobalt_driver_sem_wait(sem, xnthread_current());
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(sem->synch_base.status & RTDM_SYNCH_DELETED))
+		err = -EIDRM;
+	else if (sem->value > 0) {
+		if(!--sem->value)
+			xnselect_signal(&sem->select_block, 0);
+	} else if (timeout < 0) /* non-blocking mode */
+		err = -EWOULDBLOCK;
+	else {
+		thread = xnthread_current();
+
+		if (timeout_seq && timeout > 0)
+			/* timeout sequence */
+			ret = xnsynch_sleep_on(&sem->synch_base, *timeout_seq,
+					       XN_ABSOLUTE);
+		else
+			/* infinite or relative timeout */
+			ret = xnsynch_sleep_on(&sem->synch_base, timeout, XN_RELATIVE);
+
+		if (ret) {
+			if (ret & XNTIMEO)
+				err = -ETIMEDOUT;
+			else if (ret & XNRMID)
+				err = -EIDRM;
+			else /* XNBREAK */
+				err = -EINTR;
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_timeddown);
+
+/**
+ * @brief Increment a semaphore
+ *
+ * This function increments the given semaphore's value, waking up a potential
+ * waiter which was blocked upon rtdm_sem_down().
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_sem_up(rtdm_sem_t *sem)
+{
+	spl_t s;
+
+	trace_cobalt_driver_sem_up(sem);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnsynch_wakeup_one_sleeper(&sem->synch_base))
+		xnsched_run();
+	else
+		if (sem->value++ == 0
+		    && xnselect_signal(&sem->select_block, 1))
+			xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_up);
+
+/**
+ * @brief Bind a selector to a semaphore
+ *
+ * This function binds the given selector to the semaphore so that the former
+ * is notified when the semaphore state changes. Typically the select binding
+ * handler will invoke this service.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ * @param[in,out] selector Selector as passed to the select binding handler
+ * @param[in] type Type of the bound event as passed to the select binding handler
+ * @param[in] fd_index File descriptor index as passed to the select binding
+ * handler
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ENOMEM is returned if there is insufficient memory to establish the
+ * dynamic binding.
+ *
+ * - -EINVAL is returned if @a type or @a fd_index are invalid.
+ * + * @coretags{task-unrestricted} + */ +int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector, + enum rtdm_selecttype type, unsigned int fd_index) +{ + struct xnselect_binding *binding; + int err; + spl_t s; + + binding = xnmalloc(sizeof(*binding)); + if (!binding) + return -ENOMEM; + + xnlock_get_irqsave(&nklock, s); + err = xnselect_bind(&sem->select_block, binding, selector, + type, fd_index, + (sem->value > 0) || + sem->synch_base.status & RTDM_SYNCH_DELETED); + xnlock_put_irqrestore(&nklock, s); + + if (err) + xnfree(binding); + + return err; +} +EXPORT_SYMBOL_GPL(rtdm_sem_select); + +/** @} */ + +/** + * @ingroup rtdm_sync + * @defgroup rtdm_sync_mutex Mutex services + * @{ + */ + +/** + * @brief Initialise a mutex + * + * This function initalises a basic mutex with priority inversion protection. + * "Basic", as it does not allow a mutex owner to recursively lock the same + * mutex again. + * + * @param[in,out] mutex Mutex handle + * + * @coretags{task-unrestricted} + */ +void rtdm_mutex_init(rtdm_mutex_t *mutex) +{ + spl_t s; + + /* Make atomic for re-initialisation support */ + xnlock_get_irqsave(&nklock, s); + xnsynch_init(&mutex->synch_base, XNSYNCH_PI, &mutex->fastlock); + xnlock_put_irqrestore(&nklock, s); +} +EXPORT_SYMBOL_GPL(rtdm_mutex_init); + +/** + * @brief Destroy a mutex + * + * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init() + * + * @coretags{task-unrestricted, might-switch} + */ +void rtdm_mutex_destroy(rtdm_mutex_t *mutex) +{ + trace_cobalt_driver_mutex_destroy(mutex); + + if (realtime_core_enabled()) + __rtdm_synch_flush(&mutex->synch_base, XNRMID); +} +EXPORT_SYMBOL_GPL(rtdm_mutex_destroy); + +/** + * @brief Release a mutex + * + * This function releases the given mutex, waking up a potential waiter which + * was blocked upon rtdm_mutex_lock() or rtdm_mutex_timedlock(). + * + * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init() + * + * @coretags{primary-only, might-switch} + */ +void rtdm_mutex_unlock(rtdm_mutex_t *mutex) +{ + if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p())) + return; + + trace_cobalt_driver_mutex_release(mutex); + + if (unlikely(xnsynch_release(&mutex->synch_base, + xnsched_current_thread()))) + xnsched_run(); +} +EXPORT_SYMBOL_GPL(rtdm_mutex_unlock); + +/** + * @brief Request a mutex + * + * This is the light-weight version of rtdm_mutex_timedlock(), implying an + * infinite timeout. + * + * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init() + * + * @return 0 on success, otherwise: + * + * - -EIDRM is returned if @a mutex has been destroyed. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_mutex_lock(rtdm_mutex_t *mutex) +{ + return rtdm_mutex_timedlock(mutex, 0, NULL); +} + +EXPORT_SYMBOL_GPL(rtdm_mutex_lock); + +/** + * @brief Request a mutex with timeout + * + * This function tries to acquire the given mutex. If it is not available, the + * caller is blocked unless non-blocking operation was selected. + * + * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init() + * @param[in] timeout Relative timeout in nanoseconds, see + * @ref RTDM_TIMEOUT_xxx for special values + * @param[in,out] timeout_seq Handle of a timeout sequence as returned by + * rtdm_toseq_init() or NULL + * + * @return 0 on success, otherwise: + * + * - -ETIMEDOUT is returned if the if the request has not been satisfied + * within the specified amount of time. 
+ * + * - -EWOULDBLOCK is returned if @a timeout is negative and the semaphore + * value is currently not positive. + * + * - -EIDRM is returned if @a mutex has been destroyed. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{primary-only, might-switch} + */ +int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout, + rtdm_toseq_t *timeout_seq) +{ + struct xnthread *curr; + int ret; + spl_t s; + + if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p())) + return -EPERM; + + curr = xnthread_current(); + trace_cobalt_driver_mutex_wait(mutex, curr); + + xnlock_get_irqsave(&nklock, s); + + if (unlikely(mutex->synch_base.status & RTDM_SYNCH_DELETED)) { + ret = -EIDRM; + goto out; + } + + ret = xnsynch_try_acquire(&mutex->synch_base); + if (ret != -EBUSY) + goto out; + + if (timeout < 0) { + ret = -EWOULDBLOCK; + goto out; + } + + for (;;) { + if (timeout_seq && timeout > 0) /* timeout sequence */ + ret = xnsynch_acquire(&mutex->synch_base, *timeout_seq, + XN_ABSOLUTE); + else /* infinite or relative timeout */ + ret = xnsynch_acquire(&mutex->synch_base, timeout, + XN_RELATIVE); + if (ret == 0) + break; + if (ret & XNBREAK) + continue; + ret = ret & XNTIMEO ? -ETIMEDOUT : -EIDRM; + break; + } +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +EXPORT_SYMBOL_GPL(rtdm_mutex_timedlock); +/** @} */ + +/** @} Synchronisation services */ + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_irq Interrupt Management Services + * @{ + */ + +/** + * @brief Register an interrupt handler + * + * This function registers the provided handler with an IRQ line and enables + * the line. + * + * @param[in,out] irq_handle IRQ handle + * @param[in] irq_no Line number of the addressed IRQ + * @param[in] handler Interrupt handler + * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details + * @param[in] device_name Device name to show up in real-time IRQ lists + * @param[in] arg Pointer to be passed to the interrupt handler on invocation + * + * @return 0 on success, otherwise: + * + * - -EINVAL is returned if an invalid parameter was passed. + * + * - -EBUSY is returned if the specified IRQ line is already in use. + * + * - -ENOSYS is returned if the real-time core is disabled. + * + * @coretags{secondary-only} + */ +int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no, + rtdm_irq_handler_t handler, unsigned long flags, + const char *device_name, void *arg) +{ + return rtdm_irq_request_affine(irq_handle, irq_no, handler, flags, + device_name, arg, NULL); +} + +EXPORT_SYMBOL_GPL(rtdm_irq_request); + +/** + * @brief Register an interrupt handler + * + * This function registers the provided handler with an IRQ line and enables + * the line. + * + * @param[in,out] irq_handle IRQ handle + * @param[in] irq_no Line number of the addressed IRQ + * @param[in] handler Interrupt handler + * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details + * @param[in] device_name Device name to show up in real-time IRQ lists + * @param[in] arg Pointer to be passed to the interrupt handler on invocation + * @param[in] cpumask CPU affinity of the interrupt + * + * @return 0 on success, otherwise: + * + * - -EINVAL is returned if an invalid parameter was passed. + * + * - -EBUSY is returned if the specified IRQ line is already in use. + * + * - -ENOSYS is returned if the real-time core is disabled. 
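+ *
+ * A minimal registration sketch; the device structure, ISR and device
+ * name are hypothetical, and the ISR merely signals a semaphore so a
+ * task can take over the processing:
+ *
+ * @code
+ * #include <rtdm/driver.h>
+ *
+ * struct demo_device {
+ *         rtdm_irq_t irq_handle;
+ *         rtdm_sem_t irq_event;
+ * };
+ *
+ * static int demo_isr(rtdm_irq_t *irq_handle)
+ * {
+ *         struct demo_device *dev =
+ *                 rtdm_irq_get_arg(irq_handle, struct demo_device);
+ *
+ *         rtdm_sem_up(&dev->irq_event);  // defer the real work to a task
+ *         return RTDM_IRQ_HANDLED;
+ * }
+ *
+ * static int demo_attach_irq(struct demo_device *dev, unsigned int irq_no)
+ * {
+ *         return rtdm_irq_request(&dev->irq_handle, irq_no, demo_isr,
+ *                                 0, "demo-device", dev);
+ * }
+ * @endcode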
+ * + * @coretags{secondary-only} + */ +int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no, + rtdm_irq_handler_t handler, unsigned long flags, + const char *device_name, void *arg, + const cpumask_t *cpumask) +{ + int err; + + if (!realtime_core_enabled()) + return -ENOSYS; + + if (!XENO_ASSERT(COBALT, xnsched_root_p())) + return -EPERM; + + err = xnintr_init(irq_handle, device_name, irq_no, handler, NULL, flags); + if (err) + return err; + + err = xnintr_attach(irq_handle, arg, cpumask); + if (err) { + xnintr_destroy(irq_handle); + return err; + } + + xnintr_enable(irq_handle); + + return 0; +} + +EXPORT_SYMBOL_GPL(rtdm_irq_request_affine); + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ +/** + * @brief Release an interrupt handler + * + * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request() + * + * @return 0 on success, otherwise negative error code + * + * @note The caller is responsible for shutting down the IRQ source at device + * level before invoking this service. In turn, rtdm_irq_free ensures that any + * pending event on the given IRQ line is fully processed on return from this + * service. + * + * @coretags{secondary-only} + */ +int rtdm_irq_free(rtdm_irq_t *irq_handle); + +/** + * @brief Enable interrupt line + * + * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request() + * + * @return 0 on success, otherwise negative error code + * + * @note This service is for exceptional use only. Drivers should + * always prefer interrupt masking at device level (via corresponding + * control registers etc.) over masking at line level. Keep in mind + * that the latter is incompatible with IRQ line sharing and can also + * be more costly as interrupt controller access requires broader + * synchronization. Also, such service is solely available from + * secondary mode. The caller is responsible for excluding such + * conflicts. + * + * @coretags{secondary-only} + */ +int rtdm_irq_enable(rtdm_irq_t *irq_handle); + +/** + * @brief Disable interrupt line + * + * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request() + * + * @return 0 on success, otherwise negative error code + * + * @note This service is for exceptional use only. Drivers should + * always prefer interrupt masking at device level (via corresponding + * control registers etc.) over masking at line level. Keep in mind + * that the latter is incompatible with IRQ line sharing and can also + * be more costly as interrupt controller access requires broader + * synchronization. Also, such service is solely available from + * secondary mode. The caller is responsible for excluding such + * conflicts. + * + * @coretags{secondary-only} + */ +int rtdm_irq_disable(rtdm_irq_t *irq_handle); + +/** + * @brief Set interrupt affinity + * + * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request() + * + * @param[in] cpumask The new CPU affinity of the interrupt + * + * @return 0 on success, otherwise negative error code + * + * @coretags{secondary-only} + */ +int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle, const cpumask_t *cpumask); +#endif /* DOXYGEN_CPP */ + +/** @} Interrupt Management Services */ + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_nrtsignal Non-Real-Time Signalling Services + * + * These services provide a mechanism to request the execution of a specified + * handler in non-real-time context. The triggering can safely be performed in + * real-time context without suffering from unknown delays. 
The handler + * execution will be deferred until the next time the real-time subsystem + * releases the CPU to the non-real-time part. + * @{ + */ + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ + +/** + * @brief Register a non-real-time signal handler + * + * @param[in,out] nrt_sig Signal handle + * @param[in] handler Non-real-time signal handler + * @param[in] arg Custom argument passed to @c handler() on each invocation + * + * @return 0 on success, otherwise: + * + * - -EAGAIN is returned if no free signal slot is available. + * + * @coretags{task-unrestricted} + */ +int rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig, rtdm_nrtsig_handler_t handler, + void *arg); + +/** + * @brief Release a non-realtime signal handler + * + * @param[in,out] nrt_sig Signal handle + * + * @coretags{task-unrestricted} + */ +void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig); +#endif /* DOXYGEN_CPP */ + +void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work) +{ + struct rtdm_nrtsig *nrt_sig; + + nrt_sig = container_of(inband_work, typeof(*nrt_sig), inband_work); + nrt_sig->handler(nrt_sig, nrt_sig->arg); +} +EXPORT_SYMBOL_GPL(__rtdm_nrtsig_execute); + +/** + * Trigger non-real-time signal + * + * @param[in,out] nrt_sig Signal handle + * + * @coretags{unrestricted} + */ +void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig) +{ + pipeline_post_inband_work(nrt_sig); +} +EXPORT_SYMBOL_GPL(rtdm_nrtsig_pend); + +static LIST_HEAD(nrt_work_list); +DEFINE_PRIVATE_XNLOCK(nrt_work_lock); + +static void lostage_schedule_work(struct pipeline_inband_work *inband_work) +{ + struct work_struct *lostage_work; + spl_t s; + + xnlock_get_irqsave(&nrt_work_lock, s); + + while (!list_empty(&nrt_work_list)) { + lostage_work = list_first_entry(&nrt_work_list, + struct work_struct, entry); + list_del_init(&lostage_work->entry); + + xnlock_put_irqrestore(&nrt_work_lock, s); + + schedule_work(lostage_work); + + xnlock_get_irqsave(&nrt_work_lock, s); + } + + xnlock_put_irqrestore(&nrt_work_lock, s); +} + +static struct lostage_trigger_work { + struct pipeline_inband_work inband_work; /* Must be first. */ +} nrt_work = { + .inband_work = PIPELINE_INBAND_WORK_INITIALIZER(nrt_work, + lostage_schedule_work), +}; + +/** + * Put a work task in Linux non real-time global workqueue from primary mode. 
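+ *
+ * For instance, a driver might defer logging or housekeeping to the
+ * regular Linux context as sketched below; the work handler and the
+ * trigger path are illustrative only:
+ *
+ * @code
+ * #include <linux/workqueue.h>
+ * #include <rtdm/driver.h>
+ *
+ * static void demo_nrt_handler(struct work_struct *work)
+ * {
+ *         pr_info("running from the regular Linux workqueue\n");
+ * }
+ *
+ * static DECLARE_WORK(demo_work, demo_nrt_handler);
+ *
+ * static void demo_trigger(void)
+ * {
+ *         // safe from primary mode; execution is postponed until
+ *         // Linux regains control of the CPU
+ *         rtdm_schedule_nrt_work(&demo_work);
+ * }
+ * @endcode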
+ * + * @param lostage_work + */ +void rtdm_schedule_nrt_work(struct work_struct *lostage_work) +{ + spl_t s; + + if (is_secondary_domain()) { + schedule_work(lostage_work); + return; + } + + xnlock_get_irqsave(&nrt_work_lock, s); + + list_add_tail(&lostage_work->entry, &nrt_work_list); + pipeline_post_inband_work(&nrt_work); + + xnlock_put_irqrestore(&nrt_work_lock, s); +} +EXPORT_SYMBOL_GPL(rtdm_schedule_nrt_work); + +/** @} Non-Real-Time Signalling Services */ + + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_util Utility Services + * @{ + */ + +struct mmap_tramp_data { + struct rtdm_fd *fd; + struct file_operations *fops; + int (*mmap_handler)(struct rtdm_fd *fd, + struct vm_area_struct *vma); +}; + +struct mmap_helper_data { + void *src_vaddr; + phys_addr_t src_paddr; + struct vm_operations_struct *vm_ops; + void *vm_private_data; + struct mmap_tramp_data tramp_data; +}; + +static int mmap_kmem_helper(struct vm_area_struct *vma, void *va) +{ + unsigned long addr, len, pfn, to; + int ret = 0; + + to = (unsigned long)va; + addr = vma->vm_start; + len = vma->vm_end - vma->vm_start; + + if (to != PAGE_ALIGN(to) || (len & ~PAGE_MASK) != 0) + return -EINVAL; + +#ifndef CONFIG_MMU + pfn = __pa(to) >> PAGE_SHIFT; + ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED); +#else + if (to < VMALLOC_START || to >= VMALLOC_END) { + /* logical address. */ + pfn = __pa(to) >> PAGE_SHIFT; + ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED); + if (ret) + return ret; + } else { + /* vmalloc memory. */ + while (len > 0) { + struct page *page = vmalloc_to_page((void *)to); + if (vm_insert_page(vma, addr, page)) + return -EAGAIN; + addr += PAGE_SIZE; + to += PAGE_SIZE; + len -= PAGE_SIZE; + } + } + + if (cobalt_machine.prefault) + cobalt_machine.prefault(vma); +#endif + + return ret; +} + +static int mmap_iomem_helper(struct vm_area_struct *vma, phys_addr_t pa) +{ + pgprot_t prot = PAGE_SHARED; + unsigned long len; + + len = vma->vm_end - vma->vm_start; +#ifndef CONFIG_MMU + vma->vm_pgoff = pa >> PAGE_SHIFT; +#endif /* CONFIG_MMU */ + +#ifdef __HAVE_PHYS_MEM_ACCESS_PROT + if (vma->vm_file) + prot = phys_mem_access_prot(vma->vm_file, pa >> PAGE_SHIFT, + len, prot); +#endif + vma->vm_page_prot = pgprot_noncached(prot); + + return remap_pfn_range(vma, vma->vm_start, pa >> PAGE_SHIFT, + len, vma->vm_page_prot); +} + +static int mmap_buffer_helper(struct rtdm_fd *fd, struct vm_area_struct *vma) +{ + struct mmap_tramp_data *tramp_data = vma->vm_private_data; + struct mmap_helper_data *helper_data; + int ret; + + helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data); + vma->vm_ops = helper_data->vm_ops; + vma->vm_private_data = helper_data->vm_private_data; + + if (helper_data->src_paddr) + ret = mmap_iomem_helper(vma, helper_data->src_paddr); + else + ret = mmap_kmem_helper(vma, helper_data->src_vaddr); + + return ret; +} + +static int mmap_trampoline(struct file *filp, struct vm_area_struct *vma) +{ + struct mmap_tramp_data *tramp_data = filp->private_data; + int ret; + + vma->vm_private_data = tramp_data; + + ret = tramp_data->mmap_handler(tramp_data->fd, vma); + if (ret) + return ret; + + return 0; +} + +#ifndef CONFIG_MMU + +static unsigned long +internal_get_unmapped_area(struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct mmap_tramp_data *tramp_data = filp->private_data; + struct mmap_helper_data *helper_data; + unsigned long pa; + + helper_data = container_of(tramp_data, struct mmap_helper_data, 
tramp_data); + pa = helper_data->src_paddr; + if (pa) + return (unsigned long)__va(pa); + + return (unsigned long)helper_data->src_vaddr; +} + +static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data, + size_t len, off_t offset, int prot, int flags, + void **pptr) +{ + const struct file_operations *old_fops; + unsigned long u_addr; + struct file *filp; + + filp = filp_open("/dev/mem", O_RDWR, 0); + if (IS_ERR(filp)) + return PTR_ERR(filp); + + old_fops = filp->f_op; + filp->f_op = tramp_data->fops; + filp->private_data = tramp_data; + u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset); + filp_close(filp, current->files); + filp->f_op = old_fops; + + if (IS_ERR_VALUE(u_addr)) + return (int)u_addr; + + *pptr = (void *)u_addr; + + return 0; +} + +#else /* CONFIG_MMU */ + +static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data, + size_t len, off_t offset, int prot, int flags, + void **pptr) +{ + unsigned long u_addr; + struct file *filp; + + filp = anon_inode_getfile("[rtdm]", tramp_data->fops, tramp_data, O_RDWR); + if (IS_ERR(filp)) + return PTR_ERR(filp); + + u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset); + filp_close(filp, current->files); + + if (IS_ERR_VALUE(u_addr)) + return (int)u_addr; + + *pptr = (void *)u_addr; + + return 0; +} + +#define internal_get_unmapped_area NULL + +#endif /* CONFIG_MMU */ + +static struct file_operations internal_mmap_fops = { + .mmap = mmap_trampoline, + .get_unmapped_area = internal_get_unmapped_area +}; + +static unsigned long +driver_get_unmapped_area(struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct mmap_tramp_data *tramp_data = filp->private_data; + struct rtdm_fd *fd = tramp_data->fd; + + if (fd->ops->get_unmapped_area) + return fd->ops->get_unmapped_area(fd, len, pgoff, flags); + +#ifdef CONFIG_MMU + /* Run default handler. */ + return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); +#else + return -ENODEV; +#endif +} + +static struct file_operations driver_mmap_fops = { + .mmap = mmap_trampoline, + .get_unmapped_area = driver_get_unmapped_area +}; + +int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset, + int prot, int flags, void **pptr) +{ + struct mmap_tramp_data tramp_data = { + .fd = fd, + .fops = &driver_mmap_fops, + .mmap_handler = fd->ops->mmap, + }; + +#ifndef CONFIG_MMU + /* + * XXX: A .get_unmapped_area handler must be provided in the + * nommu case. We use this to force the memory management code + * not to share VM regions for distinct areas to map to, as it + * would otherwise do since all requests currently apply to + * the same file (i.e. from /dev/mem, see do_mmap_pgoff() in + * the nommu case). + */ + if (fd->ops->get_unmapped_area) + offset = fd->ops->get_unmapped_area(fd, len, 0, flags); +#endif + + return do_rtdm_mmap(&tramp_data, len, offset, prot, flags, pptr); +} + +/** + * Map a kernel memory range into the address space of the user. 
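+ *
+ * A possible use, mapping a kmalloc()'d scratch buffer into the
+ * caller's address space; the buffer size and naming are illustrative:
+ *
+ * @code
+ * #include <linux/slab.h>
+ * #include <linux/mman.h>
+ * #include <rtdm/driver.h>
+ *
+ * static int demo_map_buffer(struct rtdm_fd *fd, void **u_ptr)
+ * {
+ *         void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ *         int ret;
+ *
+ *         if (buf == NULL)
+ *                 return -ENOMEM;
+ *
+ *         *u_ptr = NULL;  // let the kernel choose the user address
+ *         ret = rtdm_mmap_to_user(fd, buf, PAGE_SIZE,
+ *                                 PROT_READ | PROT_WRITE, u_ptr,
+ *                                 NULL, NULL);
+ *         if (ret)
+ *                 kfree(buf);
+ *
+ *         return ret;
+ * }
+ * @endcode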
+ * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] src_addr Kernel virtual address to be mapped + * @param[in] len Length of the memory range + * @param[in] prot Protection flags for the user's memory range, typically + * either PROT_READ or PROT_READ|PROT_WRITE + * @param[in,out] pptr Address of a pointer containing the desired user + * address or NULL on entry and the finally assigned address on return + * @param[in] vm_ops vm_operations to be executed on the vm_area of the + * user memory range or NULL + * @param[in] vm_private_data Private data to be stored in the vm_area, + * primarily useful for vm_operation handlers + * + * @return 0 on success, otherwise (most common values): + * + * - -EINVAL is returned if an invalid start address, size, or destination + * address was passed. + * + * - -ENOMEM is returned if there is insufficient free memory or the limit of + * memory mapping for the user process was reached. + * + * - -EAGAIN is returned if too much memory has been already locked by the + * user process. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @note This service only works on memory regions allocated via kmalloc() or + * vmalloc(). To map physical I/O memory to user-space use + * rtdm_iomap_to_user() instead. + * + * @note RTDM supports two models for unmapping the memory area: + * - manual unmapping via rtdm_munmap(), which may be issued from a + * driver in response to an IOCTL call, or by a call to the regular + * munmap() call from the application. + * - automatic unmapping, triggered by the termination of the process + * which owns the mapping. + * To track the number of references pending on the resource mapped, + * the driver can pass the address of a close handler for the vm_area + * considered, in the @a vm_ops descriptor. See the relevant Linux + * kernel programming documentation (e.g. Linux Device Drivers book) + * on virtual memory management for details. + * + * @coretags{secondary-only} + */ +int rtdm_mmap_to_user(struct rtdm_fd *fd, + void *src_addr, size_t len, + int prot, void **pptr, + struct vm_operations_struct *vm_ops, + void *vm_private_data) +{ + struct mmap_helper_data helper_data = { + .tramp_data = { + .fd = fd, + .fops = &internal_mmap_fops, + .mmap_handler = mmap_buffer_helper, + }, + .src_vaddr = src_addr, + .src_paddr = 0, + .vm_ops = vm_ops, + .vm_private_data = vm_private_data + }; + + if (!XENO_ASSERT(COBALT, xnsched_root_p())) + return -EPERM; + + return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr); +} +EXPORT_SYMBOL_GPL(rtdm_mmap_to_user); + +/** + * Map an I/O memory range into the address space of the user. 
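+ *
+ * A possible use, exposing a device register window to the caller; the
+ * register base and length stand for hypothetical driver data:
+ *
+ * @code
+ * #include <linux/mman.h>
+ * #include <rtdm/driver.h>
+ *
+ * static int demo_map_regs(struct rtdm_fd *fd, phys_addr_t reg_base,
+ *                          size_t reg_len, void **u_ptr)
+ * {
+ *         *u_ptr = NULL;  // let the kernel choose the user address
+ *         return rtdm_iomap_to_user(fd, reg_base, reg_len, PROT_READ,
+ *                                   u_ptr, NULL, NULL);
+ * }
+ * @endcode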
+ * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] src_addr physical I/O address to be mapped + * @param[in] len Length of the memory range + * @param[in] prot Protection flags for the user's memory range, typically + * either PROT_READ or PROT_READ|PROT_WRITE + * @param[in,out] pptr Address of a pointer containing the desired user + * address or NULL on entry and the finally assigned address on return + * @param[in] vm_ops vm_operations to be executed on the vm_area of the + * user memory range or NULL + * @param[in] vm_private_data Private data to be stored in the vm_area, + * primarily useful for vm_operation handlers + * + * @return 0 on success, otherwise (most common values): + * + * - -EINVAL is returned if an invalid start address, size, or destination + * address was passed. + * + * - -ENOMEM is returned if there is insufficient free memory or the limit of + * memory mapping for the user process was reached. + * + * - -EAGAIN is returned if too much memory has been already locked by the + * user process. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @note RTDM supports two models for unmapping the memory area: + * - manual unmapping via rtdm_munmap(), which may be issued from a + * driver in response to an IOCTL call, or by a call to the regular + * munmap() call from the application. + * - automatic unmapping, triggered by the termination of the process + * which owns the mapping. + * To track the number of references pending on the resource mapped, + * the driver can pass the address of a close handler for the vm_area + * considered, in the @a vm_ops descriptor. See the relevant Linux + * kernel programming documentation (e.g. Linux Device Drivers book) + * on virtual memory management for details. + * + * @coretags{secondary-only} + */ +int rtdm_iomap_to_user(struct rtdm_fd *fd, + phys_addr_t src_addr, size_t len, + int prot, void **pptr, + struct vm_operations_struct *vm_ops, + void *vm_private_data) +{ + struct mmap_helper_data helper_data = { + .tramp_data = { + .fd = fd, + .fops = &internal_mmap_fops, + .mmap_handler = mmap_buffer_helper, + }, + .src_vaddr = NULL, + .src_paddr = src_addr, + .vm_ops = vm_ops, + .vm_private_data = vm_private_data + }; + + if (!XENO_ASSERT(COBALT, xnsched_root_p())) + return -EPERM; + + return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr); +} +EXPORT_SYMBOL_GPL(rtdm_iomap_to_user); + +/** + * Map a kernel logical memory range to a virtual user area. + * + * This routine is commonly used from a ->mmap() handler of a RTDM + * driver, for mapping a virtual memory area with a direct physical + * mapping over the user address space referred to by @a vma. + * + * @param[in] vma The VMA descriptor to receive the mapping. + * @param[in] va The kernel logical address to be mapped. + * + * @return 0 on success, otherwise a negated error code is returned. + * + * @note This service works on memory regions allocated via + * kmalloc(). To map a chunk of virtual space with no direct physical + * mapping, or a physical I/O memory to a VMA, call rtdm_mmap_vmem() + * or rtdm_mmap_iomem() respectively instead. + * + * @coretags{secondary-only} + */ +int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va) +{ + return mmap_kmem_helper(vma, va); +} +EXPORT_SYMBOL_GPL(rtdm_mmap_kmem); + +/** + * Map a virtual memory range to a virtual user area. 
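+ *
+ * An illustrative ->mmap() handler backed by a vmalloc()'d area; the
+ * ring buffer pointer stands for hypothetical driver state:
+ *
+ * @code
+ * #include <linux/vmalloc.h>
+ * #include <rtdm/driver.h>
+ *
+ * static void *demo_ring;  // vmalloc()'d at device setup time
+ *
+ * static int demo_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+ * {
+ *         return rtdm_mmap_vmem(vma, demo_ring);
+ * }
+ * @endcode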
+ * + * This routine is commonly used from a ->mmap() handler of a RTDM + * driver, for mapping a purely virtual memory area over the user + * address space referred to by @a vma. + * + * @param[in] vma The VMA descriptor to receive the mapping. + * @param[in] va The virtual address to be mapped. + * + * @return 0 on success, otherwise a negated error code is returned. + * + * @note This service works on memory regions allocated via + * vmalloc(). To map a chunk of logical space obtained from kmalloc(), + * or a physical I/O memory to a VMA, call rtdm_mmap_kmem() or + * rtdm_mmap_iomem() respectively instead. + * + * @coretags{secondary-only} + */ +int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va) +{ + /* + * Our helper handles both of directly mapped to physical and + * purely virtual memory ranges. + */ + return mmap_kmem_helper(vma, va); +} +EXPORT_SYMBOL_GPL(rtdm_mmap_vmem); + +/** + * Map an I/O memory range to a virtual user area. + * + * This routine is commonly used from a ->mmap() handler of a RTDM + * driver, for mapping an I/O memory area over the user address space + * referred to by @a vma. + * + * @param[in] vma The VMA descriptor to receive the mapping. + * @param[in] pa The physical I/O address to be mapped. + * + * @return 0 on success, otherwise a negated error code is returned. + * + * @note To map a chunk of logical space obtained from kmalloc(), or a + * purely virtual area with no direct physical mapping to a VMA, call + * rtdm_mmap_kmem() or rtdm_mmap_vmem() respectively instead. + * + * @coretags{secondary-only} + */ +int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa) +{ + return mmap_iomem_helper(vma, pa); +} +EXPORT_SYMBOL_GPL(rtdm_mmap_iomem); + +/** + * Unmap a user memory range. + * + * @param[in] ptr User address or the memory range + * @param[in] len Length of the memory range + * + * @return 0 on success, otherwise: + * + * - -EINVAL is returned if an invalid address or size was passed. + * + * - -EPERM @e may be returned if an illegal invocation environment is + * detected. + * + * @coretags{secondary-only} + */ +int rtdm_munmap(void *ptr, size_t len) +{ + if (!XENO_ASSERT(COBALT, xnsched_root_p())) + return -EPERM; + + return vm_munmap((unsigned long)ptr, len); +} +EXPORT_SYMBOL_GPL(rtdm_munmap); + +int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iovp, + const struct user_msghdr *msg, + struct iovec *iov_fast) +{ + size_t len = sizeof(struct iovec) * msg->msg_iovlen; + struct iovec *iov = iov_fast; + + /* + * If the I/O vector doesn't fit in the fast memory, allocate + * a chunk from the system heap which is large enough to hold + * it. 
+ */ + if (msg->msg_iovlen > RTDM_IOV_FASTMAX) { + iov = xnmalloc(len); + if (iov == NULL) + return -ENOMEM; + } + + *iovp = iov; + + if (!rtdm_fd_is_user(fd)) { + memcpy(iov, msg->msg_iov, len); + return 0; + } + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) + return sys32_get_iovec(iov, + (struct compat_iovec __user *)msg->msg_iov, + msg->msg_iovlen); +#endif + + return rtdm_copy_from_user(fd, iov, msg->msg_iov, len); +} +EXPORT_SYMBOL_GPL(rtdm_get_iovec); + +int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov, + const struct user_msghdr *msg, + struct iovec *iov_fast) +{ + size_t len = sizeof(iov[0]) * msg->msg_iovlen; + int ret; + + if (!rtdm_fd_is_user(fd)) { + memcpy(msg->msg_iov, iov, len); + ret = 0; + } else +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (rtdm_fd_is_compat(fd)) + ret = sys32_put_iovec((struct compat_iovec __user *)msg->msg_iov, + iov, msg->msg_iovlen); + else +#endif + ret = rtdm_copy_to_user(fd, msg->msg_iov, iov, len); + + if (iov != iov_fast) + xnfree(iov); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_put_iovec); + +ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen) +{ + ssize_t len; + int nvec; + + /* Return the flattened vector length. */ + for (len = 0, nvec = 0; nvec < iovlen; nvec++) { + ssize_t l = iov[nvec].iov_len; + if (l < 0 || len + l < len) /* SuS wants this. */ + return -EINVAL; + len += l; + } + + return len; +} +EXPORT_SYMBOL_GPL(rtdm_get_iov_flatlen); + +#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */ + +/** + * Real-time safe rate-limited message printing on kernel console + * + * @param[in] format Format string (conforming standard @c printf()) + * @param ... Arguments referred by @a format + * + * @return On success, this service returns the number of characters printed. + * Otherwise, a negative error code is returned. + * + * @coretags{unrestricted} + */ +void rtdm_printk_ratelimited(const char *format, ...); + +/** + * Real-time safe message printing on kernel console + * + * @param[in] format Format string (conforming standard @c printf()) + * @param ... Arguments referred by @a format + * + * @return On success, this service returns the number of characters printed. + * Otherwise, a negative error code is returned. + * + * @coretags{unrestricted} + */ +void rtdm_printk(const char *format, ...); + +/** + * Allocate memory block + * + * @param[in] size Requested size of the memory block + * + * @return The pointer to the allocated block is returned on success, NULL + * otherwise. + * + * @coretags{unrestricted} + */ +void *rtdm_malloc(size_t size); + +/** + * Release real-time memory block + * + * @param[in] ptr Pointer to memory block as returned by rtdm_malloc() + * + * @coretags{unrestricted} + */ +void rtdm_free(void *ptr); + +/** + * Check if read access to user-space memory block is safe + * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] ptr Address of the user-provided memory block + * @param[in] size Size of the memory block + * + * @return Non-zero is return when it is safe to read from the specified + * memory block, 0 otherwise. 
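+ *
+ * A typical pairing with rtdm_copy_from_user() might look as follows;
+ * the configuration structure is hypothetical:
+ *
+ * @code
+ * #include <rtdm/driver.h>
+ *
+ * struct demo_config {
+ *         unsigned int rate_hz;
+ * };
+ *
+ * static int demo_fetch_config(struct rtdm_fd *fd,
+ *                              const void __user *u_conf,
+ *                              struct demo_config *conf)
+ * {
+ *         if (!rtdm_read_user_ok(fd, u_conf, sizeof(*conf)))
+ *                 return -EFAULT;
+ *
+ *         return rtdm_copy_from_user(fd, conf, u_conf, sizeof(*conf));
+ * }
+ * @endcode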
+ * + * @coretags{task-unrestricted} + */ +int rtdm_read_user_ok(struct rtdm_fd *fd, const void __user *ptr, + size_t size); + +/** + * Check if read/write access to user-space memory block is safe + * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] ptr Address of the user-provided memory block + * @param[in] size Size of the memory block + * + * @return Non-zero is return when it is safe to read from or write to the + * specified memory block, 0 otherwise. + * + * @coretags{task-unrestricted} + */ +int rtdm_rw_user_ok(struct rtdm_fd *fd, const void __user *ptr, + size_t size); + +/** + * Copy user-space memory block to specified buffer + * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] dst Destination buffer address + * @param[in] src Address of the user-space memory block + * @param[in] size Size of the memory block + * + * @return 0 on success, otherwise: + * + * - -EFAULT is returned if an invalid memory area was accessed. + * + * @note Before invoking this service, verify via rtdm_read_user_ok() that the + * provided user-space address can securely be accessed. + * + * @coretags{task-unrestricted} + */ +int rtdm_copy_from_user(struct rtdm_fd *fd, void *dst, + const void __user *src, size_t size); + +/** + * Check if read access to user-space memory block and copy it to specified + * buffer + * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] dst Destination buffer address + * @param[in] src Address of the user-space memory block + * @param[in] size Size of the memory block + * + * @return 0 on success, otherwise: + * + * - -EFAULT is returned if an invalid memory area was accessed. + * + * @note This service is a combination of rtdm_read_user_ok and + * rtdm_copy_from_user. + * + * @coretags{task-unrestricted} + */ +int rtdm_safe_copy_from_user(struct rtdm_fd *fd, void *dst, + const void __user *src, size_t size); + +/** + * Copy specified buffer to user-space memory block + * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] dst Address of the user-space memory block + * @param[in] src Source buffer address + * @param[in] size Size of the memory block + * + * @return 0 on success, otherwise: + * + * - -EFAULT is returned if an invalid memory area was accessed. + * + * @note Before invoking this service, verify via rtdm_rw_user_ok() that the + * provided user-space address can securely be accessed. + * + * @coretags{task-unrestricted} + */ +int rtdm_copy_to_user(struct rtdm_fd *fd, void __user *dst, + const void *src, size_t size); + +/** + * Check if read/write access to user-space memory block is safe and copy + * specified buffer to it + * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] dst Address of the user-space memory block + * @param[in] src Source buffer address + * @param[in] size Size of the memory block + * + * @return 0 on success, otherwise: + * + * - -EFAULT is returned if an invalid memory area was accessed. + * + * @note This service is a combination of rtdm_rw_user_ok and + * rtdm_copy_to_user. 
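+ *
+ * For example, an IOCTL handler could hand a status block back to the
+ * caller in a single step; the status structure is hypothetical:
+ *
+ * @code
+ * #include <rtdm/driver.h>
+ *
+ * struct demo_status {
+ *         unsigned int overruns;
+ * };
+ *
+ * static int demo_put_status(struct rtdm_fd *fd, void __user *u_stat,
+ *                            const struct demo_status *stat)
+ * {
+ *         return rtdm_safe_copy_to_user(fd, u_stat, stat, sizeof(*stat));
+ * }
+ * @endcode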
+ * + * @coretags{task-unrestricted} + */ +int rtdm_safe_copy_to_user(struct rtdm_fd *fd, void __user *dst, + const void *src, size_t size); + +/** + * Copy user-space string to specified buffer + * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * @param[in] dst Destination buffer address + * @param[in] src Address of the user-space string + * @param[in] count Maximum number of bytes to copy, including the trailing + * '0' + * + * @return Length of the string on success (not including the trailing '0'), + * otherwise: + * + * - -EFAULT is returned if an invalid memory area was accessed. + * + * @note This services already includes a check of the source address, + * calling rtdm_read_user_ok() for @a src explicitly is not required. + * + * @coretags{task-unrestricted} + */ +int rtdm_strncpy_from_user(struct rtdm_fd *fd, char *dst, + const char __user *src, size_t count); + +/** + * Test if running in a real-time task + * + * @return Non-zero is returned if the caller resides in real-time context, 0 + * otherwise. + * + * @coretags{unrestricted} + */ +int rtdm_in_rt_context(void); + +/** + * Test if the caller is capable of running in real-time context + * + * @param[in] fd RTDM file descriptor as passed to the invoked + * device operation handler + * + * @return Non-zero is returned if the caller is able to execute in real-time + * context (independent of its current execution mode), 0 otherwise. + * + * @note This function can be used by drivers that provide different + * implementations for the same service depending on the execution mode of + * the caller. If a caller requests such a service in non-real-time context + * but is capable of running in real-time as well, it might be appropriate + * for the driver to reject the request via -ENOSYS so that RTDM can switch + * the caller and restart the request in real-time context. + * + * @coretags{unrestricted} + */ +int rtdm_rt_capable(struct rtdm_fd *fd); + +/** + * Test if the real-time core is available + * + * @return True if the real-time is available, false if it is disabled or in + * error state. + * + * @note Drivers should query the core state during initialization if they + * perform hardware setup operations or interact with RTDM services such as + * locks prior to calling an RTDM service that has a built-in state check of + * the real-time core (e.g. rtdm_dev_register() or rtdm_task_init()). + * + * @coretags{unrestricted} + */ +bool rtdm_available(void); + +#endif /* DOXYGEN_CPP */ + +/** @} Utility Services */ --- linux/kernel/xenomai/rtdm/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/rtdm/Makefile 2022-03-21 12:58:29.104891741 +0100 @@ -0,0 +1,10 @@ + +obj-$(CONFIG_XENOMAI) += xenomai.o + +xenomai-y := core.o \ + device.o \ + drvlib.o \ + fd.o \ + wrappers.o + +ccflags-y += -I$(srctree)/$(src)/.. -I$(srctree)/kernel --- linux/kernel/xenomai/rtdm/device.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/rtdm/device.c 2022-03-21 12:58:29.101891770 +0100 @@ -0,0 +1,649 @@ +/* + * Real-Time Driver Model for Xenomai, device management + * + * Copyright (C) 2005 Jan Kiszka + * Copyright (C) 2005 Joerg Langenberg + * Copyright (C) 2014 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include +#include +#include +#include +#include "rtdm/internal.h" +#include +#include + +/** + * @ingroup rtdm + * @defgroup rtdm_profiles Device Profiles + * + * Pre-defined classes of real-time devices + * + * Device profiles define which operation handlers a driver of a + * certain class of devices has to implement, which name or protocol + * it has to register, which IOCTLs it has to provide, and further + * details. Sub-classes can be defined in order to extend a device + * profile with more hardware-specific functions. + */ + +/** + * @addtogroup rtdm_driver_interface + * @{ + */ + +#define RTDM_DEVICE_MAGIC 0x82846877 + +static struct rb_root protocol_devices; + +static DEFINE_MUTEX(register_lock); +static DECLARE_BITMAP(protocol_devices_minor_map, RTDM_MAX_MINOR); + +static struct class *rtdm_class; + +static int enosys(void) +{ + return -ENOSYS; +} + +void __rtdm_put_device(struct rtdm_device *dev) +{ + secondary_mode_only(); + + if (atomic_dec_and_test(&dev->refcount)) + wake_up(&dev->putwq); +} + +static inline xnkey_t get_proto_id(int pf, int type) +{ + xnkey_t llpf = (unsigned int)pf; + return (llpf << 32) | (unsigned int)type; +} + +struct rtdm_device *__rtdm_get_namedev(const char *path) +{ + struct rtdm_device *dev; + xnhandle_t handle; + int ret; + + secondary_mode_only(); + + /* skip common /dev prefix */ + if (strncmp(path, "/dev/", 5) == 0) + path += 5; + + /* skip RTDM devnode root */ + if (strncmp(path, "rtdm/", 5) == 0) + path += 5; + + ret = xnregistry_bind(path, XN_NONBLOCK, XN_RELATIVE, &handle); + if (ret) + return NULL; + + mutex_lock(®ister_lock); + + dev = xnregistry_lookup(handle, NULL); + if (dev && dev->magic == RTDM_DEVICE_MAGIC) + __rtdm_get_device(dev); + else + dev = NULL; + + mutex_unlock(®ister_lock); + + return dev; +} + +struct rtdm_device *__rtdm_get_protodev(int protocol_family, int socket_type) +{ + struct rtdm_device *dev = NULL; + struct xnid *xnid; + xnkey_t id; + + secondary_mode_only(); + + id = get_proto_id(protocol_family, socket_type); + + mutex_lock(®ister_lock); + + xnid = xnid_fetch(&protocol_devices, id); + if (xnid) { + dev = container_of(xnid, struct rtdm_device, proto.id); + __rtdm_get_device(dev); + } + + mutex_unlock(®ister_lock); + + return dev; +} + +/** + * @ingroup rtdm_driver_interface + * @defgroup rtdm_device_register Device Registration Services + * @{ + */ + +static char *rtdm_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "rtdm/%s", dev_name(dev)); +} + +static ssize_t profile_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct rtdm_device *dev = dev_get_drvdata(kdev); + + return sprintf(buf, "%d,%d\n", + dev->driver->profile_info.class_id, + dev->driver->profile_info.subclass_id); +} + +static ssize_t refcount_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct rtdm_device *dev = dev_get_drvdata(kdev); + + return sprintf(buf, "%d\n", atomic_read(&dev->refcount)); +} + +#define cat_count(__buf, __str) \ + ({ \ + int __ret = sizeof(__str) - 1; \ + 
strcat(__buf, __str); \ + __ret; \ + }) + +static ssize_t flags_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct rtdm_device *dev = dev_get_drvdata(kdev); + struct rtdm_driver *drv = dev->driver; + + return sprintf(buf, "%#x\n", drv->device_flags); + +} + +static ssize_t type_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct rtdm_device *dev = dev_get_drvdata(kdev); + struct rtdm_driver *drv = dev->driver; + int ret; + + if (drv->device_flags & RTDM_NAMED_DEVICE) + ret = cat_count(buf, "named\n"); + else + ret = cat_count(buf, "protocol\n"); + + return ret; + +} + +#ifdef ATTRIBUTE_GROUPS + +static DEVICE_ATTR_RO(profile); +static DEVICE_ATTR_RO(refcount); +static DEVICE_ATTR_RO(flags); +static DEVICE_ATTR_RO(type); + +static struct attribute *rtdm_attrs[] = { + &dev_attr_profile.attr, + &dev_attr_refcount.attr, + &dev_attr_flags.attr, + &dev_attr_type.attr, + NULL, +}; +ATTRIBUTE_GROUPS(rtdm); + +#else /* !ATTRIBUTE_GROUPS */ + +/* + * Cope with legacy sysfs attributes. Scheduled for removal when 3.10 + * is at EOL for us. + */ +static struct device_attribute rtdm_attrs[] = { + DEVICE_ATTR_RO(profile), + DEVICE_ATTR_RO(refcount), + DEVICE_ATTR_RO(flags), + DEVICE_ATTR_RO(type), + __ATTR_NULL +}; + +#define dev_groups dev_attrs +#define rtdm_groups rtdm_attrs + +#endif /* !ATTRIBUTE_GROUPS */ + +static int state_change_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct rtdm_driver *drv; + int ret; + + drv = container_of(nb, struct rtdm_driver, nb_statechange); + + switch (action) { + case COBALT_STATE_WARMUP: + if (drv->smops.start == NULL) + return NOTIFY_DONE; + ret = drv->smops.start(drv); + if (ret) + printk(XENO_WARNING + "failed starting driver %s (%d)\n", + drv->profile_info.name, ret); + break; + case COBALT_STATE_TEARDOWN: + if (drv->smops.stop == NULL) + return NOTIFY_DONE; + ret = drv->smops.stop(drv); + if (ret) + printk(XENO_WARNING + "failed stopping driver %s (%d)\n", + drv->profile_info.name, ret); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static int register_driver(struct rtdm_driver *drv) +{ + dev_t rdev; + int ret; + + if (drv->profile_info.magic == RTDM_CLASS_MAGIC) { + atomic_inc(&drv->refcount); + return 0; + } + + if (drv->profile_info.magic != ~RTDM_CLASS_MAGIC) { + XENO_WARN_ON_ONCE(COBALT, 1); + return -EINVAL; + } + + switch (drv->device_flags & RTDM_DEVICE_TYPE_MASK) { + case RTDM_NAMED_DEVICE: + case RTDM_PROTOCOL_DEVICE: + break; + default: + printk(XENO_WARNING "%s has invalid device type (%#x)\n", + drv->profile_info.name, + drv->device_flags & RTDM_DEVICE_TYPE_MASK); + return -EINVAL; + } + + if (drv->device_count <= 0 || + drv->device_count > RTDM_MAX_MINOR) { + printk(XENO_WARNING "%s has invalid device count (%d)\n", + drv->profile_info.name, drv->device_count); + return -EINVAL; + } + + if ((drv->device_flags & RTDM_NAMED_DEVICE) == 0) + goto done; + + if (drv->base_minor < 0 || + drv->base_minor >= RTDM_MAX_MINOR) { + printk(XENO_WARNING "%s has invalid base minor (%d)\n", + drv->profile_info.name, drv->base_minor); + return -EINVAL; + } + + ret = alloc_chrdev_region(&rdev, drv->base_minor, drv->device_count, + drv->profile_info.name); + if (ret) { + printk(XENO_WARNING "cannot allocate chrdev region %s[%d..%d]\n", + drv->profile_info.name, drv->base_minor, + drv->base_minor + drv->device_count - 1); + return ret; + } + + cdev_init(&drv->named.cdev, &rtdm_dumb_fops); + ret = cdev_add(&drv->named.cdev, rdev, 
drv->device_count); + if (ret) { + printk(XENO_WARNING "cannot create cdev series for %s\n", + drv->profile_info.name); + goto fail_cdev; + } + + drv->named.major = MAJOR(rdev); + bitmap_zero(drv->minor_map, RTDM_MAX_MINOR); + +done: + atomic_set(&drv->refcount, 1); + drv->nb_statechange.notifier_call = state_change_notifier; + drv->nb_statechange.priority = 0; + cobalt_add_state_chain(&drv->nb_statechange); + drv->profile_info.magic = RTDM_CLASS_MAGIC; + + return 0; + +fail_cdev: + unregister_chrdev_region(rdev, drv->device_count); + + return ret; +} + +static void unregister_driver(struct rtdm_driver *drv) +{ + XENO_BUG_ON(COBALT, drv->profile_info.magic != RTDM_CLASS_MAGIC); + + if (!atomic_dec_and_test(&drv->refcount)) + return; + + cobalt_remove_state_chain(&drv->nb_statechange); + + drv->profile_info.magic = ~RTDM_CLASS_MAGIC; + + if (drv->device_flags & RTDM_NAMED_DEVICE) { + cdev_del(&drv->named.cdev); + unregister_chrdev_region(MKDEV(drv->named.major, drv->base_minor), + drv->device_count); + } +} + +/** + * @brief Register a RTDM device + * + * Registers a device in the RTDM namespace. + * + * @param[in] dev Device descriptor. + * + * @return 0 is returned upon success. Otherwise: + * + * - -EINVAL is returned if the descriptor contains invalid + * entries. RTDM_PROFILE_INFO() must appear in the list of + * initializers for the driver properties. + * + * - -EEXIST is returned if the specified device name of protocol ID is + * already in use. + * + * - -ENOMEM is returned if a memory allocation failed in the process + * of registering the device. + * + * - -EAGAIN is returned if no registry slot is available (check/raise + * CONFIG_XENO_OPT_REGISTRY_NRSLOTS). + * + * - -ENOSYS is returned if the real-time core is disabled. + * + * - -ENXIO is returned if no valid minor could be assigned + * + * @coretags{secondary-only} + */ +int rtdm_dev_register(struct rtdm_device *dev) +{ + struct class *kdev_class = rtdm_class; + struct device *kdev = NULL; + struct rtdm_driver *drv; + int ret, major, minor; + xnkey_t id; + dev_t rdev; + const char *dev_name; + + secondary_mode_only(); + + if (!realtime_core_enabled()) + return -ENOSYS; + + mutex_lock(®ister_lock); + + dev->name = NULL; + drv = dev->driver; + ret = register_driver(drv); + if (ret) { + mutex_unlock(®ister_lock); + return ret; + } + + dev->ops = drv->ops; + if (drv->device_flags & RTDM_NAMED_DEVICE) + dev->ops.socket = (typeof(dev->ops.socket))enosys; + else + dev->ops.open = (typeof(dev->ops.open))enosys; + + INIT_LIST_HEAD(&dev->openfd_list); + init_waitqueue_head(&dev->putwq); + dev->ops.close = __rtdm_dev_close; /* Interpose on driver's handler. 
*/ + atomic_set(&dev->refcount, 0); + + if (drv->profile_info.kdev_class) + kdev_class = drv->profile_info.kdev_class; + + if (drv->device_flags & RTDM_NAMED_DEVICE) { + if (drv->device_flags & RTDM_FIXED_MINOR) { + minor = dev->minor; + if (minor < 0 || + minor >= drv->base_minor + drv->device_count) { + ret = -ENXIO; + goto fail; + } + } else { + minor = find_first_zero_bit(drv->minor_map, RTDM_MAX_MINOR); + if (minor >= RTDM_MAX_MINOR) { + ret = -ENXIO; + goto fail; + } + dev->minor = minor; + } + + major = drv->named.major; + dev->name = kasformat(dev->label, minor); + if (dev->name == NULL) { + ret = -ENOMEM; + goto fail; + } + if (dev->name[0] == '/') { + dev_name = dev->name+1; + } else { + dev_name = dev->name; + } + ret = xnregistry_enter(dev_name, dev, + &dev->named.handle, NULL); + if (ret) + goto fail; + + rdev = MKDEV(major, minor); + kdev = device_create(kdev_class, NULL, rdev, + dev, kbasename(dev->label), minor); + if (IS_ERR(kdev)) { + xnregistry_remove(dev->named.handle); + ret = PTR_ERR(kdev); + goto fail2; + } + __set_bit(minor, drv->minor_map); + } else { + minor = find_first_zero_bit(protocol_devices_minor_map, + RTDM_MAX_MINOR); + if (minor >= RTDM_MAX_MINOR) { + ret = -ENXIO; + goto fail; + } + dev->minor = minor; + + dev->name = kstrdup(dev->label, GFP_KERNEL); + if (dev->name == NULL) { + ret = -ENOMEM; + goto fail; + } + + rdev = MKDEV(0, minor); + kdev = device_create(kdev_class, NULL, rdev, + dev, dev->name); + if (IS_ERR(kdev)) { + ret = PTR_ERR(kdev); + goto fail2; + } + + id = get_proto_id(drv->protocol_family, drv->socket_type); + ret = xnid_enter(&protocol_devices, &dev->proto.id, id); + if (ret < 0) + goto fail; + __set_bit(minor, protocol_devices_minor_map); + } + + dev->rdev = rdev; + dev->kdev = kdev; + dev->magic = RTDM_DEVICE_MAGIC; + dev->kdev_class = kdev_class; + + mutex_unlock(®ister_lock); + + trace_cobalt_device_register(dev); + + return 0; +fail: + if (kdev) + device_destroy(kdev_class, rdev); +fail2: + unregister_driver(drv); + + mutex_unlock(®ister_lock); + + if (dev->name) + kfree(dev->name); + + return ret; +} +EXPORT_SYMBOL_GPL(rtdm_dev_register); + +/** + * @brief Unregister a RTDM device + * + * Removes the device from the RTDM namespace. This routine first + * attempts to teardown all active connections to the @a device prior + * to unregistering. + * + * @param[in] dev Device descriptor. + * + * @coretags{secondary-only} + */ +void rtdm_dev_unregister(struct rtdm_device *dev) +{ + struct rtdm_driver *drv = dev->driver; + + secondary_mode_only(); + + trace_cobalt_device_unregister(dev); + + /* Lock out any further connection. */ + dev->magic = ~RTDM_DEVICE_MAGIC; + + /* Flush all fds from this device. */ + rtdm_device_flush_fds(dev); + + /* Then wait for the ongoing connections to finish. */ + wait_event(dev->putwq, + atomic_read(&dev->refcount) == 0); + + mutex_lock(®ister_lock); + + if (drv->device_flags & RTDM_NAMED_DEVICE) { + xnregistry_remove(dev->named.handle); + __clear_bit(dev->minor, drv->minor_map); + } else { + xnid_remove(&protocol_devices, &dev->proto.id); + __clear_bit(dev->minor, protocol_devices_minor_map); + } + + device_destroy(dev->kdev_class, dev->rdev); + + unregister_driver(drv); + + mutex_unlock(®ister_lock); + + kfree(dev->name); +} +EXPORT_SYMBOL_GPL(rtdm_dev_unregister); + +/** + * @brief Set the kernel device class of a RTDM driver. + * + * Set the kernel device class assigned to the RTDM driver. 
By + * default, RTDM drivers belong to Linux's "rtdm" device class, + * creating a device node hierarchy rooted at /dev/rtdm, and sysfs + * nodes under /sys/class/rtdm. + * + * This call assigns a user-defined kernel device class to the RTDM + * driver, so that its devices are created into a different system + * hierarchy. + * + * rtdm_drv_set_sysclass() is meaningful only before the first device + * which is attached to @a drv is registered by a call to + * rtdm_dev_register(). + * + * @param[in] drv Address of the RTDM driver descriptor. + * + * @param[in] cls Pointer to the kernel device class. NULL is allowed + * to clear a previous setting, switching back to the default "rtdm" + * device class. + * + * @return 0 on success, otherwise: + * + * - -EBUSY is returned if the kernel device class has already been + * set for @a drv, or some device(s) attached to @a drv are currently + * registered. + * + * @coretags{task-unrestricted} + * + * @attention The kernel device class set by this call is not related to + * the RTDM class identification as defined by the @ref rtdm_profiles + * "RTDM profiles" in any way. This is strictly related to the Linux + * kernel device hierarchy. + */ +int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls) +{ + if ((cls && drv->profile_info.kdev_class) || + atomic_read(&drv->refcount)) + return -EBUSY; + + drv->profile_info.kdev_class = cls; + + return 0; +} +EXPORT_SYMBOL_GPL(rtdm_drv_set_sysclass); + +/** @} */ + +int __init rtdm_init(void) +{ + xntree_init(&protocol_devices); + + rtdm_class = class_create(THIS_MODULE, "rtdm"); + if (IS_ERR(rtdm_class)) { + printk(XENO_ERR "cannot create RTDM sysfs class\n"); + return PTR_ERR(rtdm_class); + } + rtdm_class->dev_groups = rtdm_groups; + rtdm_class->devnode = rtdm_devnode; + + bitmap_zero(protocol_devices_minor_map, RTDM_MAX_MINOR); + + return 0; +} + +void rtdm_cleanup(void) +{ + class_destroy(rtdm_class); + /* + * NOTE: no need to flush the cleanup_queue as no device is + * allowed to unregister as long as there are references. + */ +} + +/** @} */ --- linux/kernel/xenomai/rtdm/wrappers.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/rtdm/wrappers.c 2022-03-21 12:58:29.097891809 +0100 @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2013 Hauke Mehrtens + * Copyright (c) 2013 Hannes Frederic Sowa + * Copyright (c) 2014 Luis R. Rodriguez + * + * Backport functionality introduced in Linux 3.13. + * + * Copyright (c) 2014 Hauke Mehrtens + * + * Backport functionality introduced in Linux 3.14. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +/* + * Same rules as kernel/cobalt/include/asm-generic/xenomai/wrappers.h + * apply to reduce #ifdefery. 
+ */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) +#ifdef CONFIG_PCI_MSI +int pci_enable_msix_range(struct pci_dev *dev, + struct msix_entry *entries, + int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msix(dev, entries, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +EXPORT_SYMBOL(pci_enable_msix_range); +#endif +#endif /* < 3.14 */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) +#ifdef CONFIG_HWMON +struct device* +hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups) +{ + struct device *hwdev; + + hwdev = hwmon_device_register(dev); + hwdev->groups = groups; + dev_set_drvdata(hwdev, drvdata); + return hwdev; +} + +static void devm_hwmon_release(struct device *dev, void *res) +{ + struct device *hwdev = *(struct device **)res; + + hwmon_device_unregister(hwdev); +} + +struct device * +devm_hwmon_device_register_with_groups(struct device *dev, const char *name, + void *drvdata, + const struct attribute_group **groups) +{ + struct device **ptr, *hwdev; + + if (!dev) + return ERR_PTR(-EINVAL); + + ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups); + if (IS_ERR(hwdev)) + goto error; + + *ptr = hwdev; + devres_add(dev, ptr); + return hwdev; + +error: + devres_free(ptr); + return hwdev; +} +EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups); +#endif +#endif /* < 3.13 */ --- linux/kernel/xenomai/posix/mqueue.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/mqueue.h 2022-03-21 12:58:29.092891858 +0100 @@ -0,0 +1,92 @@ +/* + * Written by Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _COBALT_POSIX_MQUEUE_H +#define _COBALT_POSIX_MQUEUE_H + +#include +#include +#include + +struct mq_attr { + long mq_flags; + long mq_maxmsg; + long mq_msgsize; + long mq_curmsgs; +}; + +int __cobalt_mq_open(const char __user *u_name, int oflags, + mode_t mode, struct mq_attr *attr); + +int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr); + +int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)); + +int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const void __user *u_ts); + +int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf, + ssize_t *lenp, + unsigned int __user *u_prio, + const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)); + +int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf, + ssize_t __user *u_len, + unsigned int __user *u_prio, + const void __user *u_ts); + +int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp); + +COBALT_SYSCALL_DECL(mq_open, + (const char __user *u_name, int oflags, + mode_t mode, struct mq_attr __user *u_attr)); + +COBALT_SYSCALL_DECL(mq_close, (mqd_t uqd)); + +COBALT_SYSCALL_DECL(mq_unlink, (const char __user *u_name)); + +COBALT_SYSCALL_DECL(mq_getattr, (mqd_t uqd, struct mq_attr __user *u_attr)); + +COBALT_SYSCALL_DECL(mq_timedsend, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mq_timedsend64, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mq_timedreceive, + (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mq_timedreceive64, + (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mq_notify, + (mqd_t fd, const struct sigevent *__user evp)); + +#endif /* !_COBALT_POSIX_MQUEUE_H */ --- linux/kernel/xenomai/posix/sched.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/sched.h 2022-03-21 12:58:29.088891896 +0100 @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2009 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_POSIX_SCHED_H +#define _COBALT_POSIX_SCHED_H + +#include +#include +#include + +struct cobalt_resources; +struct cobalt_process; + +struct cobalt_sched_group { +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + struct xnsched_quota_group quota; +#endif + struct cobalt_resources *scope; + int pshared; + struct list_head next; +}; + +int __cobalt_sched_weightprio(int policy, + const struct sched_param_ex *param_ex); + +int __cobalt_sched_setconfig_np(int cpu, int policy, + void __user *u_config, + size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + int (*ack_config)(int policy, + const union sched_config *config, + void __user *u_config)); + +ssize_t __cobalt_sched_getconfig_np(int cpu, int policy, + void __user *u_config, + size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + ssize_t (*put_config)(int policy, + void __user *u_config, size_t u_len, + const union sched_config *config, + size_t len)); +int cobalt_sched_setscheduler_ex(pid_t pid, + int policy, + const struct sched_param_ex *param_ex, + __u32 __user *u_winoff, + int __user *u_promoted); + +int cobalt_sched_getscheduler_ex(pid_t pid, + int *policy_r, + struct sched_param_ex *param_ex); + +struct xnsched_class * +cobalt_sched_policy_param(union xnsched_policy_param *param, + int u_policy, const struct sched_param_ex *param_ex, + xnticks_t *tslice_r); + +COBALT_SYSCALL_DECL(sched_yield, (void)); + +COBALT_SYSCALL_DECL(sched_weightprio, + (int policy, const struct sched_param_ex __user *u_param)); + +COBALT_SYSCALL_DECL(sched_minprio, (int policy)); + +COBALT_SYSCALL_DECL(sched_maxprio, (int policy)); + +COBALT_SYSCALL_DECL(sched_setconfig_np, + (int cpu, + int policy, + union sched_config __user *u_config, + size_t len)); + +COBALT_SYSCALL_DECL(sched_getconfig_np, + (int cpu, int policy, + union sched_config __user *u_config, + size_t len)); + +COBALT_SYSCALL_DECL(sched_setscheduler_ex, + (pid_t pid, + int policy, + const struct sched_param_ex __user *u_param, + __u32 __user *u_winoff, + int __user *u_promoted)); + +COBALT_SYSCALL_DECL(sched_getscheduler_ex, + (pid_t pid, + int __user *u_policy, + struct sched_param_ex __user *u_param)); + +void cobalt_sched_reclaim(struct cobalt_process *process); + +#endif /* !_COBALT_POSIX_SCHED_H */ --- linux/kernel/xenomai/posix/internal.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/internal.h 2022-03-21 12:58:29.085891926 +0100 @@ -0,0 +1,62 @@ +/* + * Written by Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_POSIX_INTERNAL_H +#define _COBALT_POSIX_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include "process.h" +#include "extension.h" +#include "syscall.h" +#include "memory.h" + +#define COBALT_MAXNAME 64 +#define COBALT_PERMS_MASK (O_RDONLY | O_WRONLY | O_RDWR) + +#define COBALT_MAGIC(n) (0x8686##n##n) +#define COBALT_ANY_MAGIC COBALT_MAGIC(00) +#define COBALT_THREAD_MAGIC COBALT_MAGIC(01) +#define COBALT_MQ_MAGIC COBALT_MAGIC(0A) +#define COBALT_MQD_MAGIC COBALT_MAGIC(0B) +#define COBALT_EVENT_MAGIC COBALT_MAGIC(0F) +#define COBALT_MONITOR_MAGIC COBALT_MAGIC(10) +#define COBALT_TIMERFD_MAGIC COBALT_MAGIC(11) + +#define cobalt_obj_active(h,m,t) \ + ((h) && ((t *)(h))->magic == (m)) + +#define cobalt_mark_deleted(t) ((t)->magic = ~(t)->magic) + +extern struct xnptree posix_ptree; + +static inline xnhandle_t cobalt_get_handle_from_user(xnhandle_t *u_h) +{ + xnhandle_t handle; + return __xn_get_user(handle, u_h) ? 0 : handle; +} + +int cobalt_init(void); + +long cobalt_restart_syscall_placeholder(struct restart_block *param); + +#endif /* !_COBALT_POSIX_INTERNAL_H */ --- linux/kernel/xenomai/posix/mutex.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/mutex.h 2022-03-21 12:58:29.081891965 +0100 @@ -0,0 +1,83 @@ +/* + * Written by Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _COBALT_POSIX_MUTEX_H +#define _COBALT_POSIX_MUTEX_H + +#include "thread.h" +#include +#include +#include + +struct cobalt_process; + +struct cobalt_mutex { + unsigned int magic; + struct xnsynch synchbase; + /** cobalt_mutexq */ + struct list_head conds; + struct cobalt_mutexattr attr; + struct cobalt_resnode resnode; +}; + +int __cobalt_mutex_timedlock_break(struct cobalt_mutex_shadow __user *u_mx, + const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)); + +int __cobalt_mutex_timedlock64(struct cobalt_mutex_shadow __user *u_mx, + const void __user *u_ts); + +int __cobalt_mutex_acquire_unchecked(struct xnthread *cur, + struct cobalt_mutex *mutex, + const struct timespec64 *ts); + +COBALT_SYSCALL_DECL(mutex_check_init, + (struct cobalt_mutex_shadow __user *u_mx)); + +COBALT_SYSCALL_DECL(mutex_init, + (struct cobalt_mutex_shadow __user *u_mx, + const struct cobalt_mutexattr __user *u_attr)); + +COBALT_SYSCALL_DECL(mutex_destroy, + (struct cobalt_mutex_shadow __user *u_mx)); + +COBALT_SYSCALL_DECL(mutex_trylock, + (struct cobalt_mutex_shadow __user *u_mx)); + +COBALT_SYSCALL_DECL(mutex_lock, + (struct cobalt_mutex_shadow __user *u_mx)); + +COBALT_SYSCALL_DECL(mutex_timedlock, + (struct cobalt_mutex_shadow __user *u_mx, + const struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mutex_timedlock64, + (struct cobalt_mutex_shadow __user *u_mx, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(mutex_unlock, + (struct cobalt_mutex_shadow __user *u_mx)); + +int cobalt_mutex_release(struct xnthread *cur, + struct cobalt_mutex *mutex); + +void cobalt_mutex_reclaim(struct cobalt_resnode *node, + spl_t s); + +#endif /* !_COBALT_POSIX_MUTEX_H */ --- linux/kernel/xenomai/posix/signal.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/signal.h 2022-03-21 12:58:29.078891994 +0100 @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_POSIX_SIGNAL_H +#define _COBALT_POSIX_SIGNAL_H + +#include +#include +#include +#include +#include + +struct cobalt_thread; + +struct cobalt_sigpending { + struct siginfo si; + struct list_head next; +}; + +static inline +void cobalt_copy_siginfo(int code, + struct siginfo *__restrict__ dst, + const struct siginfo *__restrict__ src) +{ + dst->si_signo = src->si_signo; + dst->si_errno = src->si_errno; + dst->si_code = code; + + switch (code) { + case SI_TIMER: + dst->si_tid = src->si_tid; + dst->si_overrun = src->si_overrun; + dst->si_value = src->si_value; + break; + case SI_QUEUE: + case SI_MESGQ: + dst->si_value = src->si_value; + fallthrough; + case SI_USER: + dst->si_pid = src->si_pid; + dst->si_uid = src->si_uid; + } +} + +int __cobalt_sigwait(sigset_t *set); + +int __cobalt_sigtimedwait(sigset_t *set, + const struct timespec64 *timeout, + void __user *u_si, + bool compat); + +int __cobalt_sigwaitinfo(sigset_t *set, + void __user *u_si, + bool compat); + +int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value); + +int cobalt_signal_send(struct cobalt_thread *thread, + struct cobalt_sigpending *sigp, + int group); + +int cobalt_signal_send_pid(pid_t pid, + struct cobalt_sigpending *sigp); + +struct cobalt_sigpending *cobalt_signal_alloc(void); + +void cobalt_signal_free(struct cobalt_sigpending *sigp); + +void cobalt_signal_flush(struct cobalt_thread *thread); + +int cobalt_signal_wait(sigset_t *set, struct siginfo *si, + xnticks_t timeout, xntmode_t tmode); + +int __cobalt_kill(struct cobalt_thread *thread, + int sig, int group); + +COBALT_SYSCALL_DECL(sigwait, + (const sigset_t __user *u_set, int __user *u_sig)); + +COBALT_SYSCALL_DECL(sigtimedwait, + (const sigset_t __user *u_set, + struct siginfo __user *u_si, + const struct __user_old_timespec __user *u_timeout)); + +COBALT_SYSCALL_DECL(sigtimedwait64, + (const sigset_t __user *u_set, + struct siginfo __user *u_si, + const struct __kernel_timespec __user *u_timeout)); + +COBALT_SYSCALL_DECL(sigwaitinfo, + (const sigset_t __user *u_set, + struct siginfo __user *u_si)); + +COBALT_SYSCALL_DECL(sigpending, + (old_sigset_t __user *u_set)); + +COBALT_SYSCALL_DECL(kill, (pid_t pid, int sig)); + +COBALT_SYSCALL_DECL(sigqueue, + (pid_t pid, int sig, const union sigval __user *u_value)); + +int cobalt_signal_init(void); + +void cobalt_signal_cleanup(void); + +#endif /* !_COBALT_POSIX_SIGNAL_H */ --- linux/kernel/xenomai/posix/timerfd.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/timerfd.h 2022-03-21 12:58:29.074892033 +0100 @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2014 Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef TIMERFD_H +#define TIMERFD_H + +#include +#include + +int __cobalt_timerfd_settime(int fd, int flags, + const struct itimerspec64 *new_value, + struct itimerspec64 *old_value); + +int __cobalt_timerfd_gettime(int fd, + struct itimerspec64 *value); + +COBALT_SYSCALL_DECL(timerfd_create, + (int clockid, int flags)); + +COBALT_SYSCALL_DECL(timerfd_settime, + (int fd, int flags, + const struct __user_old_itimerspec __user *new_value, + struct __user_old_itimerspec __user *old_value)); + +COBALT_SYSCALL_DECL(timerfd_gettime, + (int fd, struct __user_old_itimerspec __user *curr_value)); + +#endif /* TIMERFD_H */ --- linux/kernel/xenomai/posix/process.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/process.h 2022-03-21 12:58:29.071892062 +0100 @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_PROCESS_H +#define _COBALT_POSIX_PROCESS_H + +#include +#include +#include +#include + +#define NR_PERSONALITIES 4 +#if BITS_PER_LONG < NR_PERSONALITIES +#error "NR_PERSONALITIES overflows internal bitmap" +#endif + +struct mm_struct; +struct xnthread_personality; +struct cobalt_timer; + +struct cobalt_resources { + struct list_head condq; + struct list_head mutexq; + struct list_head semq; + struct list_head monitorq; + struct list_head eventq; + struct list_head schedq; +}; + +struct cobalt_process { + struct mm_struct *mm; + struct hlist_node hlink; + struct cobalt_ppd sys_ppd; + unsigned long permap; + struct rb_root usems; + struct list_head sigwaiters; + struct cobalt_resources resources; + struct list_head thread_list; + DECLARE_BITMAP(timers_map, CONFIG_XENO_OPT_NRTIMERS); + struct cobalt_timer *timers[CONFIG_XENO_OPT_NRTIMERS]; + void *priv[NR_PERSONALITIES]; + int ufeatures; + unsigned int debugged_threads; +}; + +struct cobalt_resnode { + struct cobalt_resources *scope; + struct cobalt_process *owner; + struct list_head next; + xnhandle_t handle; +}; + +int cobalt_register_personality(struct xnthread_personality *personality); + +int cobalt_unregister_personality(int xid); + +struct xnthread_personality *cobalt_push_personality(int xid); + +void cobalt_pop_personality(struct xnthread_personality *prev); + +int cobalt_bind_core(int ufeatures); + +int cobalt_bind_personality(unsigned int magic); + +struct cobalt_process *cobalt_search_process(struct mm_struct *mm); + +int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff); + +void *cobalt_get_context(int xid); + +int cobalt_yield(xnticks_t min, xnticks_t max); + +int cobalt_process_init(void); + +extern struct list_head cobalt_global_thread_list; + +extern struct cobalt_resources cobalt_global_resources; + +static inline struct cobalt_process *cobalt_current_process(void) +{ + return pipeline_current()->process; +} + +static inline struct 
cobalt_process * +cobalt_set_process(struct cobalt_process *process) +{ + struct cobalt_threadinfo *p = pipeline_current(); + struct cobalt_process *old; + + old = p->process; + p->process = process; + + return old; +} + +static inline struct cobalt_ppd *cobalt_ppd_get(int global) +{ + struct cobalt_process *process; + + if (global || (process = cobalt_current_process()) == NULL) + return &cobalt_kernel_ppd; + + return &process->sys_ppd; +} + +static inline struct cobalt_resources *cobalt_current_resources(int pshared) +{ + struct cobalt_process *process; + + if (pshared || (process = cobalt_current_process()) == NULL) + return &cobalt_global_resources; + + return &process->resources; +} + +static inline +void __cobalt_add_resource(struct cobalt_resnode *node, int pshared) +{ + node->owner = cobalt_current_process(); + node->scope = cobalt_current_resources(pshared); +} + +#define cobalt_add_resource(__node, __type, __pshared) \ + do { \ + __cobalt_add_resource(__node, __pshared); \ + list_add_tail(&(__node)->next, \ + &((__node)->scope)->__type ## q); \ + } while (0) + +static inline +void cobalt_del_resource(struct cobalt_resnode *node) +{ + list_del(&node->next); +} + +void cobalt_remove_process(struct cobalt_process *process); + +void cobalt_signal_yield(void); + +void cobalt_stop_debugged_process(struct xnthread *thread); + +void cobalt_register_debugged_thread(struct xnthread *thread); + +void cobalt_unregister_debugged_thread(struct xnthread *thread); + +extern struct xnthread_personality *cobalt_personalities[]; + +extern struct xnthread_personality cobalt_personality; + +int cobalt_handle_setaffinity_event(struct task_struct *task); + +#ifdef CONFIG_SMP +void cobalt_adjust_affinity(struct task_struct *task); +#else +static inline void cobalt_adjust_affinity(struct task_struct *task) { } +#endif + +int cobalt_handle_taskexit_event(struct task_struct *task); + +int cobalt_handle_cleanup_event(struct mm_struct *mm); + +int cobalt_handle_user_return(struct task_struct *task); + +#endif /* !_COBALT_POSIX_PROCESS_H */ --- linux/kernel/xenomai/posix/sched.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/sched.c 2022-03-21 12:58:29.067892101 +0100 @@ -0,0 +1,853 @@ +/* + * Copyright (C) 2009 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include "internal.h" +#include "thread.h" +#include "sched.h" +#include "clock.h" +#include + +struct xnsched_class * +cobalt_sched_policy_param(union xnsched_policy_param *param, + int u_policy, const struct sched_param_ex *param_ex, + xnticks_t *tslice_r) +{ + struct xnsched_class *sched_class; + int prio, policy; + xnticks_t tslice; + + prio = param_ex->sched_priority; + tslice = XN_INFINITE; + policy = u_policy; + + /* + * NOTE: The user-defined policy may be different than ours, + * e.g. 
SCHED_FIFO,prio=-7 from userland would be interpreted + * as SCHED_WEAK,prio=7 in kernel space. + */ + if (prio < 0) { + prio = -prio; + policy = SCHED_WEAK; + } + sched_class = &xnsched_class_rt; + param->rt.prio = prio; + + switch (policy) { + case SCHED_NORMAL: + if (prio) + return NULL; + /* + * When the weak scheduling class is compiled in, + * SCHED_WEAK and SCHED_NORMAL threads are scheduled + * by xnsched_class_weak, at their respective priority + * levels. Otherwise, SCHED_NORMAL is scheduled by + * xnsched_class_rt at priority level #0. + */ + fallthrough; + case SCHED_WEAK: +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + if (prio < XNSCHED_WEAK_MIN_PRIO || + prio > XNSCHED_WEAK_MAX_PRIO) + return NULL; + param->weak.prio = prio; + sched_class = &xnsched_class_weak; +#else + if (prio) + return NULL; +#endif + break; + case SCHED_RR: + /* if unspecified, use current one. */ + tslice = u_ts2ns(¶m_ex->sched_rr_quantum); + if (tslice == XN_INFINITE && tslice_r) + tslice = *tslice_r; + fallthrough; + case SCHED_FIFO: + if (prio < XNSCHED_FIFO_MIN_PRIO || + prio > XNSCHED_FIFO_MAX_PRIO) + return NULL; + break; + case SCHED_COBALT: + if (prio < XNSCHED_CORE_MIN_PRIO || + prio > XNSCHED_CORE_MAX_PRIO) + return NULL; + break; +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + case SCHED_SPORADIC: + param->pss.normal_prio = param_ex->sched_priority; + param->pss.low_prio = param_ex->sched_ss_low_priority; + param->pss.current_prio = param->pss.normal_prio; + param->pss.init_budget = u_ts2ns(¶m_ex->sched_ss_init_budget); + param->pss.repl_period = u_ts2ns(¶m_ex->sched_ss_repl_period); + param->pss.max_repl = param_ex->sched_ss_max_repl; + sched_class = &xnsched_class_sporadic; + break; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_TP + case SCHED_TP: + param->tp.prio = param_ex->sched_priority; + param->tp.ptid = param_ex->sched_tp_partition; + sched_class = &xnsched_class_tp; + break; +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + case SCHED_QUOTA: + param->quota.prio = param_ex->sched_priority; + param->quota.tgid = param_ex->sched_quota_group; + sched_class = &xnsched_class_quota; + break; +#endif + default: + return NULL; + } + + if (tslice_r) + *tslice_r = tslice; + + return sched_class; +} + +COBALT_SYSCALL(sched_minprio, current, (int policy)) +{ + int ret; + + switch (policy) { + case SCHED_FIFO: + case SCHED_RR: + case SCHED_SPORADIC: + case SCHED_TP: + case SCHED_QUOTA: + ret = XNSCHED_FIFO_MIN_PRIO; + break; + case SCHED_COBALT: + ret = XNSCHED_CORE_MIN_PRIO; + break; + case SCHED_NORMAL: + case SCHED_WEAK: + ret = 0; + break; + default: + ret = -EINVAL; + } + + trace_cobalt_sched_min_prio(policy, ret); + + return ret; +} + +COBALT_SYSCALL(sched_maxprio, current, (int policy)) +{ + int ret; + + switch (policy) { + case SCHED_FIFO: + case SCHED_RR: + case SCHED_SPORADIC: + case SCHED_TP: + case SCHED_QUOTA: + ret = XNSCHED_FIFO_MAX_PRIO; + break; + case SCHED_COBALT: + ret = XNSCHED_CORE_MAX_PRIO; + break; + case SCHED_NORMAL: + ret = 0; + break; + case SCHED_WEAK: +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + ret = XNSCHED_FIFO_MAX_PRIO; +#else + ret = 0; +#endif + break; + default: + ret = -EINVAL; + } + + trace_cobalt_sched_max_prio(policy, ret); + + return ret; +} + +COBALT_SYSCALL(sched_yield, primary, (void)) +{ + struct cobalt_thread *curr = cobalt_current_thread(); + int ret = 0; + + trace_cobalt_pthread_yield(0); + + /* Maybe some extension wants to handle this. */ + if (cobalt_call_extension(sched_yield, &curr->extref, ret) && ret) + return ret > 0 ? 
0 : ret; + + xnthread_resume(&curr->threadbase, 0); + if (xnsched_run()) + return 0; + + /* + * If the round-robin move did not beget any context switch to + * a thread running in primary mode, then wait for the next + * linux context switch to happen. + * + * Rationale: it is most probably unexpected that + * sched_yield() does not cause any context switch, since this + * service is commonly used for implementing a poor man's + * cooperative scheduling. By waiting for a context switch to + * happen in the regular kernel, we guarantee that the CPU has + * been relinquished for a while. + * + * Typically, this behavior allows a thread running in primary + * mode to effectively yield the CPU to a thread of + * same/higher priority stuck in secondary mode. + * + * NOTE: calling cobalt_yield() with no timeout + * (i.e. XN_INFINITE) is probably never a good idea. This + * means that a SCHED_FIFO non-rt thread stuck in a tight loop + * would prevent the caller from waking up, since no + * linux-originated schedule event would happen for unblocking + * it on the current CPU. For this reason, we pass the + * arbitrary TICK_NSEC value to limit the wait time to a + * reasonable amount. + */ + return cobalt_yield(TICK_NSEC, TICK_NSEC); +} + +#ifdef CONFIG_XENO_OPT_SCHED_TP + +static inline +int set_tp_config(int cpu, union sched_config *config, size_t len) +{ + xnticks_t offset, duration, next_offset; + struct xnsched_tp_schedule *gps, *ogps; + struct xnsched_tp_window *w; + struct sched_tp_window *p; + struct xnsched *sched; + spl_t s; + int n; + + if (len < sizeof(config->tp)) + return -EINVAL; + + sched = xnsched_struct(cpu); + + switch (config->tp.op) { + case sched_tp_install: + if (config->tp.nr_windows > 0) + break; + fallthrough; + case sched_tp_uninstall: + gps = NULL; + goto set_schedule; + case sched_tp_start: + xnlock_get_irqsave(&nklock, s); + xnsched_tp_start_schedule(sched); + xnlock_put_irqrestore(&nklock, s); + return 0; + case sched_tp_stop: + xnlock_get_irqsave(&nklock, s); + xnsched_tp_stop_schedule(sched); + xnlock_put_irqrestore(&nklock, s); + return 0; + default: + return -EINVAL; + } + + /* Install a new TP schedule on CPU. */ + + gps = xnmalloc(sizeof(*gps) + config->tp.nr_windows * sizeof(*w)); + if (gps == NULL) + return -ENOMEM; + + for (n = 0, p = config->tp.windows, w = gps->pwins, next_offset = 0; + n < config->tp.nr_windows; n++, p++, w++) { + /* + * Time windows must be strictly contiguous. Holes may + * be defined using windows assigned to the pseudo + * partition #-1. 
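+ * For example, a valid layout could be three windows such
+ * as {offset=0ms, duration=10ms, ptid=0},
+ * {offset=10ms, duration=5ms, ptid=-1} (a hole) and
+ * {offset=15ms, duration=10ms, ptid=1}: each offset equals
+ * the sum of the previous durations, and the resulting
+ * time frame spans 25ms overall.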
+ */ + offset = u_ts2ns(&p->offset); + if (offset != next_offset) + goto cleanup_and_fail; + + duration = u_ts2ns(&p->duration); + if (duration <= 0) + goto cleanup_and_fail; + + if (p->ptid < -1 || + p->ptid >= CONFIG_XENO_OPT_SCHED_TP_NRPART) + goto cleanup_and_fail; + + w->w_offset = next_offset; + w->w_part = p->ptid; + next_offset += duration; + } + + atomic_set(&gps->refcount, 1); + gps->pwin_nr = n; + gps->tf_duration = next_offset; +set_schedule: + xnlock_get_irqsave(&nklock, s); + ogps = xnsched_tp_set_schedule(sched, gps); + xnlock_put_irqrestore(&nklock, s); + + if (ogps) + xnsched_tp_put_schedule(ogps); + + return 0; + +cleanup_and_fail: + xnfree(gps); + + return -EINVAL; +} + +static inline +ssize_t get_tp_config(int cpu, void __user *u_config, size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + ssize_t (*put_config)(int policy, void __user *u_config, + size_t u_len, + const union sched_config *config, + size_t len)) +{ + struct xnsched_tp_window *pw, *w; + struct xnsched_tp_schedule *gps; + struct sched_tp_window *pp, *p; + union sched_config *config; + struct xnsched *sched; + ssize_t ret, elen; + spl_t s; + int n; + + xnlock_get_irqsave(&nklock, s); + + sched = xnsched_struct(cpu); + gps = xnsched_tp_get_schedule(sched); + if (gps == NULL) { + xnlock_put_irqrestore(&nklock, s); + return 0; + } + + xnlock_put_irqrestore(&nklock, s); + + elen = sched_tp_confsz(gps->pwin_nr); + config = xnmalloc(elen); + if (config == NULL) { + ret = -ENOMEM; + goto out; + } + + config->tp.op = sched_tp_install; + config->tp.nr_windows = gps->pwin_nr; + for (n = 0, pp = p = config->tp.windows, pw = w = gps->pwins; + n < gps->pwin_nr; pp = p, p++, pw = w, w++, n++) { + u_ns2ts(&p->offset, w->w_offset); + u_ns2ts(&pp->duration, w->w_offset - pw->w_offset); + p->ptid = w->w_part; + } + u_ns2ts(&pp->duration, gps->tf_duration - pw->w_offset); + ret = put_config(SCHED_TP, u_config, len, config, elen); + xnfree(config); +out: + xnsched_tp_put_schedule(gps); + + return ret; +} + +#else /* !CONFIG_XENO_OPT_SCHED_TP */ + +static inline int +set_tp_config(int cpu, union sched_config *config, size_t len) +{ + return -EINVAL; +} + +static inline ssize_t +get_tp_config(int cpu, union sched_config __user *u_config, size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + ssize_t (*put_config)(int policy, void __user *u_config, + size_t u_len, + const union sched_config *config, + size_t len)) +{ + return -EINVAL; +} + +#endif /* !CONFIG_XENO_OPT_SCHED_TP */ + +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + +static inline +int set_quota_config(int cpu, union sched_config *config, size_t len) +{ + struct __sched_config_quota *p = &config->quota; + struct __sched_quota_info *iq = &p->info; + struct cobalt_sched_group *group; + struct xnsched_quota_group *tg; + struct xnsched *sched; + int ret, quota_sum; + spl_t s; + + if (len < sizeof(*p)) + return -EINVAL; + + switch (p->op) { + case sched_quota_add: + group = xnmalloc(sizeof(*group)); + if (group == NULL) + return -ENOMEM; + tg = &group->quota; + group->pshared = p->add.pshared != 0; + group->scope = cobalt_current_resources(group->pshared); + xnlock_get_irqsave(&nklock, s); + sched = xnsched_struct(cpu); + ret = xnsched_quota_create_group(tg, sched, "a_sum); + if (ret) { + xnlock_put_irqrestore(&nklock, s); + xnfree(group); + return ret; + } + list_add(&group->next, &group->scope->schedq); + xnlock_put_irqrestore(&nklock, s); + break; + case 
sched_quota_remove: + case sched_quota_force_remove: + xnlock_get_irqsave(&nklock, s); + sched = xnsched_struct(cpu); + tg = xnsched_quota_find_group(sched, p->remove.tgid); + if (tg == NULL) + goto bad_tgid; + group = container_of(tg, struct cobalt_sched_group, quota); + if (group->scope != cobalt_current_resources(group->pshared)) + goto bad_tgid; + ret = xnsched_quota_destroy_group(tg, + p->op == sched_quota_force_remove, + "a_sum); + if (ret) { + xnlock_put_irqrestore(&nklock, s); + return ret; + } + list_del(&group->next); + xnlock_put_irqrestore(&nklock, s); + iq->tgid = tg->tgid; + iq->quota = tg->quota_percent; + iq->quota_peak = tg->quota_peak_percent; + iq->quota_sum = quota_sum; + xnfree(group); + return 0; + case sched_quota_set: + xnlock_get_irqsave(&nklock, s); + sched = xnsched_struct(cpu); + tg = xnsched_quota_find_group(sched, p->set.tgid); + if (tg == NULL) + goto bad_tgid; + group = container_of(tg, struct cobalt_sched_group, quota); + if (group->scope != cobalt_current_resources(group->pshared)) + goto bad_tgid; + xnsched_quota_set_limit(tg, p->set.quota, p->set.quota_peak, + "a_sum); + xnlock_put_irqrestore(&nklock, s); + break; + default: + return -EINVAL; + } + + iq->tgid = tg->tgid; + iq->quota = tg->quota_percent; + iq->quota_peak = tg->quota_peak_percent; + iq->quota_sum = quota_sum; + + return 0; +bad_tgid: + xnlock_put_irqrestore(&nklock, s); + + return -ESRCH; +} + +static inline +ssize_t get_quota_config(int cpu, void __user *u_config, size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + ssize_t (*put_config)(int policy, void __user *u_config, + size_t u_len, + const union sched_config *config, + size_t len)) +{ + struct cobalt_sched_group *group; + struct xnsched_quota_group *tg; + union sched_config *config; + struct xnsched *sched; + ssize_t ret; + spl_t s; + + config = fetch_config(SCHED_QUOTA, u_config, &len); + if (IS_ERR(config)) + return PTR_ERR(config); + + xnlock_get_irqsave(&nklock, s); + sched = xnsched_struct(cpu); + tg = xnsched_quota_find_group(sched, config->quota.get.tgid); + if (tg == NULL) + goto bad_tgid; + + group = container_of(tg, struct cobalt_sched_group, quota); + if (group->scope != cobalt_current_resources(group->pshared)) + goto bad_tgid; + + config->quota.info.tgid = tg->tgid; + config->quota.info.quota = tg->quota_percent; + config->quota.info.quota_peak = tg->quota_peak_percent; + config->quota.info.quota_sum = xnsched_quota_sum_all(sched); + xnlock_put_irqrestore(&nklock, s); + + ret = put_config(SCHED_QUOTA, u_config, len, config, sizeof(*config)); + xnfree(config); + + return ret; +bad_tgid: + xnlock_put_irqrestore(&nklock, s); + xnfree(config); + + return -ESRCH; +} + +#else /* !CONFIG_XENO_OPT_SCHED_QUOTA */ + +static inline +int set_quota_config(int cpu, union sched_config *config, size_t len) +{ + return -EINVAL; +} + +static inline +ssize_t get_quota_config(int cpu, void __user *u_config, + size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + ssize_t (*put_config)(int policy, void __user *u_config, + size_t u_len, + const union sched_config *config, + size_t len)) +{ + return -EINVAL; +} + +#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */ + +static union sched_config * +sched_fetch_config(int policy, const void __user *u_config, size_t *len) +{ + union sched_config *buf; + int ret; + + if (u_config == NULL) + return ERR_PTR(-EFAULT); + + if (policy == SCHED_QUOTA && *len < sizeof(buf->quota)) + return 
ERR_PTR(-EINVAL); + + buf = xnmalloc(*len); + if (buf == NULL) + return ERR_PTR(-ENOMEM); + + ret = cobalt_copy_from_user(buf, u_config, *len); + if (ret) { + xnfree(buf); + return ERR_PTR(ret); + } + + return buf; +} + +static int sched_ack_config(int policy, const union sched_config *config, + void __user *u_config) +{ + union sched_config __user *u_p = u_config; + + if (policy != SCHED_QUOTA) + return 0; + + return u_p == NULL ? -EFAULT : + cobalt_copy_to_user(&u_p->quota.info, &config->quota.info, + sizeof(u_p->quota.info)); +} + +static ssize_t sched_put_config(int policy, + void __user *u_config, size_t u_len, + const union sched_config *config, size_t len) +{ + union sched_config *u_p = u_config; + + if (u_config == NULL) + return -EFAULT; + + if (policy == SCHED_QUOTA) { + if (u_len < sizeof(config->quota)) + return -EINVAL; + return cobalt_copy_to_user(&u_p->quota.info, &config->quota.info, + sizeof(u_p->quota.info)) ?: + sizeof(u_p->quota.info); + } + + return cobalt_copy_to_user(u_config, config, len) ?: len; +} + +int __cobalt_sched_setconfig_np(int cpu, int policy, + void __user *u_config, + size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + int (*ack_config)(int policy, + const union sched_config *config, + void __user *u_config)) +{ + union sched_config *buf; + int ret; + + trace_cobalt_sched_setconfig(cpu, policy, len); + + if (cpu < 0 || cpu >= NR_CPUS || !xnsched_threading_cpu(cpu)) + return -EINVAL; + + if (len == 0) + return -EINVAL; + + buf = fetch_config(policy, u_config, &len); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + switch (policy) { + case SCHED_TP: + ret = set_tp_config(cpu, buf, len); + break; + case SCHED_QUOTA: + ret = set_quota_config(cpu, buf, len); + break; + default: + ret = -EINVAL; + } + + if (ret == 0) + ret = ack_config(policy, buf, u_config); + + xnfree(buf); + + return ret; +} + +COBALT_SYSCALL(sched_setconfig_np, conforming, + (int cpu, int policy, + union sched_config __user *u_config, + size_t len)) +{ + return __cobalt_sched_setconfig_np(cpu, policy, u_config, len, + sched_fetch_config, sched_ack_config); +} + +ssize_t __cobalt_sched_getconfig_np(int cpu, int policy, + void __user *u_config, + size_t len, + union sched_config *(*fetch_config) + (int policy, const void __user *u_config, + size_t *len), + ssize_t (*put_config)(int policy, + void __user *u_config, + size_t u_len, + const union sched_config *config, + size_t len)) +{ + ssize_t ret; + + switch (policy) { + case SCHED_TP: + ret = get_tp_config(cpu, u_config, len, + fetch_config, put_config); + break; + case SCHED_QUOTA: + ret = get_quota_config(cpu, u_config, len, + fetch_config, put_config); + break; + default: + ret = -EINVAL; + } + + trace_cobalt_sched_get_config(cpu, policy, ret); + + return ret; +} + +COBALT_SYSCALL(sched_getconfig_np, conforming, + (int cpu, int policy, + union sched_config __user *u_config, + size_t len)) +{ + return __cobalt_sched_getconfig_np(cpu, policy, u_config, len, + sched_fetch_config, sched_put_config); +} + +int __cobalt_sched_weightprio(int policy, + const struct sched_param_ex *param_ex) +{ + struct xnsched_class *sched_class; + union xnsched_policy_param param; + int prio; + + sched_class = cobalt_sched_policy_param(¶m, policy, + param_ex, NULL); + if (sched_class == NULL) + return -EINVAL; + + prio = param_ex->sched_priority; + if (prio < 0) + prio = -prio; + + return prio + sched_class->weight; +} + +COBALT_SYSCALL(sched_weightprio, current, + (int policy, const struct 
sched_param_ex __user *u_param)) +{ + struct sched_param_ex param_ex; + + if (cobalt_copy_from_user(¶m_ex, u_param, sizeof(param_ex))) + return -EFAULT; + + return __cobalt_sched_weightprio(policy, ¶m_ex); +} + +int cobalt_sched_setscheduler_ex(pid_t pid, + int policy, + const struct sched_param_ex *param_ex, + __u32 __user *u_winoff, + int __user *u_promoted) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + int ret, promoted = 0; + spl_t s; + + trace_cobalt_sched_setscheduler(pid, policy, param_ex); + + if (pid) { + xnlock_get_irqsave(&nklock, s); + thread = cobalt_thread_find(pid); + xnlock_put_irqrestore(&nklock, s); + } else + thread = cobalt_current_thread(); + + if (thread == NULL) { + if (u_winoff == NULL || pid != task_pid_vnr(current)) + return -ESRCH; + + thread = cobalt_thread_shadow(&hkey, u_winoff); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + promoted = 1; + } + + ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex); + if (ret) + return ret; + + return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted)); +} + +COBALT_SYSCALL(sched_setscheduler_ex, conforming, + (pid_t pid, + int policy, + const struct sched_param_ex __user *u_param, + __u32 __user *u_winoff, + int __user *u_promoted)) +{ + struct sched_param_ex param_ex; + + if (cobalt_copy_from_user(¶m_ex, u_param, sizeof(param_ex))) + return -EFAULT; + + return cobalt_sched_setscheduler_ex(pid, policy, ¶m_ex, + u_winoff, u_promoted); +} + +int cobalt_sched_getscheduler_ex(pid_t pid, + int *policy_r, + struct sched_param_ex *param_ex) +{ + struct cobalt_thread *thread; + spl_t s; + + trace_cobalt_sched_getscheduler(pid); + + if (pid) { + xnlock_get_irqsave(&nklock, s); + thread = cobalt_thread_find(pid); + xnlock_put_irqrestore(&nklock, s); + } else + thread = cobalt_current_thread(); + + if (thread == NULL) + return -ESRCH; + + return __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex); +} + +COBALT_SYSCALL(sched_getscheduler_ex, current, + (pid_t pid, + int __user *u_policy, + struct sched_param_ex __user *u_param)) +{ + struct sched_param_ex param_ex; + int ret, policy; + + ret = cobalt_sched_getscheduler_ex(pid, &policy, ¶m_ex); + if (ret) + return ret; + + if (cobalt_copy_to_user(u_param, ¶m_ex, sizeof(param_ex)) || + cobalt_copy_to_user(u_policy, &policy, sizeof(policy))) + return -EFAULT; + + return 0; +} + +void cobalt_sched_reclaim(struct cobalt_process *process) +{ + struct cobalt_resources *p = &process->resources; + struct cobalt_sched_group *group; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + while (!list_empty(&p->schedq)) { + group = list_get_entry(&p->schedq, struct cobalt_sched_group, next); +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + xnsched_quota_destroy_group(&group->quota, 1, NULL); +#endif + xnlock_put_irqrestore(&nklock, s); + xnfree(group); + xnlock_get_irqsave(&nklock, s); + } + + xnlock_put_irqrestore(&nklock, s); +} --- linux/kernel/xenomai/posix/sem.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/sem.h 2022-03-21 12:58:29.064892131 +0100 @@ -0,0 +1,133 @@ +/* + * Written by Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_SEM_H +#define _COBALT_POSIX_SEM_H + +#include +#include +#include +#include +#include +#include + +struct cobalt_process; +struct filename; + +struct cobalt_sem { + unsigned int magic; + struct xnsynch synchbase; + struct cobalt_sem_state *state; + int flags; + unsigned int refs; + struct filename *pathname; + struct cobalt_resnode resnode; +}; + +/* Copied from Linuxthreads semaphore.h. */ +struct _sem_fastlock +{ + long int __status; + int __spinlock; +}; + +typedef struct +{ + struct _sem_fastlock __sem_lock; + int __sem_value; + long __sem_waiting; +} sem_t; + +#include + +#define SEM_VALUE_MAX (INT_MAX) +#define SEM_FAILED NULL +#define SEM_NAMED 0x80000000 + +struct cobalt_sem_shadow __user * +__cobalt_sem_open(struct cobalt_sem_shadow __user *usm, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value); + +int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem, + const struct timespec64 *ts); + +int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts); + +int __cobalt_sem_destroy(xnhandle_t handle); + +void cobalt_nsem_reclaim(struct cobalt_process *process); + +struct cobalt_sem * +__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sem, + int flags, unsigned value); + +void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic, + struct cobalt_sem_shadow *sm); + +COBALT_SYSCALL_DECL(sem_init, + (struct cobalt_sem_shadow __user *u_sem, + int flags, unsigned value)); + +COBALT_SYSCALL_DECL(sem_post, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_wait, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_timedwait, + (struct cobalt_sem_shadow __user *u_sem, + const struct __user_old_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(sem_timedwait64, + (struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts)); + +COBALT_SYSCALL_DECL(sem_trywait, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_getvalue, + (struct cobalt_sem_shadow __user *u_sem, + int __user *u_sval)); + +COBALT_SYSCALL_DECL(sem_destroy, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_open, + (struct cobalt_sem_shadow __user *__user *u_addrp, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value)); + +COBALT_SYSCALL_DECL(sem_close, + (struct cobalt_sem_shadow __user *usm)); + +COBALT_SYSCALL_DECL(sem_unlink, (const char __user *u_name)); + +COBALT_SYSCALL_DECL(sem_broadcast_np, + (struct cobalt_sem_shadow __user *u_sem)); + +COBALT_SYSCALL_DECL(sem_inquire, + (struct cobalt_sem_shadow __user *u_sem, + struct cobalt_sem_info __user *u_info, + pid_t __user *u_waitlist, + size_t waitsz)); + +void cobalt_sem_reclaim(struct cobalt_resnode *node, + spl_t s); + +#endif /* !_COBALT_POSIX_SEM_H */ --- linux/kernel/xenomai/posix/corectl.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/corectl.h 2022-03-21 12:58:29.060892170 +0100 @@ -0,0 +1,38 @@ +/* + * Copyright (C) 
2016 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_CORECTL_H +#define _COBALT_POSIX_CORECTL_H + +#include +#include +#include +#include + +struct cobalt_config_vector { + void __user *u_buf; + size_t u_bufsz; +}; + +COBALT_SYSCALL_DECL(corectl, + (int request, void __user *u_buf, size_t u_bufsz)); + +void cobalt_add_config_chain(struct notifier_block *nb); + +void cobalt_remove_config_chain(struct notifier_block *nb); + +#endif /* !_COBALT_POSIX_CORECTL_H */ --- linux/kernel/xenomai/posix/mqueue.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/mqueue.c 2022-03-21 12:58:29.057892199 +0100 @@ -0,0 +1,1093 @@ +/* + * Written by Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "internal.h" +#include "thread.h" +#include "signal.h" +#include "timer.h" +#include "mqueue.h" +#include "clock.h" +#include +#include + +#define COBALT_MSGMAX 65536 +#define COBALT_MSGSIZEMAX (16*1024*1024) +#define COBALT_MSGPRIOMAX 32768 + +struct cobalt_mq { + unsigned magic; + + struct list_head link; + + struct xnsynch receivers; + struct xnsynch senders; + size_t memsize; + char *mem; + struct list_head queued; + struct list_head avail; + int nrqueued; + + /* mq_notify */ + struct siginfo si; + mqd_t target_qd; + struct cobalt_thread *target; + + struct mq_attr attr; + + unsigned refs; + char name[COBALT_MAXNAME]; + xnhandle_t handle; + + DECLARE_XNSELECT(read_select); + DECLARE_XNSELECT(write_select); +}; + +struct cobalt_mqd { + struct cobalt_mq *mq; + struct rtdm_fd fd; +}; + +struct cobalt_msg { + struct list_head link; + unsigned int prio; + size_t len; + char data[0]; +}; + +struct cobalt_mqwait_context { + struct xnthread_wait_context wc; + struct cobalt_msg *msg; +}; + +static struct mq_attr default_attr = { + .mq_maxmsg = 10, + .mq_msgsize = 8192, +}; + +static LIST_HEAD(cobalt_mqq); + +#ifdef CONFIG_XENO_OPT_VFILE + +static int mq_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + return 0; +} + +static struct xnvfile_regular_ops mq_vfile_ops = { + .show = mq_vfile_show, +}; + +static struct xnpnode_regular __mq_pnode = { + .node = { + .dirname = "mqueue", + .root = &posix_ptree, + .ops = &xnregistry_vfreg_ops, + }, + .vfile = { + .ops = &mq_vfile_ops, + }, +}; + +#else /* !CONFIG_XENO_OPT_VFILE */ + +static struct xnpnode_link __mq_pnode = { + .node = { + .dirname = "mqueue", + } +}; + +#endif /* !CONFIG_XENO_OPT_VFILE */ + + +static inline struct cobalt_msg *mq_msg_alloc(struct cobalt_mq *mq) +{ + if (list_empty(&mq->avail)) + return NULL; + + return list_get_entry(&mq->avail, struct cobalt_msg, link); +} + +static inline void mq_msg_free(struct cobalt_mq *mq, struct cobalt_msg * msg) +{ + list_add(&msg->link, &mq->avail); /* For earliest re-use of the block. */ +} + +static inline int mq_init(struct cobalt_mq *mq, const struct mq_attr *attr) +{ + unsigned i, msgsize, memsize; + char *mem; + + if (attr == NULL) + attr = &default_attr; + else { + if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) + return -EINVAL; + if (attr->mq_maxmsg > COBALT_MSGMAX) + return -EINVAL; + if (attr->mq_msgsize > COBALT_MSGSIZEMAX) + return -EINVAL; + } + + msgsize = attr->mq_msgsize + sizeof(struct cobalt_msg); + + /* Align msgsize on natural boundary. */ + if ((msgsize % sizeof(unsigned long))) + msgsize += + sizeof(unsigned long) - (msgsize % sizeof(unsigned long)); + + memsize = msgsize * attr->mq_maxmsg; + memsize = PAGE_ALIGN(memsize); + if (get_order(memsize) > MAX_ORDER) + return -ENOSPC; + + mem = xnheap_vmalloc(memsize); + if (mem == NULL) + return -ENOSPC; + + mq->memsize = memsize; + INIT_LIST_HEAD(&mq->queued); + mq->nrqueued = 0; + xnsynch_init(&mq->receivers, XNSYNCH_PRIO, NULL); + xnsynch_init(&mq->senders, XNSYNCH_PRIO, NULL); + mq->mem = mem; + + /* Fill the pool. 
*/ + INIT_LIST_HEAD(&mq->avail); + for (i = 0; i < attr->mq_maxmsg; i++) { + struct cobalt_msg *msg = (struct cobalt_msg *) (mem + i * msgsize); + mq_msg_free(mq, msg); + } + + mq->attr = *attr; + mq->target = NULL; + xnselect_init(&mq->read_select); + xnselect_init(&mq->write_select); + mq->magic = COBALT_MQ_MAGIC; + mq->refs = 2; + INIT_LIST_HEAD(&mq->link); + + return 0; +} + +static inline void mq_destroy(struct cobalt_mq *mq) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + xnsynch_destroy(&mq->receivers); + xnsynch_destroy(&mq->senders); + list_del(&mq->link); + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); + xnselect_destroy(&mq->read_select); /* Reschedules. */ + xnselect_destroy(&mq->write_select); /* Ditto. */ + xnregistry_remove(mq->handle); + xnheap_vfree(mq->mem); + kfree(mq); +} + +static int mq_unref_inner(struct cobalt_mq *mq, spl_t s) +{ + int destroy; + + destroy = --mq->refs == 0; + xnlock_put_irqrestore(&nklock, s); + + if (destroy) + mq_destroy(mq); + + return destroy; +} + +static int mq_unref(struct cobalt_mq *mq) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + return mq_unref_inner(mq, s); +} + +static void mqd_close(struct rtdm_fd *fd) +{ + struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd); + struct cobalt_mq *mq = mqd->mq; + + kfree(mqd); + mq_unref(mq); +} + +int +mqd_select(struct rtdm_fd *fd, struct xnselector *selector, + unsigned type, unsigned index) +{ + struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd); + struct xnselect_binding *binding; + struct cobalt_mq *mq; + int err; + spl_t s; + + if (type == XNSELECT_READ || type == XNSELECT_WRITE) { + binding = xnmalloc(sizeof(*binding)); + if (!binding) + return -ENOMEM; + } else + return -EBADF; + + xnlock_get_irqsave(&nklock, s); + mq = mqd->mq; + + switch(type) { + case XNSELECT_READ: + err = -EBADF; + if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_WRONLY) + goto unlock_and_error; + + err = xnselect_bind(&mq->read_select, binding, + selector, type, index, + !list_empty(&mq->queued)); + if (err) + goto unlock_and_error; + break; + + case XNSELECT_WRITE: + err = -EBADF; + if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_RDONLY) + goto unlock_and_error; + + err = xnselect_bind(&mq->write_select, binding, + selector, type, index, + !list_empty(&mq->avail)); + if (err) + goto unlock_and_error; + break; + } + xnlock_put_irqrestore(&nklock, s); + return 0; + + unlock_and_error: + xnlock_put_irqrestore(&nklock, s); + xnfree(binding); + return err; +} + +static struct rtdm_fd_ops mqd_ops = { + .close = mqd_close, + .select = mqd_select, +}; + +static inline int mqd_create(struct cobalt_mq *mq, unsigned long flags, int ufd) +{ + struct cobalt_mqd *mqd; + int ret; + + if (cobalt_ppd_get(0) == &cobalt_kernel_ppd) + return -EPERM; + + mqd = kmalloc(sizeof(*mqd), GFP_KERNEL); + if (mqd == NULL) + return -ENOSPC; + + mqd->fd.oflags = flags; + mqd->mq = mq; + + ret = rtdm_fd_enter(&mqd->fd, ufd, COBALT_MQD_MAGIC, &mqd_ops); + if (ret < 0) + return ret; + + return rtdm_fd_register(&mqd->fd, ufd); +} + +static int mq_open(int uqd, const char *name, int oflags, + int mode, struct mq_attr *attr) +{ + struct cobalt_mq *mq; + xnhandle_t handle; + spl_t s; + int err; + + if (name[0] != '/' || name[1] == '\0') + return -EINVAL; + + retry_bind: + err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle); + switch (err) { + case 0: + /* Found */ + if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) + return -EEXIST; + + xnlock_get_irqsave(&nklock, s); + mq = 
xnregistry_lookup(handle, NULL); + if (mq && mq->magic != COBALT_MQ_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + + if (mq) { + ++mq->refs; + xnlock_put_irqrestore(&nklock, s); + } else { + xnlock_put_irqrestore(&nklock, s); + goto retry_bind; + } + + err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK), + uqd); + if (err < 0) { + mq_unref(mq); + return err; + } + break; + + case -EWOULDBLOCK: + /* Not found */ + if ((oflags & O_CREAT) == 0) + return (mqd_t)-ENOENT; + + mq = kmalloc(sizeof(*mq), GFP_KERNEL); + if (mq == NULL) + return -ENOSPC; + + err = mq_init(mq, attr); + if (err) { + kfree(mq); + return err; + } + + snprintf(mq->name, sizeof(mq->name), "%s", &name[1]); + + err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK), + uqd); + if (err < 0) { + mq_destroy(mq); + return err; + } + + xnlock_get_irqsave(&nklock, s); + err = xnregistry_enter(mq->name, mq, &mq->handle, + &__mq_pnode.node); + if (err < 0) + --mq->refs; + else + list_add_tail(&mq->link, &cobalt_mqq); + xnlock_put_irqrestore(&nklock, s); + if (err < 0) { + rtdm_fd_close(uqd, COBALT_MQD_MAGIC); + if (err == -EEXIST) + goto retry_bind; + return err; + } + break; + + default: + return err; + } + + return 0; +} + +static inline int mq_close(mqd_t fd) +{ + int err; + + err = rtdm_fd_close(fd, COBALT_MQD_MAGIC); + return err == -EADV ? -EBADF : err; +} + +static inline int mq_unlink(const char *name) +{ + struct cobalt_mq *mq; + xnhandle_t handle; + spl_t s; + int err; + + if (name[0] != '/' || name[1] == '\0') + return -EINVAL; + + err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle); + if (err == -EWOULDBLOCK) + return -ENOENT; + if (err) + return err; + + xnlock_get_irqsave(&nklock, s); + mq = xnregistry_lookup(handle, NULL); + if (!mq) { + err = -ENOENT; + goto err_unlock; + } + if (mq->magic != COBALT_MQ_MAGIC) { + err = -EINVAL; + err_unlock: + xnlock_put_irqrestore(&nklock, s); + + return err; + } + if (mq_unref_inner(mq, s) == 0) + xnregistry_unlink(&name[1]); + return 0; +} + +static inline struct cobalt_msg * +mq_trysend(struct cobalt_mqd *mqd, size_t len) +{ + struct cobalt_msg *msg; + struct cobalt_mq *mq; + unsigned flags; + + mq = mqd->mq; + flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK; + + if (flags != O_WRONLY && flags != O_RDWR) + return ERR_PTR(-EBADF); + + if (len > mq->attr.mq_msgsize) + return ERR_PTR(-EMSGSIZE); + + msg = mq_msg_alloc(mq); + if (msg == NULL) + return ERR_PTR(-EAGAIN); + + if (list_empty(&mq->avail)) + xnselect_signal(&mq->write_select, 0); + + return msg; +} + +static inline struct cobalt_msg * +mq_tryrcv(struct cobalt_mqd *mqd, size_t len) +{ + struct cobalt_msg *msg; + unsigned int flags; + struct cobalt_mq *mq; + + mq = mqd->mq; + flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK; + + if (flags != O_RDONLY && flags != O_RDWR) + return ERR_PTR(-EBADF); + + if (len < mq->attr.mq_msgsize) + return ERR_PTR(-EMSGSIZE); + + if (list_empty(&mq->queued)) + return ERR_PTR(-EAGAIN); + + msg = list_get_entry(&mq->queued, struct cobalt_msg, link); + mq->nrqueued--; + + if (list_empty(&mq->queued)) + xnselect_signal(&mq->read_select, 0); + + return msg; +} + +static struct cobalt_msg * +mq_timedsend_inner(struct cobalt_mqd *mqd, + size_t len, const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct cobalt_mqwait_context mwc; + struct cobalt_msg *msg; + struct cobalt_mq *mq; + struct timespec64 ts; + xntmode_t tmode; + xnticks_t to; + spl_t s; + int ret; + + to = XN_INFINITE; + 
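+ /*
+ * Wait indefinitely (relative timeout) by default; if the
+ * caller passed a timeout, it is fetched outside the nklock
+ * on the first blocking attempt, converted to an absolute
+ * CLOCK_REALTIME date, then the operation is retried via the
+ * redo path below.
+ */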
tmode = XN_RELATIVE; +redo: + xnlock_get_irqsave(&nklock, s); + msg = mq_trysend(mqd, len); + if (msg != ERR_PTR(-EAGAIN)) + goto out; + + if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK) + goto out; + + if (fetch_timeout) { + xnlock_put_irqrestore(&nklock, s); + ret = fetch_timeout(&ts, u_ts); + if (ret) + return ERR_PTR(ret); + if (!timespec64_valid(&ts)) + return ERR_PTR(-EINVAL); + to = ts2ns(&ts) + 1; + tmode = XN_REALTIME; + fetch_timeout = NULL; + goto redo; + } + + mq = mqd->mq; + xnthread_prepare_wait(&mwc.wc); + ret = xnsynch_sleep_on(&mq->senders, to, tmode); + if (ret) { + if (ret & XNBREAK) + msg = ERR_PTR(-EINTR); + else if (ret & XNTIMEO) + msg = ERR_PTR(-ETIMEDOUT); + else if (ret & XNRMID) + msg = ERR_PTR(-EBADF); + } else + msg = mwc.msg; +out: + xnlock_put_irqrestore(&nklock, s); + + return msg; +} + +static void mq_release_msg(struct cobalt_mq *mq, struct cobalt_msg *msg) +{ + struct cobalt_mqwait_context *mwc; + struct xnthread_wait_context *wc; + struct xnthread *thread; + + /* + * Try passing the free message slot to a waiting sender, link + * it to the free queue otherwise. + */ + if (xnsynch_pended_p(&mq->senders)) { + thread = xnsynch_wakeup_one_sleeper(&mq->senders); + wc = xnthread_get_wait_context(thread); + mwc = container_of(wc, struct cobalt_mqwait_context, wc); + mwc->msg = msg; + xnthread_complete_wait(wc); + } else { + mq_msg_free(mq, msg); + if (list_is_singular(&mq->avail)) + xnselect_signal(&mq->write_select, 1); + } +} + +static int +mq_finish_send(struct cobalt_mqd *mqd, struct cobalt_msg *msg) +{ + struct cobalt_mqwait_context *mwc; + struct xnthread_wait_context *wc; + struct cobalt_sigpending *sigp; + struct xnthread *thread; + struct cobalt_mq *mq; + spl_t s; + + mq = mqd->mq; + + xnlock_get_irqsave(&nklock, s); + /* Can we do pipelined sending? */ + if (xnsynch_pended_p(&mq->receivers)) { + thread = xnsynch_wakeup_one_sleeper(&mq->receivers); + wc = xnthread_get_wait_context(thread); + mwc = container_of(wc, struct cobalt_mqwait_context, wc); + mwc->msg = msg; + xnthread_complete_wait(wc); + } else { + /* Nope, have to go through the queue. */ + list_add_priff(msg, &mq->queued, prio, link); + mq->nrqueued++; + + /* + * If first message and no pending reader, send a + * signal if notification was enabled via mq_notify(). 
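+ * Note the registration is one-shot: the target is
+ * cleared right after the signal is queued, so the
+ * reader has to call mq_notify() again to re-arm it.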
+ */ + if (list_is_singular(&mq->queued)) { + xnselect_signal(&mq->read_select, 1); + if (mq->target) { + sigp = cobalt_signal_alloc(); + if (sigp) { + cobalt_copy_siginfo(SI_MESGQ, &sigp->si, &mq->si); + if (cobalt_signal_send(mq->target, sigp, 0) <= 0) + cobalt_signal_free(sigp); + } + mq->target = NULL; + } + } + } + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static struct cobalt_msg * +mq_timedrcv_inner(struct cobalt_mqd *mqd, + size_t len, + const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct cobalt_mqwait_context mwc; + struct cobalt_msg *msg; + struct cobalt_mq *mq; + struct timespec64 ts; + xntmode_t tmode; + xnticks_t to; + spl_t s; + int ret; + + to = XN_INFINITE; + tmode = XN_RELATIVE; +redo: + xnlock_get_irqsave(&nklock, s); + msg = mq_tryrcv(mqd, len); + if (msg != ERR_PTR(-EAGAIN)) + goto out; + + if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK) + goto out; + + if (fetch_timeout) { + xnlock_put_irqrestore(&nklock, s); + ret = fetch_timeout(&ts, u_ts); + if (ret) + return ERR_PTR(ret); + if (!timespec64_valid(&ts)) + return ERR_PTR(-EINVAL); + to = ts2ns(&ts) + 1; + tmode = XN_REALTIME; + fetch_timeout = NULL; + goto redo; + } + + mq = mqd->mq; + xnthread_prepare_wait(&mwc.wc); + ret = xnsynch_sleep_on(&mq->receivers, to, tmode); + if (ret == 0) + msg = mwc.msg; + else if (ret & XNRMID) + msg = ERR_PTR(-EBADF); + else if (ret & XNTIMEO) + msg = ERR_PTR(-ETIMEDOUT); + else + msg = ERR_PTR(-EINTR); +out: + xnlock_put_irqrestore(&nklock, s); + + return msg; +} + +static int +mq_finish_rcv(struct cobalt_mqd *mqd, struct cobalt_msg *msg) +{ + spl_t s; + + xnlock_get_irqsave(&nklock, s); + mq_release_msg(mqd->mq, msg); + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static inline int mq_getattr(struct cobalt_mqd *mqd, struct mq_attr *attr) +{ + struct cobalt_mq *mq; + spl_t s; + + mq = mqd->mq; + *attr = mq->attr; + xnlock_get_irqsave(&nklock, s); + attr->mq_flags = rtdm_fd_flags(&mqd->fd); + attr->mq_curmsgs = mq->nrqueued; + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static inline int +mq_notify(struct cobalt_mqd *mqd, unsigned index, const struct sigevent *evp) +{ + struct cobalt_thread *thread = cobalt_current_thread(); + struct cobalt_mq *mq; + int err; + spl_t s; + + if (evp && ((evp->sigev_notify != SIGEV_SIGNAL && + evp->sigev_notify != SIGEV_NONE) || + (unsigned int)(evp->sigev_signo - 1) > SIGRTMAX - 1)) + return -EINVAL; + + if (xnsched_interrupt_p() || thread == NULL) + return -EPERM; + + xnlock_get_irqsave(&nklock, s); + mq = mqd->mq; + if (mq->target && mq->target != thread) { + err = -EBUSY; + goto unlock_and_error; + } + + if (evp == NULL || evp->sigev_notify == SIGEV_NONE) + /* Here, mq->target == cobalt_current_thread() or NULL. */ + mq->target = NULL; + else { + mq->target = thread; + mq->target_qd = index; + mq->si.si_signo = evp->sigev_signo; + mq->si.si_errno = 0; + mq->si.si_code = SI_MESGQ; + mq->si.si_value = evp->sigev_value; + /* + * XXX: we differ from the regular kernel here, which + * passes the sender's pid/uid data into the + * receiver's namespaces. We pass the receiver's creds + * into the init namespace instead. 
+ */ + mq->si.si_pid = task_pid_nr(current); + mq->si.si_uid = get_current_uuid(); + } + + xnlock_put_irqrestore(&nklock, s); + return 0; + + unlock_and_error: + xnlock_put_irqrestore(&nklock, s); + return err; +} + +static inline struct cobalt_mqd *cobalt_mqd_get(mqd_t ufd) +{ + struct rtdm_fd *fd; + + fd = rtdm_fd_get(ufd, COBALT_MQD_MAGIC); + if (IS_ERR(fd)) { + int err = PTR_ERR(fd); + if (err == -EADV) + err = cobalt_current_process() ? -EBADF : -EPERM; + return ERR_PTR(err); + } + + return container_of(fd, struct cobalt_mqd, fd); +} + +static inline void cobalt_mqd_put(struct cobalt_mqd *mqd) +{ + rtdm_fd_put(&mqd->fd); +} + +int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp) +{ + struct cobalt_mqd *mqd; + int ret; + + mqd = cobalt_mqd_get(fd); + if (IS_ERR(mqd)) + ret = PTR_ERR(mqd); + else { + trace_cobalt_mq_notify(fd, evp); + ret = mq_notify(mqd, fd, evp); + cobalt_mqd_put(mqd); + } + + return ret; +} + +COBALT_SYSCALL(mq_notify, primary, + (mqd_t fd, const struct sigevent *__user evp)) +{ + struct sigevent sev; + + if (evp && cobalt_copy_from_user(&sev, evp, sizeof(sev))) + return -EFAULT; + + return __cobalt_mq_notify(fd, evp ? &sev : NULL); +} + +int __cobalt_mq_open(const char __user *u_name, int oflags, + mode_t mode, struct mq_attr *attr) +{ + char name[COBALT_MAXNAME]; + unsigned int len; + mqd_t uqd; + int ret; + + len = cobalt_strncpy_from_user(name, u_name, sizeof(name)); + if (len < 0) + return -EFAULT; + + if (len >= sizeof(name)) + return -ENAMETOOLONG; + + if (len == 0) + return -EINVAL; + + trace_cobalt_mq_open(name, oflags, mode); + + uqd = __rtdm_anon_getfd("[cobalt-mq]", oflags); + if (uqd < 0) + return uqd; + + ret = mq_open(uqd, name, oflags, mode, attr); + if (ret < 0) { + __rtdm_anon_putfd(uqd); + return ret; + } + + return uqd; +} + +COBALT_SYSCALL(mq_open, lostage, + (const char __user *u_name, int oflags, + mode_t mode, struct mq_attr __user *u_attr)) +{ + struct mq_attr _attr, *attr = &_attr; + + if ((oflags & O_CREAT) && u_attr) { + if (cobalt_copy_from_user(&_attr, u_attr, sizeof(_attr))) + return -EFAULT; + } else + attr = NULL; + + return __cobalt_mq_open(u_name, oflags, mode, attr); +} + +COBALT_SYSCALL(mq_close, lostage, (mqd_t uqd)) +{ + trace_cobalt_mq_close(uqd); + + return mq_close(uqd); +} + +COBALT_SYSCALL(mq_unlink, lostage, (const char __user *u_name)) +{ + char name[COBALT_MAXNAME]; + unsigned len; + + len = cobalt_strncpy_from_user(name, u_name, sizeof(name)); + if (len < 0) + return -EFAULT; + if (len >= sizeof(name)) + return -ENAMETOOLONG; + + trace_cobalt_mq_unlink(name); + + return mq_unlink(name); +} + +int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr) +{ + struct cobalt_mqd *mqd; + int ret; + + mqd = cobalt_mqd_get(uqd); + if (IS_ERR(mqd)) + return PTR_ERR(mqd); + + ret = mq_getattr(mqd, attr); + cobalt_mqd_put(mqd); + if (ret) + return ret; + + trace_cobalt_mq_getattr(uqd, attr); + + return 0; +} + +COBALT_SYSCALL(mq_getattr, current, + (mqd_t uqd, struct mq_attr __user *u_attr)) +{ + struct mq_attr attr; + int ret; + + ret = __cobalt_mq_getattr(uqd, &attr); + if (ret) + return ret; + + return cobalt_copy_to_user(u_attr, &attr, sizeof(attr)); +} + +static inline int mq_fetch_timeout(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts); +} + +static inline int mq_fetch_timeout64(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? 
-EFAULT : cobalt_get_timespec64(ts, u_ts); +} + +int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct cobalt_msg *msg; + struct cobalt_mqd *mqd; + int ret; + + mqd = cobalt_mqd_get(uqd); + if (IS_ERR(mqd)) + return PTR_ERR(mqd); + + if (prio >= COBALT_MSGPRIOMAX) { + ret = -EINVAL; + goto out; + } + + if (len > 0 && !access_rok(u_buf, len)) { + ret = -EFAULT; + goto out; + } + + trace_cobalt_mq_send(uqd, u_buf, len, prio); + msg = mq_timedsend_inner(mqd, len, u_ts, fetch_timeout); + if (IS_ERR(msg)) { + ret = PTR_ERR(msg); + goto out; + } + + ret = cobalt_copy_from_user(msg->data, u_buf, len); + if (ret) { + mq_finish_rcv(mqd, msg); + goto out; + } + msg->len = len; + msg->prio = prio; + ret = mq_finish_send(mqd, msg); +out: + cobalt_mqd_put(mqd); + + return ret; +} + +int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const void __user *u_ts) +{ + return __cobalt_mq_timedsend(uqd, u_buf, len, prio, u_ts, + u_ts ? mq_fetch_timeout64 : NULL); +} + +COBALT_SYSCALL(mq_timedsend, primary, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const struct __user_old_timespec __user *u_ts)) +{ + return __cobalt_mq_timedsend(uqd, u_buf, len, prio, + u_ts, u_ts ? mq_fetch_timeout : NULL); +} + +COBALT_SYSCALL(mq_timedsend64, primary, + (mqd_t uqd, const void __user *u_buf, size_t len, + unsigned int prio, const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_mq_timedsend64(uqd, u_buf, len, prio, u_ts); +} + +int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf, + ssize_t *lenp, + unsigned int __user *u_prio, + const void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct cobalt_mqd *mqd; + struct cobalt_msg *msg; + unsigned int prio; + int ret; + + mqd = cobalt_mqd_get(uqd); + if (IS_ERR(mqd)) + return PTR_ERR(mqd); + + if (*lenp > 0 && !access_wok(u_buf, *lenp)) { + ret = -EFAULT; + goto fail; + } + + msg = mq_timedrcv_inner(mqd, *lenp, u_ts, fetch_timeout); + if (IS_ERR(msg)) { + ret = PTR_ERR(msg); + goto fail; + } + + ret = cobalt_copy_to_user(u_buf, msg->data, msg->len); + if (ret) { + mq_finish_rcv(mqd, msg); + goto fail; + } + + *lenp = msg->len; + prio = msg->prio; + ret = mq_finish_rcv(mqd, msg); + if (ret) + goto fail; + + cobalt_mqd_put(mqd); + + if (u_prio && __xn_put_user(prio, u_prio)) + return -EFAULT; + + return 0; +fail: + cobalt_mqd_put(mqd); + + return ret; +} + +int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf, + ssize_t __user *u_len, + unsigned int __user *u_prio, + const void __user *u_ts) +{ + ssize_t len; + int ret; + + ret = cobalt_copy_from_user(&len, u_len, sizeof(len)); + if (ret) + return ret; + + ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio, u_ts, + u_ts ? mq_fetch_timeout64 : NULL); + + return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len)); +} + +COBALT_SYSCALL(mq_timedreceive, primary, + (mqd_t uqd, void __user *u_buf, + ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __user_old_timespec __user *u_ts)) +{ + ssize_t len; + int ret; + + ret = cobalt_copy_from_user(&len, u_len, sizeof(len)); + if (ret) + return ret; + + ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio, + u_ts, u_ts ? 
mq_fetch_timeout : NULL); + + return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len)); +} + +COBALT_SYSCALL(mq_timedreceive64, primary, + (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len, + unsigned int __user *u_prio, + const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_mq_timedreceive64(uqd, u_buf, u_len, u_prio, u_ts); +} --- linux/kernel/xenomai/posix/signal.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/signal.c 2022-03-21 12:58:29.054892228 +0100 @@ -0,0 +1,638 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include +#include +#include "internal.h" +#include "signal.h" +#include "thread.h" +#include "timer.h" +#include "clock.h" + +static void *sigpending_mem; + +static LIST_HEAD(sigpending_pool); + +/* + * How many signal notifications which may be pending at any given + * time, except timers. Cobalt signals are always thread directed, + * and we assume that in practice, each signal number is processed by + * a dedicated thread. We provide for up to three real-time signal + * events to pile up, and a single notification pending for other + * signals. Timers use a fast queuing logic maintaining a count of + * overruns, and therefore do not consume any memory from this pool. + */ +#define __SIGPOOL_SIZE (sizeof(struct cobalt_sigpending) * \ + (_NSIG + (SIGRTMAX - SIGRTMIN) * 2)) + +static int cobalt_signal_deliver(struct cobalt_thread *thread, + struct cobalt_sigpending *sigp, + int group) +{ /* nklocked, IRQs off */ + struct cobalt_sigwait_context *swc; + struct xnthread_wait_context *wc; + struct list_head *sigwaiters; + int sig, ret; + + sig = sigp->si.si_signo; + XENO_BUG_ON(COBALT, sig < 1 || sig > _NSIG); + + /* + * Attempt to deliver the signal immediately to the initial + * target that waits for it. + */ + if (xnsynch_pended_p(&thread->sigwait)) { + wc = xnthread_get_wait_context(&thread->threadbase); + swc = container_of(wc, struct cobalt_sigwait_context, wc); + if (sigismember(swc->set, sig)) + goto deliver; + } + + /* + * If that does not work out and we are sending to a thread + * group, try to deliver to any thread from the same process + * waiting for that signal. 
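+	 *
+	 * Illustration only: with the POSIX calls wrapped by the
+	 * Cobalt libc, a process-directed signal queued by one thread
+	 * may wake whichever sibling currently sleeps in
+	 * sigwaitinfo(), e.g.:
+	 *
+	 *   // waiter thread(s)
+	 *   sigset_t set;
+	 *   siginfo_t si;
+	 *   sigemptyset(&set);
+	 *   sigaddset(&set, SIGRTMIN);
+	 *   sigwaitinfo(&set, &si);
+	 *
+	 *   // any thread of the same process
+	 *   union sigval v = { .sival_int = 42 };
+	 *   sigqueue(getpid(), SIGRTMIN, v);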
+ */ + sigwaiters = &thread->process->sigwaiters; + if (!group || list_empty(sigwaiters)) + return 0; + + list_for_each_entry(thread, sigwaiters, signext) { + wc = xnthread_get_wait_context(&thread->threadbase); + swc = container_of(wc, struct cobalt_sigwait_context, wc); + if (sigismember(swc->set, sig)) + goto deliver; + } + + return 0; +deliver: + cobalt_copy_siginfo(sigp->si.si_code, swc->si, &sigp->si); + cobalt_call_extension(signal_deliver, &thread->extref, + ret, swc->si, sigp); + xnthread_complete_wait(&swc->wc); + xnsynch_wakeup_one_sleeper(&thread->sigwait); + list_del(&thread->signext); + + /* + * This is an immediate delivery bypassing any queuing, so we + * have to release the sigpending data right away before + * leaving. + */ + cobalt_signal_free(sigp); + + return 1; +} + +int cobalt_signal_send(struct cobalt_thread *thread, + struct cobalt_sigpending *sigp, + int group) +{ /* nklocked, IRQs off */ + struct list_head *sigq; + int sig, ret; + + /* Can we deliver this signal immediately? */ + ret = cobalt_signal_deliver(thread, sigp, group); + if (ret) + return ret; /* Yep, done. */ + + /* + * Nope, attempt to queue it. We start by calling any Cobalt + * extension for queuing the signal first. + */ + if (cobalt_call_extension(signal_queue, &thread->extref, ret, sigp)) { + if (ret) + /* Queuing done remotely or error. */ + return ret; + } + + sig = sigp->si.si_signo; + sigq = thread->sigqueues + sig - 1; + if (!list_empty(sigq)) { + /* Queue non-rt signals only once. */ + if (sig < SIGRTMIN) + return 0; + /* Queue rt signal source only once (SI_TIMER). */ + if (!list_empty(&sigp->next)) + return 0; + } + + sigaddset(&thread->sigpending, sig); + list_add_tail(&sigp->next, sigq); + + return 1; +} +EXPORT_SYMBOL_GPL(cobalt_signal_send); + +int cobalt_signal_send_pid(pid_t pid, struct cobalt_sigpending *sigp) +{ /* nklocked, IRQs off */ + struct cobalt_thread *thread; + + thread = cobalt_thread_find(pid); + if (thread) + return cobalt_signal_send(thread, sigp, 0); + + return -ESRCH; +} +EXPORT_SYMBOL_GPL(cobalt_signal_send_pid); + +struct cobalt_sigpending *cobalt_signal_alloc(void) +{ /* nklocked, IRQs off */ + struct cobalt_sigpending *sigp; + + if (list_empty(&sigpending_pool)) { + if (xnclock_ratelimit()) + printk(XENO_WARNING "signal bucket pool underflows\n"); + return NULL; + } + + sigp = list_get_entry(&sigpending_pool, struct cobalt_sigpending, next); + INIT_LIST_HEAD(&sigp->next); + + return sigp; +} +EXPORT_SYMBOL_GPL(cobalt_signal_alloc); + +void cobalt_signal_free(struct cobalt_sigpending *sigp) +{ /* nklocked, IRQs off */ + if ((void *)sigp >= sigpending_mem && + (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) + list_add_tail(&sigp->next, &sigpending_pool); +} +EXPORT_SYMBOL_GPL(cobalt_signal_free); + +void cobalt_signal_flush(struct cobalt_thread *thread) +{ + struct cobalt_sigpending *sigp, *tmp; + struct list_head *sigq; + spl_t s; + int n; + + /* + * TCB is not accessible from userland anymore, no locking + * required. + */ + if (sigisemptyset(&thread->sigpending)) + return; + + for (n = 0; n < _NSIG; n++) { + sigq = thread->sigqueues + n; + if (list_empty(sigq)) + continue; + /* + * sigpending blocks must be unlinked so that we + * detect this fact when deleting their respective + * owners. 
+ */ + list_for_each_entry_safe(sigp, tmp, sigq, next) { + list_del_init(&sigp->next); + if ((void *)sigp >= sigpending_mem && + (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) { + xnlock_get_irqsave(&nklock, s); + list_add_tail(&sigp->next, &sigpending_pool); + xnlock_put_irqrestore(&nklock, s); + } + } + } + + sigemptyset(&thread->sigpending); +} + +static int signal_put_siginfo(void __user *u_si, const struct siginfo *si, + int overrun) +{ + struct siginfo __user *u_p = u_si; + int ret; + + ret = __xn_put_user(si->si_signo, &u_p->si_signo); + ret |= __xn_put_user(si->si_errno, &u_p->si_errno); + ret |= __xn_put_user(si->si_code, &u_p->si_code); + + /* + * Copy the generic/standard siginfo bits to userland. + */ + switch (si->si_code) { + case SI_TIMER: + ret |= __xn_put_user(si->si_tid, &u_p->si_tid); + ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr); + ret |= __xn_put_user(overrun, &u_p->si_overrun); + break; + case SI_QUEUE: + case SI_MESGQ: + ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr); + fallthrough; + case SI_USER: + ret |= __xn_put_user(si->si_pid, &u_p->si_pid); + ret |= __xn_put_user(si->si_uid, &u_p->si_uid); + } + + return ret; +} + +static int signal_wait(sigset_t *set, xnticks_t timeout, + void __user *u_si, bool compat) +{ + struct cobalt_sigpending *sigp = NULL; + struct cobalt_sigwait_context swc; + struct cobalt_thread *curr; + int ret, sig, n, overrun; + unsigned long *p, *t, m; + struct siginfo si, *sip; + struct list_head *sigq; + spl_t s; + + curr = cobalt_current_thread(); + XENO_BUG_ON(COBALT, curr == NULL); + + if (u_si && !access_wok(u_si, sizeof(*u_si))) + return -EFAULT; + + xnlock_get_irqsave(&nklock, s); + +check: + if (sigisemptyset(&curr->sigpending)) + /* Most common/fast path. */ + goto wait; + + p = curr->sigpending.sig; /* pending */ + t = set->sig; /* tested */ + + for (n = 0, sig = 0; n < _NSIG_WORDS; ++n) { + m = *p++ & *t++; + if (m == 0) + continue; + sig = ffz(~m) + n *_NSIG_BPW + 1; + break; + } + + if (sig) { + sigq = curr->sigqueues + sig - 1; + if (list_empty(sigq)) { + sigdelset(&curr->sigpending, sig); + goto check; + } + sigp = list_get_entry(sigq, struct cobalt_sigpending, next); + INIT_LIST_HEAD(&sigp->next); /* Mark sigp as unlinked. */ + if (list_empty(sigq)) + sigdelset(&curr->sigpending, sig); + sip = &sigp->si; + ret = 0; + goto done; + } + +wait: + if (timeout == XN_NONBLOCK) { + ret = -EAGAIN; + goto fail; + } + swc.set = set; + swc.si = &si; + xnthread_prepare_wait(&swc.wc); + list_add_tail(&curr->signext, &curr->process->sigwaiters); + ret = xnsynch_sleep_on(&curr->sigwait, timeout, XN_RELATIVE); + if (ret) { + list_del(&curr->signext); + ret = ret & XNBREAK ? -EINTR : -EAGAIN; + goto fail; + } + sig = si.si_signo; + sip = &si; +done: + /* + * si_overrun raises a nasty issue since we have to + * collect+clear it atomically before we drop the lock, + * although we don't know in advance if any extension would + * use it along with the additional si_codes it may provide, + * but we must drop the lock before running the + * signal_copyinfo handler. + * + * Observing that si_overrun is likely the only "unstable" + * data from the signal information which might change under + * our feet while we copy the bits to userland, we collect it + * here from the atomic section for all unknown si_codes, + * then pass its value to the signal_copyinfo handler. 
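+	 *
+	 * For illustration (assuming the POSIX timer calls are wrapped
+	 * by the Cobalt libc), a waiter observes that count as follows:
+	 *
+	 *   struct sigevent sev = {
+	 *           .sigev_notify = SIGEV_SIGNAL,
+	 *           .sigev_signo = SIGRTMIN,
+	 *   };
+	 *   struct itimerspec its = {
+	 *           .it_value    = { .tv_nsec = 1000000 },
+	 *           .it_interval = { .tv_nsec = 1000000 },
+	 *   };
+	 *   timer_t tm;
+	 *   siginfo_t si;
+	 *
+	 *   timer_create(CLOCK_MONOTONIC, &sev, &tm);
+	 *   timer_settime(tm, 0, &its, NULL);
+	 *   sigwaitinfo(&set, &si);    // 'set' has SIGRTMIN blocked and added
+	 *   // si.si_overrun then reports the ticks missed since the last wakeup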
+ */ + switch (sip->si_code) { + case SI_TIMER: + overrun = cobalt_timer_deliver(curr, sip->si_tid); + break; + case SI_USER: + case SI_MESGQ: + case SI_QUEUE: + overrun = 0; + break; + default: + overrun = sip->si_overrun; + if (overrun) + sip->si_overrun = 0; + } + + xnlock_put_irqrestore(&nklock, s); + + if (u_si == NULL) + goto out; /* Return signo only. */ + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + ret = sys32_put_siginfo(u_si, sip, overrun); + if (!ret) + /* Allow an extended target to receive more data. */ + cobalt_call_extension(signal_copyinfo_compat, + &curr->extref, ret, u_si, sip, + overrun); + } else +#endif + { + ret = signal_put_siginfo(u_si, sip, overrun); + if (!ret) + /* Allow an extended target to receive more data. */ + cobalt_call_extension(signal_copyinfo, &curr->extref, + ret, u_si, sip, overrun); + } + +out: + /* + * If we pulled the signal information from a sigpending + * block, release it to the free pool if applicable. + */ + if (sigp && + (void *)sigp >= sigpending_mem && + (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) { + xnlock_get_irqsave(&nklock, s); + list_add_tail(&sigp->next, &sigpending_pool); + xnlock_put_irqrestore(&nklock, s); + /* no more ref. to sigp beyond this point. */ + } + + return ret ? -EFAULT : sig; +fail: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_sigwait(sigset_t *set) +{ + return signal_wait(set, XN_INFINITE, NULL, false); +} + +COBALT_SYSCALL(sigwait, primary, + (const sigset_t __user *u_set, int __user *u_sig)) +{ + sigset_t set; + int sig; + + if (cobalt_copy_from_user(&set, u_set, sizeof(set))) + return -EFAULT; + + sig = signal_wait(&set, XN_INFINITE, NULL, false); + if (sig < 0) + return sig; + + return cobalt_copy_to_user(u_sig, &sig, sizeof(*u_sig)); +} + +int __cobalt_sigtimedwait(sigset_t *set, + const struct timespec64 *timeout, + void __user *u_si, + bool compat) +{ + xnticks_t ticks; + + if (!timespec64_valid(timeout)) + return -EINVAL; + ticks = ts2ns(timeout); + if (ticks++ == 0) + ticks = XN_NONBLOCK; + + return signal_wait(set, ticks, u_si, compat); +} + +COBALT_SYSCALL(sigtimedwait, nonrestartable, + (const sigset_t __user *u_set, + struct siginfo __user *u_si, + const struct __user_old_timespec __user *u_timeout)) +{ + struct timespec64 timeout; + sigset_t set; + + if (cobalt_copy_from_user(&set, u_set, sizeof(set))) + return -EFAULT; + + if (cobalt_copy_from_user(&timeout, u_timeout, sizeof(timeout))) + return -EFAULT; + + return __cobalt_sigtimedwait(&set, &timeout, u_si, false); +} + +COBALT_SYSCALL(sigtimedwait64, nonrestartable, + (const sigset_t __user *u_set, + struct siginfo __user *u_si, + const struct __kernel_timespec __user *u_timeout)) +{ + struct timespec64 timeout; + sigset_t set; + + if (cobalt_copy_from_user(&set, u_set, sizeof(set))) + return -EFAULT; + + if (cobalt_get_timespec64(&timeout, u_timeout)) + return -EFAULT; + + return __cobalt_sigtimedwait(&set, &timeout, u_si, false); +} + +int __cobalt_sigwaitinfo(sigset_t *set, + void __user *u_si, + bool compat) +{ + return signal_wait(set, XN_INFINITE, u_si, compat); +} + +COBALT_SYSCALL(sigwaitinfo, nonrestartable, + (const sigset_t __user *u_set, struct siginfo __user *u_si)) +{ + sigset_t set; + + if (cobalt_copy_from_user(&set, u_set, sizeof(set))) + return -EFAULT; + + return __cobalt_sigwaitinfo(&set, u_si, false); +} + +COBALT_SYSCALL(sigpending, primary, (old_sigset_t __user *u_set)) +{ + struct cobalt_thread *curr = cobalt_current_thread(); + + return cobalt_copy_to_user(u_set, &curr->sigpending, 
sizeof(*u_set)); +} + +int __cobalt_kill(struct cobalt_thread *thread, int sig, int group) /* nklocked, IRQs off */ +{ + struct cobalt_sigpending *sigp; + int ret = 0; + + /* + * We have undocumented pseudo-signals to suspend/resume/unblock + * threads, force them out of primary mode or even demote them + * to the weak scheduling class/priority. Process them early, + * before anyone can notice... + */ + switch(sig) { + case 0: + /* Check for existence only. */ + break; + case SIGSUSP: + /* + * All callers shall be tagged as conforming calls, so + * self-directed suspension can only happen from + * primary mode. Yummie. + */ + xnthread_suspend(&thread->threadbase, XNSUSP, + XN_INFINITE, XN_RELATIVE, NULL); + if (&thread->threadbase == xnthread_current() && + xnthread_test_info(&thread->threadbase, XNBREAK)) + ret = -EINTR; + break; + case SIGRESM: + xnthread_resume(&thread->threadbase, XNSUSP); + goto resched; + case SIGRELS: + xnthread_unblock(&thread->threadbase); + goto resched; + case SIGKICK: + xnthread_kick(&thread->threadbase); + goto resched; + case SIGDEMT: + xnthread_demote(&thread->threadbase); + goto resched; + case 1 ... _NSIG: + sigp = cobalt_signal_alloc(); + if (sigp) { + sigp->si.si_signo = sig; + sigp->si.si_errno = 0; + sigp->si.si_code = SI_USER; + sigp->si.si_pid = task_pid_nr(current); + sigp->si.si_uid = get_current_uuid(); + if (cobalt_signal_send(thread, sigp, group) <= 0) + cobalt_signal_free(sigp); + } + resched: + xnsched_run(); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +COBALT_SYSCALL(kill, conforming, (pid_t pid, int sig)) +{ + struct cobalt_thread *thread; + int ret; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + thread = cobalt_thread_find(pid); + if (thread == NULL) + ret = -ESRCH; + else + ret = __cobalt_kill(thread, sig, 1); + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value) +{ + struct cobalt_sigpending *sigp; + struct cobalt_thread *thread; + int ret = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + thread = cobalt_thread_find(pid); + if (thread == NULL) { + ret = -ESRCH; + goto out; + } + + switch(sig) { + case 0: + /* Check for existence only. */ + break; + case 1 ... _NSIG: + sigp = cobalt_signal_alloc(); + if (sigp) { + sigp->si.si_signo = sig; + sigp->si.si_errno = 0; + sigp->si.si_code = SI_QUEUE; + sigp->si.si_pid = task_pid_nr(current); + sigp->si.si_uid = get_current_uuid(); + sigp->si.si_value = *value; + if (cobalt_signal_send(thread, sigp, 1) <= 0) + cobalt_signal_free(sigp); + else + xnsched_run(); + } + break; + default: + /* Cobalt pseudo-signals are never process-directed. 
*/ + ret = __cobalt_kill(thread, sig, 0); + } +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} +EXPORT_SYMBOL_GPL(__cobalt_sigqueue); + +COBALT_SYSCALL(sigqueue, conforming, + (pid_t pid, int sig, const union sigval __user *u_value)) +{ + union sigval val; + int ret; + + ret = cobalt_copy_from_user(&val, u_value, sizeof(val)); + + return ret ?: __cobalt_sigqueue(pid, sig, &val); +} + +__init int cobalt_signal_init(void) +{ + struct cobalt_sigpending *sigp; + + sigpending_mem = xnheap_vmalloc(__SIGPOOL_SIZE); + if (sigpending_mem == NULL) + return -ENOMEM; + + for (sigp = sigpending_mem; + (void *)sigp < sigpending_mem + __SIGPOOL_SIZE; sigp++) + list_add_tail(&sigp->next, &sigpending_pool); + + return 0; +} + +__init void cobalt_signal_cleanup(void) +{ + xnheap_vfree(sigpending_mem); +} --- linux/kernel/xenomai/posix/io.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/io.c 2022-03-21 12:58:29.050892267 +0100 @@ -0,0 +1,394 @@ +/* + * Copyright (C) 2005 Jan Kiszka . + * Copyright (C) 2005 Joerg Langenberg . + * Copyright (C) 2008 Gilles Chanteperdrix + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include +#include +#include +#include +#include +#include +#include "process.h" +#include "internal.h" +#include "clock.h" +#include "io.h" + +COBALT_SYSCALL(open, lostage, + (const char __user *u_path, int oflag)) +{ + struct filename *filename; + int ufd; + + filename = getname(u_path); + if (IS_ERR(filename)) + return PTR_ERR(filename); + + ufd = __rtdm_dev_open(filename->name, oflag); + putname(filename); + + return ufd; +} + +COBALT_SYSCALL(socket, lostage, + (int protocol_family, int socket_type, int protocol)) +{ + return __rtdm_dev_socket(protocol_family, socket_type, protocol); +} + +COBALT_SYSCALL(close, lostage, (int fd)) +{ + return rtdm_fd_close(fd, 0); +} + +COBALT_SYSCALL(fcntl, current, (int fd, int cmd, long arg)) +{ + return rtdm_fd_fcntl(fd, cmd, arg); +} + +COBALT_SYSCALL(ioctl, handover, + (int fd, unsigned int request, void __user *arg)) +{ + return rtdm_fd_ioctl(fd, request, arg); +} + +COBALT_SYSCALL(read, handover, + (int fd, void __user *buf, size_t size)) +{ + return rtdm_fd_read(fd, buf, size); +} + +COBALT_SYSCALL(write, handover, + (int fd, const void __user *buf, size_t size)) +{ + return rtdm_fd_write(fd, buf, size); +} + +COBALT_SYSCALL(recvmsg, handover, + (int fd, struct user_msghdr __user *umsg, int flags)) +{ + struct user_msghdr m; + ssize_t ret; + + ret = cobalt_copy_from_user(&m, umsg, sizeof(m)); + if (ret) + return ret; + + ret = rtdm_fd_recvmsg(fd, &m, flags); + if (ret < 0) + return ret; + + return cobalt_copy_to_user(umsg, &m, sizeof(*umsg)) ?: ret; +} + +static int get_timespec(struct timespec64 *ts, + const void __user *u_ts) +{ + return cobalt_get_u_timespec(ts, u_ts); +} + +static int get_mmsg(struct mmsghdr *mmsg, void __user *u_mmsg) +{ + return cobalt_copy_from_user(mmsg, u_mmsg, sizeof(*mmsg)); +} + +static int put_mmsg(void __user **u_mmsg_p, const struct mmsghdr *mmsg) +{ + struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p, + *q __user = (*p)++; + + return cobalt_copy_to_user(q, mmsg, sizeof(*q)); +} + +COBALT_SYSCALL(recvmmsg, primary, + (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen, + unsigned int flags, struct __user_old_timespec __user *u_timeout)) +{ + return __rtdm_fd_recvmmsg(fd, u_msgvec, vlen, flags, u_timeout, + get_mmsg, put_mmsg, get_timespec); +} + +COBALT_SYSCALL(recvmmsg64, primary, + (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen, + unsigned int flags, struct __kernel_timespec __user *u_timeout)) +{ + return __rtdm_fd_recvmmsg64(fd, u_msgvec, vlen, flags, u_timeout, + get_mmsg, put_mmsg); +} + +COBALT_SYSCALL(sendmsg, handover, + (int fd, struct user_msghdr __user *umsg, int flags)) +{ + struct user_msghdr m; + int ret; + + ret = cobalt_copy_from_user(&m, umsg, sizeof(m)); + + return ret ?: rtdm_fd_sendmsg(fd, &m, flags); +} + +static int put_mmsglen(void __user **u_mmsg_p, const struct mmsghdr *mmsg) +{ + struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p, + *q __user = (*p)++; + + return __xn_put_user(mmsg->msg_len, &q->msg_len); +} + +COBALT_SYSCALL(sendmmsg, primary, + (int fd, struct mmsghdr __user *u_msgvec, + unsigned int vlen, unsigned int flags)) +{ + return __rtdm_fd_sendmmsg(fd, u_msgvec, vlen, flags, + get_mmsg, put_mmsglen); +} + +COBALT_SYSCALL(mmap, lostage, + (int fd, struct _rtdm_mmap_request __user *u_rma, + void __user **u_addrp)) +{ + struct _rtdm_mmap_request rma; + void *u_addr = NULL; + int ret; + + ret = cobalt_copy_from_user(&rma, u_rma, sizeof(rma)); + if (ret) + return ret; + + ret = rtdm_fd_mmap(fd, &rma, &u_addr); + if (ret) + return ret; + + 
return cobalt_copy_to_user(u_addrp, &u_addr, sizeof(u_addr)); +} + +static int __cobalt_first_fd_valid_p(fd_set *fds[XNSELECT_MAX_TYPES], int nfds) +{ + int i, fd; + + for (i = 0; i < XNSELECT_MAX_TYPES; i++) + if (fds[i] + && (fd = find_first_bit(fds[i]->fds_bits, nfds)) < nfds) + return rtdm_fd_valid_p(fd); + + /* All empty is correct, used as a "sleep" mechanism by strange + applications. */ + return 1; +} + +static int __cobalt_select_bind_all(struct xnselector *selector, + fd_set *fds[XNSELECT_MAX_TYPES], int nfds) +{ + bool first_fd = true; + unsigned fd, type; + int err; + + for (type = 0; type < XNSELECT_MAX_TYPES; type++) { + fd_set *set = fds[type]; + if (set) + for (fd = find_first_bit(set->fds_bits, nfds); + fd < nfds; + fd = find_next_bit(set->fds_bits, nfds, fd + 1)) { + err = rtdm_fd_select(fd, selector, type); + if (err) { + /* + * Do not needlessly signal "retry + * under Linux" for mixed fd sets. + */ + if (err == -EADV && !first_fd) + return -EBADF; + return err; + } + first_fd = false; + } + } + + return 0; +} + +int __cobalt_select(int nfds, void __user *u_rfds, void __user *u_wfds, + void __user *u_xfds, void __user *u_tv, bool compat) +{ + void __user *ufd_sets[XNSELECT_MAX_TYPES] = { + [XNSELECT_READ] = u_rfds, + [XNSELECT_WRITE] = u_wfds, + [XNSELECT_EXCEPT] = u_xfds + }; + fd_set *in_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL}; + fd_set *out_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL}; + fd_set in_fds_storage[XNSELECT_MAX_TYPES], + out_fds_storage[XNSELECT_MAX_TYPES]; + xnticks_t timeout = XN_INFINITE; + struct restart_block *restart; + xntmode_t mode = XN_RELATIVE; + struct xnselector *selector; + struct xnthread *curr; + struct __kernel_old_timeval tv; + size_t fds_size; + int i, err; + + curr = xnthread_current(); + + if (u_tv) { + if (xnthread_test_localinfo(curr, XNSYSRST)) { + xnthread_clear_localinfo(curr, XNSYSRST); + + restart = cobalt_get_restart_block(current); + timeout = restart->nanosleep.expires; + + if (restart->fn != cobalt_restart_syscall_placeholder) { + err = -EINTR; + goto out; + } + } else { +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + if (sys32_get_timeval(&tv, u_tv)) + return -EFAULT; + } else +#endif + { + if (!access_wok(u_tv, sizeof(tv)) + || cobalt_copy_from_user(&tv, u_tv, + sizeof(tv))) + return -EFAULT; + } + + if (tv.tv_usec >= 1000000) + return -EINVAL; + + timeout = clock_get_ticks(CLOCK_MONOTONIC) + tv2ns(&tv); + } + + mode = XN_ABSOLUTE; + } + + fds_size = __FDELT__(nfds + __NFDBITS__ - 1) * sizeof(long); + + for (i = 0; i < XNSELECT_MAX_TYPES; i++) + if (ufd_sets[i]) { + in_fds[i] = &in_fds_storage[i]; + out_fds[i] = &out_fds_storage[i]; +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + if (sys32_get_fdset(in_fds[i], ufd_sets[i], + fds_size)) + return -EFAULT; + } else +#endif + { + if (!access_wok((void __user *) ufd_sets[i], + sizeof(fd_set)) + || cobalt_copy_from_user(in_fds[i], + (void __user *)ufd_sets[i], + fds_size)) + return -EFAULT; + } + } + + selector = curr->selector; + if (!selector) { + /* This function may be called from pure Linux fd_sets, we want + to avoid the xnselector allocation in this case, so, we do a + simple test: test if the first file descriptor we find in the + fd_set is an RTDM descriptor or a message queue descriptor. 
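+
+	   As an illustration (rt_fd standing for any RTDM descriptor,
+	   e.g. an RTnet socket), the calling pattern served here is:
+
+	     fd_set rfds;
+	     struct timeval tv = { .tv_sec = 1 };
+
+	     FD_ZERO(&rfds);
+	     FD_SET(rt_fd, &rfds);
+	     if (select(rt_fd + 1, &rfds, NULL, NULL, &tv) > 0 &&
+	         FD_ISSET(rt_fd, &rfds))
+	             ; // rt_fd is readable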
*/ + if (!__cobalt_first_fd_valid_p(in_fds, nfds)) + return -EADV; + + selector = xnmalloc(sizeof(*curr->selector)); + if (selector == NULL) + return -ENOMEM; + xnselector_init(selector); + curr->selector = selector; + + /* Bind directly the file descriptors, we do not need to go + through xnselect returning -ECHRNG */ + err = __cobalt_select_bind_all(selector, in_fds, nfds); + if (err) + return err; + } + + do { + err = xnselect(selector, out_fds, in_fds, nfds, timeout, mode); + if (err == -ECHRNG) { + int bind_err = __cobalt_select_bind_all(selector, + out_fds, nfds); + if (bind_err) + return bind_err; + } + } while (err == -ECHRNG); + + if (err == -EINTR && signal_pending(current)) { + xnthread_set_localinfo(curr, XNSYSRST); + + restart = cobalt_get_restart_block(current); + restart->fn = cobalt_restart_syscall_placeholder; + restart->nanosleep.expires = timeout; + + return -ERESTARTSYS; + } + +out: + if (u_tv && (err > 0 || err == -EINTR)) { + xnsticks_t diff = timeout - clock_get_ticks(CLOCK_MONOTONIC); + if (diff > 0) + ticks2tv(&tv, diff); + else + tv.tv_sec = tv.tv_usec = 0; + +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + if (sys32_put_timeval(u_tv, &tv)) + return -EFAULT; + } else +#endif + { + if (cobalt_copy_to_user(u_tv, &tv, sizeof(tv))) + return -EFAULT; + } + } + + if (err >= 0) + for (i = 0; i < XNSELECT_MAX_TYPES; i++) { + if (!ufd_sets[i]) + continue; +#ifdef CONFIG_XENO_ARCH_SYS3264 + if (compat) { + if (sys32_put_fdset(ufd_sets[i], out_fds[i], + sizeof(fd_set))) + return -EFAULT; + } else +#endif + { + if (cobalt_copy_to_user((void __user *)ufd_sets[i], + out_fds[i], sizeof(fd_set))) + return -EFAULT; + } + } + return err; +} + +/* int select(int, fd_set *, fd_set *, fd_set *, struct __kernel_old_timeval *) */ +COBALT_SYSCALL(select, primary, + (int nfds, + fd_set __user *u_rfds, + fd_set __user *u_wfds, + fd_set __user *u_xfds, + struct __kernel_old_timeval __user *u_tv)) +{ + return __cobalt_select(nfds, u_rfds, u_wfds, u_xfds, u_tv, false); +} --- linux/kernel/xenomai/posix/extension.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/extension.h 2022-03-21 12:58:29.047892296 +0100 @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2013 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_EXTENSION_H +#define _COBALT_POSIX_EXTENSION_H + +#include +#include + +#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION + +#include + +struct cobalt_timer; +struct cobalt_sigpending; +struct cobalt_extref; +struct siginfo; +struct xnsched_class; +union xnsched_policy_param; + +struct cobalt_extension { + struct xnthread_personality core; + struct { + struct cobalt_thread * + (*timer_init)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. 
*/ + const struct sigevent *__restrict__ evp); + int (*timer_settime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */ + const struct itimerspec64 *__restrict__ value, + int flags); + int (*timer_gettime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */ + struct itimerspec64 *__restrict__ value); + int (*timer_delete)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */ + int (*timer_cleanup)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */ + int (*signal_deliver)(struct cobalt_extref *refthread, + struct siginfo *si, + struct cobalt_sigpending *sigp); + int (*signal_queue)(struct cobalt_extref *refthread, + struct cobalt_sigpending *sigp); + int (*signal_copyinfo)(struct cobalt_extref *refthread, + void __user *u_si, + const struct siginfo *si, + int overrun); + int (*signal_copyinfo_compat)(struct cobalt_extref *refthread, + void __user *u_si, + const struct siginfo *si, + int overrun); + int (*sched_yield)(struct cobalt_extref *curref); + int (*thread_setsched)(struct cobalt_extref *refthread, /* nklocked, IRQs off. */ + struct xnsched_class *sched_class, + union xnsched_policy_param *param); + } ops; +}; + +struct cobalt_extref { + struct cobalt_extension *extension; + struct list_head next; + void *private; +}; + +static inline void cobalt_set_extref(struct cobalt_extref *ref, + struct cobalt_extension *ext, + void *priv) +{ + ref->extension = ext; + ref->private = priv; +} + +/** + * All macros return non-zero if some thread-level extension code was + * called, leaving the output value into __ret. Otherwise, the __ret + * value is undefined. + */ +#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...) \ + ({ \ + int __val = 0; \ + if ((__owner) && (__owner)->extref.extension) { \ + (__extref)->extension = (__owner)->extref.extension; \ + if ((__extref)->extension->ops.__extfn) { \ + (__ret) = (__extref)->extension->ops. \ + __extfn(__extref, ##__args ); \ + __val = 1; \ + } \ + } else \ + (__extref)->extension = NULL; \ + __val; \ + }) + +#define cobalt_call_extension(__extfn, __extref, __ret, __args...) \ + ({ \ + int __val = 0; \ + if ((__extref)->extension && \ + (__extref)->extension->ops.__extfn) { \ + (__ret) = (__extref)->extension->ops. \ + __extfn(__extref, ##__args ); \ + __val = 1; \ + } \ + __val; \ + }) + +#else /* !CONFIG_XENO_OPT_COBALT_EXTENSION */ + +struct cobalt_extension; + +struct cobalt_extref { +}; + +static inline void cobalt_set_extref(struct cobalt_extref *ref, + struct cobalt_extension *ext, + void *priv) +{ +} + +#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...) \ + ({ (void)(__owner); (void)(__ret); 0; }) + +#define cobalt_call_extension(__extfn, __extref, __ret, __args...) \ + ({ (void)(__ret); 0; }) + +#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */ + +#endif /* !_COBALT_POSIX_EXTENSION_H */ --- linux/kernel/xenomai/posix/sem.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/sem.c 2022-03-21 12:58:29.043892335 +0100 @@ -0,0 +1,667 @@ +/* + * Written by Gilles Chanteperdrix . + * Copyright (C) 2014,2015 Philippe Gerum + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include "internal.h" +#include "thread.h" +#include "clock.h" +#include "sem.h" +#include + +#ifdef CONFIG_XENO_OPT_VFILE + +static int sem_vfile_show(struct xnvfile_regular_iterator *it, void *data) +{ + return 0; +} + +static struct xnvfile_regular_ops sem_vfile_ops = { + .show = sem_vfile_show, +}; + +static struct xnpnode_regular __sem_pnode = { + .node = { + .dirname = "sem", + .root = &posix_ptree, + .ops = &xnregistry_vfreg_ops, + }, + .vfile = { + .ops = &sem_vfile_ops, + }, +}; + +#else /* !CONFIG_XENO_OPT_VFILE */ + +static struct xnpnode_link __sem_pnode = { + .node = { + .dirname = "sem", + } +}; + +#endif /* !CONFIG_XENO_OPT_VFILE */ + +static inline struct cobalt_resources *sem_kqueue(struct cobalt_sem *sem) +{ + int pshared = !!(sem->flags & SEM_PSHARED); + return cobalt_current_resources(pshared); +} + +static inline int sem_check(struct cobalt_sem *sem) +{ + if (sem == NULL || sem->magic != COBALT_SEM_MAGIC) + return -EINVAL; + + if (sem->resnode.scope && sem->resnode.scope != sem_kqueue(sem)) + return -EPERM; + + return 0; +} + +int __cobalt_sem_destroy(xnhandle_t handle) +{ + struct cobalt_sem *sem; + int ret = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + sem = xnregistry_lookup(handle, NULL); + if (!cobalt_obj_active(sem, COBALT_SEM_MAGIC, typeof(*sem))) { + ret = -EINVAL; + goto fail; + } + + if (--sem->refs) { + ret = -EBUSY; + goto fail; + } + + cobalt_mark_deleted(sem); + if (!sem->pathname) + cobalt_del_resource(&sem->resnode); + if (xnsynch_destroy(&sem->synchbase) == XNSYNCH_RESCHED) { + xnsched_run(); + ret = 1; + } + + xnlock_put_irqrestore(&nklock, s); + + xnregistry_remove(sem->resnode.handle); + if (sem->pathname) + putname(sem->pathname); + + cobalt_umm_free(&cobalt_ppd_get(!!(sem->flags & SEM_PSHARED))->umm, + sem->state); + + xnfree(sem); + + return ret; +fail: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +struct cobalt_sem * +__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sm, + int flags, unsigned int value) +{ + struct cobalt_sem_state *state; + struct cobalt_sem *sem, *osem; + struct cobalt_ppd *sys_ppd; + int ret, sflags, pshared; + struct list_head *semq; + spl_t s; + + if ((flags & SEM_PULSE) != 0 && value > 0) { + ret = -EINVAL; + goto out; + } + + sem = xnmalloc(sizeof(*sem)); + if (sem == NULL) { + ret = -ENOMEM; + goto out; + } + + pshared = !!(flags & SEM_PSHARED); + sys_ppd = cobalt_ppd_get(pshared); + state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state)); + if (state == NULL) { + ret = -EAGAIN; + goto err_free_sem; + } + + xnlock_get_irqsave(&nklock, s); + + semq = &cobalt_current_resources(pshared)->semq; + if ((sm->magic == COBALT_SEM_MAGIC && !list_empty(semq)) || + sm->magic == COBALT_NAMED_SEM_MAGIC) { + osem = xnregistry_lookup(sm->handle, NULL); + if (cobalt_obj_active(osem, COBALT_SEM_MAGIC, typeof(*osem))) { + ret = -EBUSY; + goto err_lock_put; + } + } + + if (value > (unsigned)SEM_VALUE_MAX) { + ret = -EINVAL; + goto err_lock_put; + } + + ret = xnregistry_enter(name ?: "", sem, 
&sem->resnode.handle, + name ? &__sem_pnode.node : NULL); + if (ret < 0) + goto err_lock_put; + + sem->magic = COBALT_SEM_MAGIC; + if (!name) + cobalt_add_resource(&sem->resnode, sem, pshared); + else + sem->resnode.scope = NULL; + sflags = flags & SEM_FIFO ? 0 : XNSYNCH_PRIO; + xnsynch_init(&sem->synchbase, sflags, NULL); + + sem->state = state; + atomic_set(&state->value, value); + state->flags = flags; + sem->flags = flags; + sem->refs = name ? 2 : 1; + sem->pathname = NULL; + + xnlock_put_irqrestore(&nklock, s); + + __cobalt_sem_shadow_init(sem, + name ? COBALT_NAMED_SEM_MAGIC : COBALT_SEM_MAGIC, sm); + + trace_cobalt_psem_init(name ?: "anon", + sem->resnode.handle, flags, value); + + return sem; + +err_lock_put: + xnlock_put_irqrestore(&nklock, s); + cobalt_umm_free(&sys_ppd->umm, state); +err_free_sem: + xnfree(sem); +out: + trace_cobalt_psem_init_failed(name ?: "anon", flags, value, ret); + + return ERR_PTR(ret); +} + +void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic, + struct cobalt_sem_shadow *sm) +{ + __u32 flags = sem->state->flags; + struct cobalt_ppd *sys_ppd; + + sys_ppd = cobalt_ppd_get(!!(flags & SEM_PSHARED)); + + sm->magic = magic; + sm->handle = sem->resnode.handle; + sm->state_offset = cobalt_umm_offset(&sys_ppd->umm, sem->state); + if (sem->state->flags & SEM_PSHARED) + sm->state_offset = -sm->state_offset; +} + +static int sem_destroy(struct cobalt_sem_shadow *sm) +{ + struct cobalt_sem *sem; + int warn, ret; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (sm->magic != COBALT_SEM_MAGIC) { + ret = -EINVAL; + goto fail; + } + + sem = xnregistry_lookup(sm->handle, NULL); + ret = sem_check(sem); + if (ret) + goto fail; + + if ((sem->flags & SEM_NOBUSYDEL) != 0 && + xnsynch_pended_p(&sem->synchbase)) { + ret = -EBUSY; + goto fail; + } + + warn = sem->flags & SEM_WARNDEL; + cobalt_mark_deleted(sm); + + xnlock_put_irqrestore(&nklock, s); + + ret = __cobalt_sem_destroy(sem->resnode.handle); + + return warn ? ret : 0; +fail: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +static inline int do_trywait(struct cobalt_sem *sem) +{ + int ret; + + ret = sem_check(sem); + if (ret) + return ret; + + if (atomic_sub_return(1, &sem->state->value) < 0) + return -EAGAIN; + + return 0; +} + +static int sem_wait(xnhandle_t handle) +{ + struct cobalt_sem *sem; + int ret, info; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + sem = xnregistry_lookup(handle, NULL); + ret = do_trywait(sem); + if (ret != -EAGAIN) + goto out; + + ret = 0; + info = xnsynch_sleep_on(&sem->synchbase, XN_INFINITE, XN_RELATIVE); + if (info & XNRMID) { + ret = -EINVAL; + } else if (info & XNBREAK) { + atomic_inc(&sem->state->value); /* undo do_trywait() */ + ret = -EINTR; + } +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem, + const struct timespec64 *ts) +{ + int ret, info; + bool validate_ts = true; + struct cobalt_sem *sem; + xnhandle_t handle; + xntmode_t tmode; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_timedwait(handle); + + xnlock_get_irqsave(&nklock, s); + + for (;;) { + sem = xnregistry_lookup(handle, NULL); + ret = do_trywait(sem); + if (ret != -EAGAIN) + break; + + /* + * POSIX states that the validity of the timeout spec + * _need_ not be checked if the semaphore can be + * locked immediately, we show this behavior despite + * it's actually more complex, to keep some + * applications ported to Linux happy. 
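+		 *
+		 * For reference, the calling pattern this accommodates
+		 * (a sketch using the plain POSIX API as wrapped by the
+		 * Cobalt libc, sem being an initialized sem_t):
+		 *
+		 *   struct timespec abs_timeout;
+		 *
+		 *   clock_gettime(CLOCK_REALTIME, &abs_timeout);
+		 *   abs_timeout.tv_sec += 1;   // give up after one second
+		 *   if (sem_timedwait(&sem, &abs_timeout) && errno == ETIMEDOUT)
+		 *           ; // deal with the timeout
+		 *
+		 * i.e. a stale or even invalid timeout still succeeds
+		 * whenever the semaphore count is positive at call time.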
+ */ + if (validate_ts) { + atomic_inc(&sem->state->value); + if (!ts) { + ret = -EFAULT; + break; + } + if (!timespec64_valid(ts)) { + ret = -EINVAL; + break; + } + validate_ts = false; + continue; + } + + ret = 0; + tmode = sem->flags & SEM_RAWCLOCK ? XN_ABSOLUTE : XN_REALTIME; + info = xnsynch_sleep_on(&sem->synchbase, ts2ns(ts) + 1, tmode); + if (info & XNRMID) + ret = -EINVAL; + else if (info & (XNBREAK|XNTIMEO)) { + ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT; + atomic_inc(&sem->state->value); + } + break; + } + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts) +{ + int ret = 1; + struct timespec64 ts64; + + if (u_ts) + ret = cobalt_get_timespec64(&ts64, u_ts); + + return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64); +} + +static int sem_post(xnhandle_t handle) +{ + struct cobalt_sem *sem; + int ret; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + sem = xnregistry_lookup(handle, NULL); + ret = sem_check(sem); + if (ret) + goto out; + + if (atomic_read(&sem->state->value) == SEM_VALUE_MAX) { + ret = -EINVAL; + goto out; + } + + if (atomic_inc_return(&sem->state->value) <= 0) { + if (xnsynch_wakeup_one_sleeper(&sem->synchbase)) + xnsched_run(); + } else if (sem->flags & SEM_PULSE) + atomic_set(&sem->state->value, 0); +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +static int sem_getvalue(xnhandle_t handle, int *value) +{ + struct cobalt_sem *sem; + int ret; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + sem = xnregistry_lookup(handle, NULL); + ret = sem_check(sem); + if (ret) { + xnlock_put_irqrestore(&nklock, s); + return ret; + } + + *value = atomic_read(&sem->state->value); + if ((sem->flags & SEM_REPORT) == 0 && *value < 0) + *value = 0; + + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +COBALT_SYSCALL(sem_init, current, + (struct cobalt_sem_shadow __user *u_sem, + int flags, unsigned int value)) +{ + struct cobalt_sem_shadow sm; + struct cobalt_sem *sem; + + if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm))) + return -EFAULT; + + if (flags & ~(SEM_FIFO|SEM_PULSE|SEM_PSHARED|SEM_REPORT|\ + SEM_WARNDEL|SEM_RAWCLOCK|SEM_NOBUSYDEL)) + return -EINVAL; + + sem = __cobalt_sem_init(NULL, &sm, flags, value); + if (IS_ERR(sem)) + return PTR_ERR(sem); + + return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem)); +} + +COBALT_SYSCALL(sem_post, current, + (struct cobalt_sem_shadow __user *u_sem)) +{ + xnhandle_t handle; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_post(handle); + + return sem_post(handle); +} + +COBALT_SYSCALL(sem_wait, primary, + (struct cobalt_sem_shadow __user *u_sem)) +{ + xnhandle_t handle; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_wait(handle); + + return sem_wait(handle); +} + +COBALT_SYSCALL(sem_timedwait, primary, + (struct cobalt_sem_shadow __user *u_sem, + const struct __user_old_timespec __user *u_ts)) +{ + int ret = 1; + struct timespec64 ts64; + + if (u_ts) + ret = cobalt_get_u_timespec(&ts64, u_ts); + + return __cobalt_sem_timedwait(u_sem, ret ? 
NULL : &ts64); +} + +COBALT_SYSCALL(sem_timedwait64, primary, + (struct cobalt_sem_shadow __user *u_sem, + const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_sem_timedwait64(u_sem, u_ts); +} + +COBALT_SYSCALL(sem_trywait, primary, + (struct cobalt_sem_shadow __user *u_sem)) +{ + struct cobalt_sem *sem; + xnhandle_t handle; + int ret; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_trywait(handle); + + xnlock_get_irqsave(&nklock, s); + sem = xnregistry_lookup(handle, NULL); + ret = do_trywait(sem); + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(sem_getvalue, current, + (struct cobalt_sem_shadow __user *u_sem, + int __user *u_sval)) +{ + int ret, sval = -1; + xnhandle_t handle; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + + ret = sem_getvalue(handle, &sval); + trace_cobalt_psem_getvalue(handle, sval); + if (ret) + return ret; + + return cobalt_copy_to_user(u_sval, &sval, sizeof(sval)); +} + +COBALT_SYSCALL(sem_destroy, current, + (struct cobalt_sem_shadow __user *u_sem)) +{ + struct cobalt_sem_shadow sm; + int err; + + if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm))) + return -EFAULT; + + trace_cobalt_psem_destroy(sm.handle); + + err = sem_destroy(&sm); + if (err < 0) + return err; + + return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem)) ?: err; +} + +COBALT_SYSCALL(sem_broadcast_np, current, + (struct cobalt_sem_shadow __user *u_sem)) +{ + struct cobalt_sem *sem; + xnhandle_t handle; + spl_t s; + int ret; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_broadcast(handle); + + xnlock_get_irqsave(&nklock, s); + + sem = xnregistry_lookup(handle, NULL); + ret = sem_check(sem); + if (ret == 0 && atomic_read(&sem->state->value) < 0) { + atomic_set(&sem->state->value, 0); + xnsynch_flush(&sem->synchbase, 0); + xnsched_run(); + } + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(sem_inquire, current, + (struct cobalt_sem_shadow __user *u_sem, + struct cobalt_sem_info __user *u_info, + pid_t __user *u_waitlist, + size_t waitsz)) +{ + int val = 0, nrwait = 0, nrpids, ret = 0; + unsigned long pstamp, nstamp = 0; + struct cobalt_sem_info info; + pid_t *t = NULL, fbuf[16]; + struct xnthread *thread; + struct cobalt_sem *sem; + xnhandle_t handle; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_sem->handle); + trace_cobalt_psem_inquire(handle); + + nrpids = waitsz / sizeof(pid_t); + + xnlock_get_irqsave(&nklock, s); + + for (;;) { + pstamp = nstamp; + sem = xnregistry_lookup(handle, &nstamp); + if (sem == NULL || sem->magic != COBALT_SEM_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + /* + * Allocate memory to return the wait list without + * holding any lock, then revalidate the handle. + */ + if (t == NULL) { + val = atomic_read(&sem->state->value); + if (val >= 0 || u_waitlist == NULL) + break; + xnlock_put_irqrestore(&nklock, s); + if (nrpids > -val) + nrpids = -val; + if (-val <= ARRAY_SIZE(fbuf)) + t = fbuf; /* Use fast buffer. */ + else { + t = xnmalloc(-val * sizeof(pid_t)); + if (t == NULL) + return -ENOMEM; + } + xnlock_get_irqsave(&nklock, s); + } else if (pstamp == nstamp) + break; + else if (val != atomic_read(&sem->state->value)) { + xnlock_put_irqrestore(&nklock, s); + if (t != fbuf) + xnfree(t); + t = NULL; + xnlock_get_irqsave(&nklock, s); + } + } + + info.flags = sem->flags; + info.value = (sem->flags & SEM_REPORT) || val >= 0 ? val : 0; + info.nrwait = val < 0 ? 
-val : 0; + + if (xnsynch_pended_p(&sem->synchbase) && u_waitlist != NULL) { + xnsynch_for_each_sleeper(thread, &sem->synchbase) { + if (nrwait >= nrpids) + break; + t[nrwait++] = xnthread_host_pid(thread); + } + } + + xnlock_put_irqrestore(&nklock, s); + + ret = cobalt_copy_to_user(u_info, &info, sizeof(info)); + if (ret == 0 && nrwait > 0) + ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t)); + + if (t && t != fbuf) + xnfree(t); + + return ret ?: nrwait; +} + +void cobalt_sem_reclaim(struct cobalt_resnode *node, spl_t s) +{ + struct cobalt_sem *sem; + xnhandle_t handle; + int named, ret; + + sem = container_of(node, struct cobalt_sem, resnode); + named = (sem->flags & SEM_NAMED) != 0; + handle = node->handle; + xnlock_put_irqrestore(&nklock, s); + ret = __cobalt_sem_destroy(handle); + if (named && ret == -EBUSY) + xnregistry_unlink(xnregistry_key(handle)); +} --- linux/kernel/xenomai/posix/timer.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/timer.h 2022-03-21 12:58:29.040892365 +0100 @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2005 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef _COBALT_POSIX_TIMER_H +#define _COBALT_POSIX_TIMER_H + +#include +#include +#include +#include +#include +#include + +struct cobalt_timer { + struct xntimer timerbase; + timer_t id; + int overruns; + clockid_t clockid; + pid_t target; + struct cobalt_sigpending sigp; + struct cobalt_extref extref; +}; + +int cobalt_timer_deliver(struct cobalt_thread *waiter, + timer_t timerid); + +void cobalt_timer_reclaim(struct cobalt_process *p); + +static inline timer_t cobalt_timer_id(const struct cobalt_timer *timer) +{ + return timer->id; +} + +struct cobalt_timer * +cobalt_timer_by_id(struct cobalt_process *p, timer_t timer_id); + +void cobalt_timer_handler(struct xntimer *xntimer); + +void __cobalt_timer_getval(struct xntimer *__restrict__ timer, + struct itimerspec64 *__restrict__ value); + +int __cobalt_timer_setval(struct xntimer *__restrict__ timer, int clock_flag, + const struct itimerspec64 *__restrict__ value); + +int __cobalt_timer_create(clockid_t clock, + const struct sigevent *sev, + timer_t __user *u_tm); + +int __cobalt_timer_settime(timer_t timerid, int flags, + const struct itimerspec64 *__restrict__ value, + struct itimerspec64 *__restrict__ ovalue); + +int __cobalt_timer_gettime(timer_t timerid, struct itimerspec64 *value); + +COBALT_SYSCALL_DECL(timer_create, + (clockid_t clock, + const struct sigevent __user *u_sev, + timer_t __user *u_tm)); + +COBALT_SYSCALL_DECL(timer_delete, (timer_t tm)); + +COBALT_SYSCALL_DECL(timer_settime, + (timer_t tm, int flags, + const struct __user_old_itimerspec __user *u_newval, + struct __user_old_itimerspec __user *u_oldval)); + +COBALT_SYSCALL_DECL(timer_gettime, + (timer_t tm, struct __user_old_itimerspec __user *u_val)); + +COBALT_SYSCALL_DECL(timer_getoverrun, (timer_t tm)); + +#endif /* !_COBALT_POSIX_TIMER_H */ --- linux/kernel/xenomai/posix/corectl.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/corectl.c 2022-03-21 12:58:29.036892404 +0100 @@ -0,0 +1,215 @@ +/* + * Copyright (C) 2016 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "corectl.h" + +static BLOCKING_NOTIFIER_HEAD(config_notifier_list); + +static int do_conf_option(int option, void __user *u_buf, size_t u_bufsz) +{ + struct cobalt_config_vector vec; + int ret, val = 0; + + if (option <= _CC_COBALT_GET_CORE_STATUS && u_bufsz < sizeof(val)) + return -EINVAL; + + switch (option) { + case _CC_COBALT_GET_VERSION: + val = XENO_VERSION_CODE; + break; + case _CC_COBALT_GET_NR_PIPES: +#ifdef CONFIG_XENO_OPT_PIPE + val = CONFIG_XENO_OPT_PIPE_NRDEV; +#endif + break; + case _CC_COBALT_GET_NR_TIMERS: + val = CONFIG_XENO_OPT_NRTIMERS; + break; + case _CC_COBALT_GET_POLICIES: + val = _CC_COBALT_SCHED_FIFO|_CC_COBALT_SCHED_RR; + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK)) + val |= _CC_COBALT_SCHED_WEAK; + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_SPORADIC)) + val |= _CC_COBALT_SCHED_SPORADIC; + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_QUOTA)) + val |= _CC_COBALT_SCHED_QUOTA; + if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_TP)) + val |= _CC_COBALT_SCHED_TP; + break; + case _CC_COBALT_GET_DEBUG: + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_COBALT)) + val |= _CC_COBALT_DEBUG_ASSERT; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_CONTEXT)) + val |= _CC_COBALT_DEBUG_CONTEXT; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LOCKING)) + val |= _CC_COBALT_DEBUG_LOCKING; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_USER)) + val |= _CC_COBALT_DEBUG_USER; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED)) + val |= _CC_COBALT_DEBUG_MUTEX_RELAXED; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) + val |= _CC_COBALT_DEBUG_MUTEX_SLEEP; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY)) + val |= _CC_COBALT_DEBUG_LEGACY; + if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_TRACE_RELAX)) + val |= _CC_COBALT_DEBUG_TRACE_RELAX; + if (IS_ENABLED(CONFIG_XENO_DRIVERS_RTNET_CHECKED)) + val |= _CC_COBALT_DEBUG_NET; + break; + case _CC_COBALT_GET_WATCHDOG: +#ifdef CONFIG_XENO_OPT_WATCHDOG + val = CONFIG_XENO_OPT_WATCHDOG_TIMEOUT; +#endif + break; + case _CC_COBALT_GET_CORE_STATUS: + val = realtime_core_state(); + break; + default: + if (is_primary_domain()) + /* Switch to secondary mode first. */ + return -ENOSYS; + vec.u_buf = u_buf; + vec.u_bufsz = u_bufsz; + ret = blocking_notifier_call_chain(&config_notifier_list, + option, &vec); + if (ret == NOTIFY_DONE) + return -EINVAL; /* Nobody cared. */ + return notifier_to_errno(ret); + } + + ret = cobalt_copy_to_user(u_buf, &val, sizeof(val)); + + return ret ? -EFAULT : 0; +} + +static int stop_services(const void __user *u_buf, size_t u_bufsz) +{ + const u32 final_grace_period = 3; /* seconds */ + enum cobalt_run_states state; + __u32 grace_period; + int ret; + + /* + * XXX: we don't have any syscall for unbinding a thread from + * the Cobalt core, so we deny real-time threads from stopping + * Cobalt services. i.e. _CC_COBALT_STOP_CORE must be issued + * from a plain regular linux thread. + */ + if (xnthread_current()) + return -EPERM; + + if (u_bufsz != sizeof(__u32)) + return -EINVAL; + + ret = cobalt_copy_from_user(&grace_period, + u_buf, sizeof(grace_period)); + if (ret) + return ret; + + state = atomic_cmpxchg(&cobalt_runstate, + COBALT_STATE_RUNNING, + COBALT_STATE_TEARDOWN); + switch (state) { + case COBALT_STATE_STOPPED: + break; + case COBALT_STATE_RUNNING: + /* Kill user threads. */ + ret = xnthread_killall(grace_period, XNUSER); + if (ret) { + set_realtime_core_state(state); + return ret; + } + cobalt_call_state_chain(COBALT_STATE_TEARDOWN); + /* Kill lingering RTDM tasks. 
*/ + ret = xnthread_killall(final_grace_period, 0); + if (ret == -EAGAIN) + printk(XENO_WARNING "some RTDM tasks won't stop"); + pipeline_uninstall_tick_proxy(); + set_realtime_core_state(COBALT_STATE_STOPPED); + printk(XENO_INFO "services stopped\n"); + break; + default: + ret = -EINPROGRESS; + } + + return ret; +} + +static int start_services(void) +{ + enum cobalt_run_states state; + int ret = 0; + + state = atomic_cmpxchg(&cobalt_runstate, + COBALT_STATE_STOPPED, + COBALT_STATE_WARMUP); + switch (state) { + case COBALT_STATE_RUNNING: + break; + case COBALT_STATE_STOPPED: + pipeline_install_tick_proxy(); + cobalt_call_state_chain(COBALT_STATE_WARMUP); + set_realtime_core_state(COBALT_STATE_RUNNING); + printk(XENO_INFO "services started\n"); + break; + default: + ret = -EINPROGRESS; + } + + return ret; +} + +COBALT_SYSCALL(corectl, probing, + (int request, void __user *u_buf, size_t u_bufsz)) +{ + int ret; + + switch (request) { + case _CC_COBALT_STOP_CORE: + ret = stop_services(u_buf, u_bufsz); + break; + case _CC_COBALT_START_CORE: + ret = start_services(); + break; + default: + ret = do_conf_option(request, u_buf, u_bufsz); + } + + return ret; +} + +void cobalt_add_config_chain(struct notifier_block *nb) +{ + blocking_notifier_chain_register(&config_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(cobalt_add_config_chain); + +void cobalt_remove_config_chain(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&config_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(cobalt_remove_config_chain); --- linux/kernel/xenomai/posix/timerfd.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/timerfd.c 2022-03-21 12:58:29.033892433 +0100 @@ -0,0 +1,334 @@ +/* + * Copyright (C) 2013 Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include +#include +#include +#include +#include +#include "internal.h" +#include "clock.h" +#include "timer.h" +#include "timerfd.h" + +struct cobalt_tfd { + int flags; + clockid_t clockid; + struct rtdm_fd fd; + struct xntimer timer; + DECLARE_XNSELECT(read_select); + struct itimerspec64 value; + struct xnsynch readers; + struct xnthread *target; +}; + +#define COBALT_TFD_TICKED (1 << 2) + +#define COBALT_TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_WAKEUP) + +static ssize_t timerfd_read(struct rtdm_fd *fd, void __user *buf, size_t size) +{ + struct cobalt_tfd *tfd; + __u64 __user *u_ticks; + __u64 ticks = 0; + bool aligned; + spl_t s; + int err; + + if (size < sizeof(ticks)) + return -EINVAL; + + u_ticks = buf; + if (!access_wok(u_ticks, sizeof(*u_ticks))) + return -EFAULT; + + aligned = (((unsigned long)buf) & (sizeof(ticks) - 1)) == 0; + + tfd = container_of(fd, struct cobalt_tfd, fd); + + xnlock_get_irqsave(&nklock, s); + if (tfd->flags & COBALT_TFD_TICKED) { + err = 0; + goto out; + } + if (rtdm_fd_flags(fd) & O_NONBLOCK) { + err = -EAGAIN; + goto out; + } + + do { + err = xnsynch_sleep_on(&tfd->readers, XN_INFINITE, XN_RELATIVE); + } while (err == 0 && (tfd->flags & COBALT_TFD_TICKED) == 0); + + if (err & XNBREAK) + err = -EINTR; + out: + if (err == 0) { + xnticks_t now; + + if (xntimer_periodic_p(&tfd->timer)) { + now = xnclock_read_raw(xntimer_clock(&tfd->timer)); + ticks = 1 + xntimer_get_overruns(&tfd->timer, + xnthread_current(), now); + } else + ticks = 1; + + tfd->flags &= ~COBALT_TFD_TICKED; + xnselect_signal(&tfd->read_select, 0); + } + xnlock_put_irqrestore(&nklock, s); + + if (err == 0) { + err = aligned ? __xn_put_user(ticks, u_ticks) : + __xn_copy_to_user(buf, &ticks, sizeof(ticks)); + if (err) + err =-EFAULT; + } + + return err ?: sizeof(ticks); +} + +static int +timerfd_select(struct rtdm_fd *fd, struct xnselector *selector, + unsigned type, unsigned index) +{ + struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd); + struct xnselect_binding *binding; + spl_t s; + int err; + + if (type != XNSELECT_READ) + return -EBADF; + + binding = xnmalloc(sizeof(*binding)); + if (binding == NULL) + return -ENOMEM; + + xnlock_get_irqsave(&nklock, s); + xntimer_set_affinity(&tfd->timer, xnthread_current()->sched); + err = xnselect_bind(&tfd->read_select, binding, selector, type, + index, tfd->flags & COBALT_TFD_TICKED); + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +static void timerfd_close(struct rtdm_fd *fd) +{ + struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd); + spl_t s; + + xnlock_get_irqsave(&nklock, s); + xntimer_destroy(&tfd->timer); + xnsynch_destroy(&tfd->readers); + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); + xnselect_destroy(&tfd->read_select); /* Reschedules. 
*/ + xnfree(tfd); +} + +static struct rtdm_fd_ops timerfd_ops = { + .read_rt = timerfd_read, + .select = timerfd_select, + .close = timerfd_close, +}; + +static void timerfd_handler(struct xntimer *xntimer) +{ + struct cobalt_tfd *tfd; + + tfd = container_of(xntimer, struct cobalt_tfd, timer); + tfd->flags |= COBALT_TFD_TICKED; + xnselect_signal(&tfd->read_select, 1); + xnsynch_wakeup_one_sleeper(&tfd->readers); + if (tfd->target) + xnthread_unblock(tfd->target); +} + +COBALT_SYSCALL(timerfd_create, lostage, (int clockid, int flags)) +{ + struct cobalt_tfd *tfd; + struct xnthread *curr; + struct xnclock *clock; + int ret, ufd; + + if (flags & ~TFD_CREATE_FLAGS) + return -EINVAL; + + clock = cobalt_clock_find(clockid); + if (IS_ERR(clock)) + return PTR_ERR(clock); + + tfd = xnmalloc(sizeof(*tfd)); + if (tfd == NULL) + return -ENOMEM; + + ufd = __rtdm_anon_getfd("[cobalt-timerfd]", + O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); + if (ufd < 0) { + ret = ufd; + goto fail_getfd; + } + + tfd->flags = flags & ~TFD_NONBLOCK; + tfd->fd.oflags = (flags & TFD_NONBLOCK) ? O_NONBLOCK : 0; + tfd->clockid = clockid; + curr = xnthread_current(); + xntimer_init(&tfd->timer, clock, timerfd_handler, + curr ? curr->sched : NULL, XNTIMER_UGRAVITY); + xnsynch_init(&tfd->readers, XNSYNCH_PRIO, NULL); + xnselect_init(&tfd->read_select); + tfd->target = NULL; + + ret = rtdm_fd_enter(&tfd->fd, ufd, COBALT_TIMERFD_MAGIC, &timerfd_ops); + if (ret < 0) + goto fail; + + ret = rtdm_fd_register(&tfd->fd, ufd); + if (ret < 0) + goto fail; + + return ufd; +fail: + xnselect_destroy(&tfd->read_select); + xnsynch_destroy(&tfd->readers); + xntimer_destroy(&tfd->timer); + __rtdm_anon_putfd(ufd); +fail_getfd: + xnfree(tfd); + + return ret; +} + +static inline struct cobalt_tfd *tfd_get(int ufd) +{ + struct rtdm_fd *fd; + + fd = rtdm_fd_get(ufd, COBALT_TIMERFD_MAGIC); + if (IS_ERR(fd)) { + int err = PTR_ERR(fd); + if (err == -EBADF && cobalt_current_process() == NULL) + err = -EPERM; + return ERR_PTR(err); + } + + return container_of(fd, struct cobalt_tfd, fd); +} + +static inline void tfd_put(struct cobalt_tfd *tfd) +{ + rtdm_fd_put(&tfd->fd); +} + +int __cobalt_timerfd_settime(int fd, int flags, + const struct itimerspec64 *value, + struct itimerspec64 *ovalue) +{ + struct cobalt_tfd *tfd; + int cflag, ret; + spl_t s; + + if (flags & ~COBALT_TFD_SETTIME_FLAGS) + return -EINVAL; + + tfd = tfd_get(fd); + if (IS_ERR(tfd)) + return PTR_ERR(tfd); + + cflag = (flags & TFD_TIMER_ABSTIME) ? 
TIMER_ABSTIME : 0; + + xnlock_get_irqsave(&nklock, s); + + tfd->target = NULL; + if (flags & TFD_WAKEUP) { + tfd->target = xnthread_current(); + if (tfd->target == NULL) { + ret = -EPERM; + goto out; + } + } + + if (ovalue) + __cobalt_timer_getval(&tfd->timer, ovalue); + + xntimer_set_affinity(&tfd->timer, xnthread_current()->sched); + + ret = __cobalt_timer_setval(&tfd->timer, + clock_flag(cflag, tfd->clockid), value); +out: + xnlock_put_irqrestore(&nklock, s); + + tfd_put(tfd); + + return ret; +} + +COBALT_SYSCALL(timerfd_settime, primary, + (int fd, int flags, + const struct __user_old_itimerspec __user *new_value, + struct __user_old_itimerspec __user *old_value)) +{ + struct itimerspec64 ovalue, value; + int ret; + + ret = cobalt_get_u_itimerspec(&value, new_value); + if (ret) + return ret; + + ret = __cobalt_timerfd_settime(fd, flags, &value, &ovalue); + if (ret) + return ret; + + if (old_value) { + ret = cobalt_copy_to_user(old_value, &ovalue, sizeof(ovalue)); + value.it_value.tv_sec = 0; + value.it_value.tv_nsec = 0; + __cobalt_timerfd_settime(fd, flags, &value, NULL); + } + + return ret; +} + +int __cobalt_timerfd_gettime(int fd, struct itimerspec64 *value) +{ + struct cobalt_tfd *tfd; + spl_t s; + + tfd = tfd_get(fd); + if (IS_ERR(tfd)) + return PTR_ERR(tfd); + + xnlock_get_irqsave(&nklock, s); + __cobalt_timer_getval(&tfd->timer, value); + xnlock_put_irqrestore(&nklock, s); + + tfd_put(tfd); + + return 0; +} + +COBALT_SYSCALL(timerfd_gettime, current, + (int fd, struct __user_old_itimerspec __user *curr_value)) +{ + struct itimerspec64 value; + int ret; + + ret = __cobalt_timerfd_gettime(fd, &value); + + return ret ?: cobalt_put_u_itimerspec(curr_value, &value); +} --- linux/kernel/xenomai/posix/cond.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/cond.c 2022-03-21 12:58:29.029892472 +0100 @@ -0,0 +1,424 @@ +/* + * Written by Gilles Chanteperdrix . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
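Once libcobalt routes the standard timerfd calls to the Cobalt syscalls above (an assumption about the userland side, which is not part of this patch), the usual POSIX pattern applies; a minimal sketch:

#include <sys/timerfd.h>
#include <time.h>
#include <stdint.h>
#include <unistd.h>

static int wait_periodic(void)
{
        struct itimerspec its = {
                .it_value    = { .tv_sec = 0, .tv_nsec = 100000000 }, /* first tick in 100 ms */
                .it_interval = { .tv_sec = 0, .tv_nsec = 100000000 }, /* then every 100 ms */
        };
        uint64_t ticks;
        int fd, i;

        fd = timerfd_create(CLOCK_MONOTONIC, 0);
        if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
                return -1;

        for (i = 0; i < 10; i++) {
                /*
                 * read() blocks until the timer fires; the 8-byte count is
                 * at least 1, larger values reporting overruns as computed
                 * by timerfd_read() above.
                 */
                if (read(fd, &ticks, sizeof(ticks)) != sizeof(ticks))
                        return -1;
        }

        close(fd);
        return 0;
}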
+ */ + +#include "internal.h" +#include "thread.h" +#include "mutex.h" +#include "cond.h" +#include "clock.h" +#include + +static inline int +pthread_cond_init(struct cobalt_cond_shadow *cnd, const struct cobalt_condattr *attr) +{ + int synch_flags = XNSYNCH_PRIO, ret; + struct cobalt_cond *cond, *old_cond; + struct cobalt_cond_state *state; + struct cobalt_ppd *sys_ppd; + struct list_head *condq; + spl_t s; + + cond = xnmalloc(sizeof(*cond)); + if (cond == NULL) + return -ENOMEM; + + sys_ppd = cobalt_ppd_get(attr->pshared); + state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state)); + if (state == NULL) { + ret = -EAGAIN; + goto fail_umm; + } + cond->state = state; + state->pending_signals = 0; + state->mutex_state_offset = ~0U; + + xnlock_get_irqsave(&nklock, s); + + condq = &cobalt_current_resources(attr->pshared)->condq; + if (cnd->magic == COBALT_COND_MAGIC && !list_empty(condq)) { + old_cond = xnregistry_lookup(cnd->handle, NULL); + if (cobalt_obj_active(old_cond, COBALT_COND_MAGIC, + typeof(*old_cond))) { + ret = -EBUSY; + goto fail_register; + } + } + + ret = xnregistry_enter_anon(cond, &cond->resnode.handle); + if (ret < 0) + goto fail_register; + if (attr->pshared) + cond->resnode.handle |= XNSYNCH_PSHARED; + cond->magic = COBALT_COND_MAGIC; + xnsynch_init(&cond->synchbase, synch_flags, NULL); + cond->attr = *attr; + cond->mutex = NULL; + cobalt_add_resource(&cond->resnode, cond, attr->pshared); + + cnd->handle = cond->resnode.handle; + cnd->state_offset = cobalt_umm_offset(&sys_ppd->umm, state); + cnd->magic = COBALT_COND_MAGIC; + + xnlock_put_irqrestore(&nklock, s); + + return 0; +fail_register: + xnlock_put_irqrestore(&nklock, s); + cobalt_umm_free(&sys_ppd->umm, state); +fail_umm: + xnfree(cond); + + return ret; +} + +static inline int pthread_cond_destroy(struct cobalt_cond_shadow *cnd) +{ + struct cobalt_cond *cond; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + cond = xnregistry_lookup(cnd->handle, NULL); + if (cond == NULL) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + + if (!cobalt_obj_active(cnd, COBALT_COND_MAGIC, struct cobalt_cond_shadow) + || !cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + + if (cond->resnode.scope != + cobalt_current_resources(cond->attr.pshared)) { + xnlock_put_irqrestore(&nklock, s); + return -EPERM; + } + + if (xnsynch_pended_p(&cond->synchbase) || cond->mutex) { + xnlock_put_irqrestore(&nklock, s); + return -EBUSY; + } + + cobalt_cond_reclaim(&cond->resnode, s); /* drops lock */ + + cobalt_mark_deleted(cnd); + + return 0; +} + +static inline int cobalt_cond_timedwait_prologue(struct xnthread *cur, + struct cobalt_cond *cond, + struct cobalt_mutex *mutex, + xnticks_t abs_to) +{ + int err, ret; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + /* If another thread waiting for cond does not use the same mutex */ + if (!cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond) + || (cond->mutex && cond->mutex != mutex)) { + err = -EINVAL; + goto unlock_and_return; + } + + if (cond->resnode.scope != + cobalt_current_resources(cond->attr.pshared)) { + err = -EPERM; + goto unlock_and_return; + } + + if (mutex->attr.pshared != cond->attr.pshared) { + err = -EINVAL; + goto unlock_and_return; + } + + /* Unlock mutex. 
*/ + err = cobalt_mutex_release(cur, mutex); + if (err < 0) + goto unlock_and_return; + + /* err == 1 means a reschedule is needed, but do not + reschedule here, releasing the mutex and suspension must be + done atomically in pthread_cond_*wait. */ + + /* Bind mutex to cond. */ + if (cond->mutex == NULL) { + cond->mutex = mutex; + list_add_tail(&cond->mutex_link, &mutex->conds); + } + + /* Wait for another thread to signal the condition. */ + if (abs_to != XN_INFINITE) + ret = xnsynch_sleep_on(&cond->synchbase, abs_to, + clock_flag(TIMER_ABSTIME, cond->attr.clock)); + else + ret = xnsynch_sleep_on(&cond->synchbase, XN_INFINITE, XN_RELATIVE); + + /* There are three possible wakeup conditions : + - cond_signal / cond_broadcast, no status bit is set, and the function + should return 0 ; + - timeout, the status XNTIMEO is set, and the function should return + ETIMEDOUT ; + - pthread_kill, the status bit XNBREAK is set, but ignored, the + function simply returns EINTR (used only by the user-space + interface, replaced by 0 anywhere else), causing a wakeup, spurious + or not whether pthread_cond_signal was called between pthread_kill + and the moment when xnsynch_sleep_on returned ; + */ + + err = 0; + + if (ret & XNBREAK) + err = -EINTR; + else if (ret & XNTIMEO) + err = -ETIMEDOUT; + +unlock_and_return: + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +static inline int cobalt_cond_timedwait_epilogue(struct xnthread *cur, + struct cobalt_cond *cond, + struct cobalt_mutex *mutex) +{ + int err; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + err = __cobalt_mutex_acquire_unchecked(cur, mutex, NULL); + if (err == -EINTR) + goto unlock_and_return; + + /* + * Unbind mutex and cond, if no other thread is waiting, if + * the job was not already done. + */ + if (!xnsynch_pended_p(&cond->synchbase) && cond->mutex == mutex) { + cond->mutex = NULL; + list_del(&cond->mutex_link); + } + +unlock_and_return: + xnlock_put_irqrestore(&nklock, s); + + return err; +} + +COBALT_SYSCALL(cond_init, current, + (struct cobalt_cond_shadow __user *u_cnd, + const struct cobalt_condattr __user *u_attr)) +{ + struct cobalt_cond_shadow cnd; + struct cobalt_condattr attr; + int err; + + if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd))) + return -EFAULT; + + if (cobalt_copy_from_user(&attr, u_attr, sizeof(attr))) + return -EFAULT; + + trace_cobalt_cond_init(u_cnd, &attr); + + err = pthread_cond_init(&cnd, &attr); + if (err < 0) + return err; + + return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd)); +} + +COBALT_SYSCALL(cond_destroy, current, + (struct cobalt_cond_shadow __user *u_cnd)) +{ + struct cobalt_cond_shadow cnd; + int err; + + if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd))) + return -EFAULT; + + trace_cobalt_cond_destroy(u_cnd); + + err = pthread_cond_destroy(&cnd); + if (err < 0) + return err; + + return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd)); +} + +struct us_cond_data { + int err; +}; + +static inline int cond_fetch_timeout(struct timespec64 *ts, + const void __user *u_ts) +{ + return u_ts == NULL ? 
-EFAULT : cobalt_get_u_timespec(ts, u_ts); +} + +int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx, + int *u_err, + void __user *u_ts, + int (*fetch_timeout)(struct timespec64 *ts, + const void __user *u_ts)) +{ + struct xnthread *cur = xnthread_current(); + struct cobalt_cond *cond; + struct cobalt_mutex *mx; + struct us_cond_data d; + struct timespec64 ts; + xnhandle_t handle; + int err, perr = 0; + __u32 offset; + + handle = cobalt_get_handle_from_user(&u_cnd->handle); + cond = xnregistry_lookup(handle, NULL); + + handle = cobalt_get_handle_from_user(&u_mx->handle); + mx = xnregistry_lookup(handle, NULL); + + if (cond->mutex == NULL) { + __xn_get_user(offset, &u_mx->state_offset); + cond->state->mutex_state_offset = offset; + } + + if (fetch_timeout) { + err = fetch_timeout(&ts, u_ts); + if (err == 0) { + trace_cobalt_cond_timedwait(u_cnd, u_mx, &ts); + err = cobalt_cond_timedwait_prologue(cur, cond, mx, + ts2ns(&ts) + 1); + } + } else { + trace_cobalt_cond_wait(u_cnd, u_mx); + err = cobalt_cond_timedwait_prologue(cur, cond, mx, XN_INFINITE); + } + + switch(err) { + case 0: + case -ETIMEDOUT: + perr = d.err = err; + err = cobalt_cond_timedwait_epilogue(cur, cond, mx); + break; + + case -EINTR: + perr = err; + d.err = 0; /* epilogue should return 0. */ + break; + + default: + /* Please gcc and handle the case which will never + happen */ + d.err = EINVAL; + } + + if (cond->mutex == NULL) + cond->state->mutex_state_offset = ~0U; + + if (err == -EINTR) + __xn_put_user(d.err, u_err); + + return err == 0 ? perr : err; +} + +/* pthread_cond_wait_prologue(cond, mutex, count_ptr, timed, timeout) */ +COBALT_SYSCALL(cond_wait_prologue, nonrestartable, + (struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx, + int *u_err, + unsigned int timed, + struct __user_old_timespec __user *u_ts)) +{ + return __cobalt_cond_wait_prologue(u_cnd, u_mx, u_err, u_ts, + timed ? 
cond_fetch_timeout : NULL); +} + +COBALT_SYSCALL(cond_wait_epilogue, primary, + (struct cobalt_cond_shadow __user *u_cnd, + struct cobalt_mutex_shadow __user *u_mx)) +{ + struct xnthread *cur = xnthread_current(); + struct cobalt_cond *cond; + struct cobalt_mutex *mx; + xnhandle_t handle; + int err; + + handle = cobalt_get_handle_from_user(&u_cnd->handle); + cond = xnregistry_lookup(handle, NULL); + + handle = cobalt_get_handle_from_user(&u_mx->handle); + mx = xnregistry_lookup(handle, NULL); + err = cobalt_cond_timedwait_epilogue(cur, cond, mx); + + if (cond->mutex == NULL) + cond->state->mutex_state_offset = ~0U; + + return err; +} + +int cobalt_cond_deferred_signals(struct cobalt_cond *cond) +{ + struct cobalt_cond_state *state; + __u32 pending_signals; + int need_resched; + + state = cond->state; + pending_signals = state->pending_signals; + + switch(pending_signals) { + default: + state->pending_signals = 0; + need_resched = xnsynch_wakeup_many_sleepers(&cond->synchbase, + pending_signals); + break; + + case ~0U: + need_resched = + xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED; + state->pending_signals = 0; + break; + + case 0: + need_resched = 0; + break; + } + + return need_resched; +} + +void cobalt_cond_reclaim(struct cobalt_resnode *node, spl_t s) +{ + struct cobalt_cond *cond; + + cond = container_of(node, struct cobalt_cond, resnode); + xnregistry_remove(node->handle); + cobalt_del_resource(node); + xnsynch_destroy(&cond->synchbase); + cobalt_mark_deleted(cond); + xnlock_put_irqrestore(&nklock, s); + + cobalt_umm_free(&cobalt_ppd_get(cond->attr.pshared)->umm, + cond->state); + xnfree(cond); +} --- linux/kernel/xenomai/posix/syscall.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/syscall.h 2022-03-21 12:58:29.026892501 +0100 @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2014 Philippe Gerum . + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#ifndef _COBALT_POSIX_SYSCALL_H +#define _COBALT_POSIX_SYSCALL_H + +#include + +struct pt_regs; + +/* Regular (native) syscall handler implementation. */ +#define COBALT_SYSCALL(__name, __mode, __args) \ + long CoBaLt_ ## __name __args + +/* Regular (native) syscall handler declaration. */ +#define COBALT_SYSCALL_DECL(__name, __args) \ + long CoBaLt_ ## __name __args + +#include + +int handle_head_syscall(bool caller_is_relaxed, + struct pt_regs *regs); + +int handle_root_syscall(struct pt_regs *regs); + +#endif /* !_COBALT_POSIX_SYSCALL_H */ --- linux/kernel/xenomai/posix/process.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/process.c 2022-03-21 12:58:29.022892540 +0100 @@ -0,0 +1,1203 @@ +/* + * Copyright (C) 2001-2014 Philippe Gerum . 
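The cond_wait_prologue/cond_wait_epilogue pair above splits the wait so that userland can re-acquire the mutex after an interrupted sleep. A hedged sketch of the calling sequence a libcobalt-style wrapper might use; the XENOMAI_SYSCALLx helper names are assumed and signal handling is simplified:

static int cond_wait_sketch(struct cobalt_cond_shadow *cnd,
                            struct cobalt_mutex_shadow *mx)
{
        int err, perr = 0;

        /*
         * Atomically drop mx and sleep on cnd; on plain success or
         * timeout the kernel re-acquires mx before returning.
         */
        err = XENOMAI_SYSCALL5(sc_cobalt_cond_wait_prologue,
                               cnd, mx, &perr, 0 /* not timed */, NULL);

        /*
         * -EINTR means we were woken up without holding mx: run the
         * epilogue until the mutex is ours again.
         */
        while (err == -EINTR)
                err = XENOMAI_SYSCALL2(sc_cobalt_cond_wait_epilogue, cnd, mx);

        return err ?: perr;
}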
+ * Copyright (C) 2001-2014 The Xenomai project + * Copyright (C) 2006 Gilles Chanteperdrix + * + * SMP support Copyright (C) 2004 The HYADES project + * RTAI/fusion Copyright (C) 2004 The RTAI project + * + * Xenomai is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Xenomai; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../debug.h" +#include "internal.h" +#include "thread.h" +#include "sched.h" +#include "mutex.h" +#include "cond.h" +#include "mqueue.h" +#include "sem.h" +#include "signal.h" +#include "timer.h" +#include "monitor.h" +#include "clock.h" +#include "event.h" +#include "timerfd.h" +#include "io.h" + +static int gid_arg = -1; +module_param_named(allowed_group, gid_arg, int, 0644); + +static DEFINE_MUTEX(personality_lock); + +static struct hlist_head *process_hash; +DEFINE_PRIVATE_XNLOCK(process_hash_lock); +#define PROCESS_HASH_SIZE 13 + +struct xnthread_personality *cobalt_personalities[NR_PERSONALITIES]; + +static struct xnsynch yield_sync; + +LIST_HEAD(cobalt_global_thread_list); + +DEFINE_XNPTREE(posix_ptree, "posix"); + +struct cobalt_resources cobalt_global_resources = { + .condq = LIST_HEAD_INIT(cobalt_global_resources.condq), + .mutexq = LIST_HEAD_INIT(cobalt_global_resources.mutexq), + .semq = LIST_HEAD_INIT(cobalt_global_resources.semq), + .monitorq = LIST_HEAD_INIT(cobalt_global_resources.monitorq), + .eventq = LIST_HEAD_INIT(cobalt_global_resources.eventq), + .schedq = LIST_HEAD_INIT(cobalt_global_resources.schedq), +}; + +static unsigned __attribute__((pure)) process_hash_crunch(struct mm_struct *mm) +{ + unsigned long hash = ((unsigned long)mm - PAGE_OFFSET) / sizeof(*mm); + return hash % PROCESS_HASH_SIZE; +} + +static struct cobalt_process *__process_hash_search(struct mm_struct *mm) +{ + unsigned int bucket = process_hash_crunch(mm); + struct cobalt_process *p; + + hlist_for_each_entry(p, &process_hash[bucket], hlink) + if (p->mm == mm) + return p; + + return NULL; +} + +static int process_hash_enter(struct cobalt_process *p) +{ + struct mm_struct *mm = current->mm; + unsigned int bucket = process_hash_crunch(mm); + int err; + spl_t s; + + xnlock_get_irqsave(&process_hash_lock, s); + if (__process_hash_search(mm)) { + err = -EBUSY; + goto out; + } + + p->mm = mm; + hlist_add_head(&p->hlink, &process_hash[bucket]); + err = 0; + out: + xnlock_put_irqrestore(&process_hash_lock, s); + return err; +} + +static void process_hash_remove(struct cobalt_process *p) +{ + spl_t s; + + xnlock_get_irqsave(&process_hash_lock, s); + if (p->mm) + hlist_del(&p->hlink); + xnlock_put_irqrestore(&process_hash_lock, s); +} + +struct cobalt_process 
*cobalt_search_process(struct mm_struct *mm) +{ + struct cobalt_process *process; + spl_t s; + + xnlock_get_irqsave(&process_hash_lock, s); + process = __process_hash_search(mm); + xnlock_put_irqrestore(&process_hash_lock, s); + + return process; +} + +static void *lookup_context(int xid) +{ + struct cobalt_process *process = cobalt_current_process(); + void *priv = NULL; + spl_t s; + + xnlock_get_irqsave(&process_hash_lock, s); + /* + * First try matching the process context attached to the + * (usually main) thread which issued sc_cobalt_bind. If not + * found, try matching by mm context, which should point us + * back to the latter. If none match, then the current process + * is unbound. + */ + if (process == NULL && current->mm) + process = __process_hash_search(current->mm); + if (process) + priv = process->priv[xid]; + + xnlock_put_irqrestore(&process_hash_lock, s); + + return priv; +} + +void cobalt_remove_process(struct cobalt_process *process) +{ + struct xnthread_personality *personality; + void *priv; + int xid; + + mutex_lock(&personality_lock); + + for (xid = NR_PERSONALITIES - 1; xid >= 0; xid--) { + if (!__test_and_clear_bit(xid, &process->permap)) + continue; + personality = cobalt_personalities[xid]; + priv = process->priv[xid]; + if (priv == NULL) + continue; + /* + * CAUTION: process potentially refers to stale memory + * upon return from detach_process() for the Cobalt + * personality, so don't dereference it afterwards. + */ + if (xid) + process->priv[xid] = NULL; + __clear_bit(personality->xid, &process->permap); + personality->ops.detach_process(priv); + atomic_dec(&personality->refcnt); + XENO_WARN_ON(COBALT, atomic_read(&personality->refcnt) < 0); + if (personality->module) + module_put(personality->module); + } + + cobalt_set_process(NULL); + + mutex_unlock(&personality_lock); +} + +static void post_ppd_release(struct cobalt_umm *umm) +{ + struct cobalt_process *process; + + process = container_of(umm, struct cobalt_process, sys_ppd.umm); + kfree(process); +} + +static inline char *get_exe_path(struct task_struct *p) +{ + struct file *exe_file; + char *pathname, *buf; + struct mm_struct *mm; + struct path path; + + /* + * PATH_MAX is fairly large, and in any case won't fit on the + * caller's stack happily; since we are mapping a shadow, + * which is a heavyweight operation anyway, let's pick the + * memory from the page allocator. + */ + buf = (char *)__get_free_page(GFP_KERNEL); + if (buf == NULL) + return ERR_PTR(-ENOMEM); + + mm = get_task_mm(p); + if (mm == NULL) { + pathname = "vmlinux"; + goto copy; /* kernel thread */ + } + + exe_file = get_mm_exe_file(mm); + mmput(mm); + if (exe_file == NULL) { + pathname = ERR_PTR(-ENOENT); + goto out; /* no luck. */ + } + + path = exe_file->f_path; + path_get(&exe_file->f_path); + fput(exe_file); + pathname = d_path(&path, buf, PATH_MAX); + path_put(&path); + if (IS_ERR(pathname)) + goto out; /* mmmh... */ +copy: + /* caution: d_path() may start writing anywhere in the buffer. 
*/ + pathname = kstrdup(pathname, GFP_KERNEL); +out: + free_page((unsigned long)buf); + + return pathname; +} + +static inline int raise_cap(int cap) +{ + struct cred *new; + + new = prepare_creds(); + if (new == NULL) + return -ENOMEM; + + cap_raise(new->cap_effective, cap); + + return commit_creds(new); +} + +static int bind_personality(struct xnthread_personality *personality) +{ + struct cobalt_process *process; + void *priv; + + /* + * We also check capabilities for stacking a Cobalt extension, + * in case the process dropped the supervisor privileges after + * a successful initial binding to the Cobalt interface. + */ + if (!capable(CAP_SYS_NICE) && + (gid_arg == -1 || !in_group_p(KGIDT_INIT(gid_arg)))) + return -EPERM; + /* + * Protect from the same process binding to the same interface + * several times. + */ + priv = lookup_context(personality->xid); + if (priv) + return 0; + + priv = personality->ops.attach_process(); + if (IS_ERR(priv)) + return PTR_ERR(priv); + + process = cobalt_current_process(); + /* + * We are still covered by the personality_lock, so we may + * safely bump the module refcount after the attach handler + * has returned. + */ + if (personality->module && !try_module_get(personality->module)) { + personality->ops.detach_process(priv); + return -EAGAIN; + } + + __set_bit(personality->xid, &process->permap); + atomic_inc(&personality->refcnt); + process->priv[personality->xid] = priv; + + raise_cap(CAP_SYS_NICE); + raise_cap(CAP_IPC_LOCK); + raise_cap(CAP_SYS_RAWIO); + + return 0; +} + +int cobalt_bind_personality(unsigned int magic) +{ + struct xnthread_personality *personality; + int xid, ret = -ESRCH; + + mutex_lock(&personality_lock); + + for (xid = 1; xid < NR_PERSONALITIES; xid++) { + personality = cobalt_personalities[xid]; + if (personality && personality->magic == magic) { + ret = bind_personality(personality); + break; + } + } + + mutex_unlock(&personality_lock); + + return ret ?: xid; +} + +int cobalt_bind_core(int ufeatures) +{ + struct cobalt_process *process; + int ret; + + mutex_lock(&personality_lock); + ret = bind_personality(&cobalt_personality); + mutex_unlock(&personality_lock); + if (ret) + return ret; + + process = cobalt_current_process(); + /* Feature set userland knows about. */ + process->ufeatures = ufeatures; + + return 0; +} + +/** + * @fn int cobalt_register_personality(struct xnthread_personality *personality) + * @internal + * @brief Register a new interface personality. + * + * - personality->ops.attach_process() is called when a user-space + * process binds to the personality, on behalf of one of its + * threads. The attach_process() handler may return: + * + * . an opaque pointer, representing the context of the calling + * process for this personality; + * + * . a NULL pointer, meaning that no per-process structure should be + * attached to this process for this personality; + * + * . ERR_PTR(negative value) indicating an error, the binding + * process will then abort. + * + * - personality->ops.detach_process() is called on behalf of an + * exiting user-space process which has previously attached to the + * personality. This handler is passed a pointer to the per-process + * data received earlier from the ops->attach_process() handler. + * + * @return the personality (extension) identifier. + * + * @note cobalt_get_context() is NULL when ops.detach_process() is + * invoked for the personality the caller detaches from. 
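The attach/detach handlers described above could be supplied by a module along these lines (sketch only: every "mydrv" identifier is invented, and only the xnthread_personality fields already used in this file are assumed):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>

static void *mydrv_attach_process(void)
{
        void *priv = kzalloc(64, GFP_KERNEL);   /* arbitrary per-process data */

        return priv ?: ERR_PTR(-ENOMEM);
}

static void mydrv_detach_process(void *priv)
{
        kfree(priv);    /* runs when the bound process exits */
}

static struct xnthread_personality mydrv_personality = {
        .name = "mydrv",
        .magic = 0x4d594456,            /* matched by cobalt_bind_personality() */
        .module = THIS_MODULE,
        .ops = {
                .attach_process = mydrv_attach_process,
                .detach_process = mydrv_detach_process,
        },
};

static int mydrv_xid;

static int __init mydrv_init(void)
{
        mydrv_xid = cobalt_register_personality(&mydrv_personality);

        return mydrv_xid < 0 ? mydrv_xid : 0;
}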
+ * + * @coretags{secondary-only} + */ +int cobalt_register_personality(struct xnthread_personality *personality) +{ + int xid; + + mutex_lock(&personality_lock); + + for (xid = 0; xid < NR_PERSONALITIES; xid++) { + if (cobalt_personalities[xid] == NULL) { + personality->xid = xid; + atomic_set(&personality->refcnt, 0); + cobalt_personalities[xid] = personality; + goto out; + } + } + + xid = -EAGAIN; +out: + mutex_unlock(&personality_lock); + + return xid; +} +EXPORT_SYMBOL_GPL(cobalt_register_personality); + +/* + * @brief Unregister an interface personality. + * + * @coretags{secondary-only} + */ +int cobalt_unregister_personality(int xid) +{ + struct xnthread_personality *personality; + int ret = 0; + + if (xid < 0 || xid >= NR_PERSONALITIES) + return -EINVAL; + + mutex_lock(&personality_lock); + + personality = cobalt_personalities[xid]; + if (atomic_read(&personality->refcnt) > 0) + ret = -EBUSY; + else + cobalt_personalities[xid] = NULL; + + mutex_unlock(&personality_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(cobalt_unregister_personality); + +/** + * Stack a new personality over Cobalt for the current thread. + * + * This service registers the current thread as a member of the + * additional personality identified by @a xid. If the current thread + * is already assigned this personality, the call returns successfully + * with no effect. + * + * @param xid the identifier of the additional personality. + * + * @return A handle to the previous personality. The caller should + * save this handle for unstacking @a xid when applicable via a call + * to cobalt_pop_personality(). + * + * @coretags{secondary-only} + */ +struct xnthread_personality * +cobalt_push_personality(int xid) +{ + struct cobalt_threadinfo *p = pipeline_current(); + struct xnthread_personality *prev, *next; + struct xnthread *thread = p->thread; + + secondary_mode_only(); + + mutex_lock(&personality_lock); + + if (xid < 0 || xid >= NR_PERSONALITIES || + p->process == NULL || !test_bit(xid, &p->process->permap)) { + mutex_unlock(&personality_lock); + return NULL; + } + + next = cobalt_personalities[xid]; + prev = thread->personality; + if (next == prev) { + mutex_unlock(&personality_lock); + return prev; + } + + thread->personality = next; + mutex_unlock(&personality_lock); + xnthread_run_handler(thread, map_thread); + + return prev; +} +EXPORT_SYMBOL_GPL(cobalt_push_personality); + +/** + * Pop the topmost personality from the current thread. + * + * This service pops the topmost personality off the current thread. + * + * @param prev the previous personality which was returned by the + * latest call to cobalt_push_personality() for the current thread. + * + * @coretags{secondary-only} + */ +void cobalt_pop_personality(struct xnthread_personality *prev) +{ + struct cobalt_threadinfo *p = pipeline_current(); + struct xnthread *thread = p->thread; + + secondary_mode_only(); + thread->personality = prev; +} +EXPORT_SYMBOL_GPL(cobalt_pop_personality); + +/** + * Return the per-process data attached to the calling user process. + * + * This service returns the per-process data attached to the calling + * user process for the personality whose xid is @a xid. + * + * The per-process data was obtained from the ->attach_process() + * handler defined for the personality @a xid refers to. + * + * See cobalt_register_personality() documentation for information on + * the way to attach a per-process data to a process. + * + * @param xid the personality identifier. 
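Continuing the same hypothetical extension, a request handler would stack its personality over Cobalt for the calling thread and fetch the per-process data produced by its attach_process() handler (sketch; mydrv_xid is the value returned by cobalt_register_personality() above):

static int mydrv_request_handler(void)
{
        struct xnthread_personality *prev;
        void *ctx;

        prev = cobalt_push_personality(mydrv_xid);
        if (prev == NULL)
                return -EPERM;  /* calling process is not bound to this extension */

        ctx = cobalt_get_context(mydrv_xid);    /* ->attach_process() result */

        /* ... use ctx ... */

        cobalt_pop_personality(prev);

        return 0;
}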
+ * + * @return the per-process data if the current context is a user-space + * process; @return NULL otherwise. As a special case, + * cobalt_get_context(0) returns the current Cobalt process + * descriptor, which is strictly identical to calling + * cobalt_current_process(). + * + * @coretags{task-unrestricted} + */ +void *cobalt_get_context(int xid) +{ + return lookup_context(xid); +} +EXPORT_SYMBOL_GPL(cobalt_get_context); + +int cobalt_yield(xnticks_t min, xnticks_t max) +{ + xnticks_t start; + int ret; + + start = xnclock_read_monotonic(&nkclock); + max += start; + min += start; + + do { + ret = xnsynch_sleep_on(&yield_sync, max, XN_ABSOLUTE); + if (ret & XNBREAK) + return -EINTR; + } while (ret == 0 && xnclock_read_monotonic(&nkclock) < min); + + return 0; +} +EXPORT_SYMBOL_GPL(cobalt_yield); + +/** + * @fn int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff) + * @internal + * @brief Create a shadow thread context over a user task. + * + * This call maps a Xenomai thread to the current regular Linux task + * running in userland. The priority and scheduling class of the + * underlying Linux task are not affected; it is assumed that the + * interface library did set them appropriately before issuing the + * shadow mapping request. + * + * @param thread The descriptor address of the new shadow thread to be + * mapped to current. This descriptor must have been previously + * initialized by a call to xnthread_init(). + * + * @param u_winoff will receive the offset of the per-thread + * "u_window" structure in the global heap associated to @a + * thread. This structure reflects thread state information visible + * from userland through a shared memory window. + * + * @return 0 is returned on success. Otherwise: + * + * - -EINVAL is returned if the thread control block does not bear the + * XNUSER bit. + * + * - -EBUSY is returned if either the current Linux task or the + * associated shadow thread is already involved in a shadow mapping. + * + * @coretags{secondary-only} + */ +int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff) +{ + struct xnthread_user_window *u_window; + struct xnthread_start_attr attr; + struct cobalt_ppd *sys_ppd; + struct cobalt_umm *umm; + int ret; + + if (!xnthread_test_state(thread, XNUSER)) + return -EINVAL; + + if (xnthread_current() || xnthread_test_state(thread, XNMAPPED)) + return -EBUSY; + + if (!access_wok(u_winoff, sizeof(*u_winoff))) + return -EFAULT; + + ret = pipeline_prepare_current(); + if (ret) + return ret; + + umm = &cobalt_kernel_ppd.umm; + u_window = cobalt_umm_zalloc(umm, sizeof(*u_window)); + if (u_window == NULL) + return -ENOMEM; + + thread->u_window = u_window; + __xn_put_user(cobalt_umm_offset(umm, u_window), u_winoff); + xnthread_pin_initial(thread); + + /* + * CAUTION: we enable the pipeline notifier only when our + * shadow TCB is consistent, so that we won't trigger false + * positive in debug code from handle_schedule_event() and + * friends. + */ + pipeline_init_shadow_tcb(thread); + xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL); + pipeline_attach_current(thread); + xnthread_set_state(thread, XNMAPPED); + xndebug_shadow_init(thread); + sys_ppd = cobalt_ppd_get(0); + atomic_inc(&sys_ppd->refcnt); + /* + * ->map_thread() handler is invoked after the TCB is fully + * built, and when we know for sure that current will go + * through our task-exit handler, because it has a shadow + * extension and I-pipe notifications will soon be enabled for + * it. 
+ */ + xnthread_run_handler(thread, map_thread); + pipeline_enable_kevents(); + + attr.mode = 0; + attr.entry = NULL; + attr.cookie = NULL; + ret = xnthread_start(thread, &attr); + if (ret) + return ret; + + xnthread_sync_window(thread); + + xntrace_pid(xnthread_host_pid(thread), + xnthread_current_priority(thread)); + + return 0; +} + +void cobalt_signal_yield(void) +{ + spl_t s; + + if (!xnsynch_pended_p(&yield_sync)) + return; + + xnlock_get_irqsave(&nklock, s); + if (xnsynch_pended_p(&yield_sync)) { + xnsynch_flush(&yield_sync, 0); + xnsched_run(); + } + xnlock_put_irqrestore(&nklock, s); +} + +static inline struct cobalt_process * +process_from_thread(struct xnthread *thread) +{ + return container_of(thread, struct cobalt_thread, threadbase)->process; +} + +void cobalt_stop_debugged_process(struct xnthread *thread) +{ + struct cobalt_process *process = process_from_thread(thread); + struct cobalt_thread *cth; + + if (process->debugged_threads > 0) + return; + + list_for_each_entry(cth, &process->thread_list, next) { + if (&cth->threadbase == thread) + continue; + + xnthread_suspend(&cth->threadbase, XNDBGSTOP, XN_INFINITE, + XN_RELATIVE, NULL); + } +} + +static void cobalt_resume_debugged_process(struct cobalt_process *process) +{ + struct cobalt_thread *cth; + + xnsched_lock(); + + list_for_each_entry(cth, &process->thread_list, next) + if (xnthread_test_state(&cth->threadbase, XNDBGSTOP)) + xnthread_resume(&cth->threadbase, XNDBGSTOP); + + xnsched_unlock(); +} + +/* called with nklock held */ +void cobalt_register_debugged_thread(struct xnthread *thread) +{ + struct cobalt_process *process = process_from_thread(thread); + + xnthread_set_state(thread, XNSSTEP); + + cobalt_stop_debugged_process(thread); + process->debugged_threads++; + + if (xnthread_test_state(thread, XNRELAX)) + xnthread_suspend(thread, XNDBGSTOP, XN_INFINITE, XN_RELATIVE, + NULL); +} + +/* called with nklock held */ +void cobalt_unregister_debugged_thread(struct xnthread *thread) +{ + struct cobalt_process *process = process_from_thread(thread); + + process->debugged_threads--; + xnthread_clear_state(thread, XNSSTEP); + + if (process->debugged_threads == 0) + cobalt_resume_debugged_process(process); +} + +int cobalt_handle_setaffinity_event(struct task_struct *task) +{ +#ifdef CONFIG_SMP + struct xnthread *thread; + spl_t s; + + thread = xnthread_from_task(task); + if (thread == NULL) + return KEVENT_PROPAGATE; + + /* + * Detect a Cobalt thread sleeping in primary mode which is + * required to migrate to another CPU by the host kernel. + * + * We may NOT fix up thread->sched immediately using the + * passive migration call, because that latter always has to + * take place on behalf of the target thread itself while + * running in secondary mode. Therefore, that thread needs to + * go through secondary mode first, then move back to primary + * mode, so that affinity_ok() does the fixup work. + * + * We force this by sending a SIGSHADOW signal to the migrated + * thread, asking it to switch back to primary mode from the + * handler, at which point the interrupted syscall may be + * restarted. 
+ */ + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_state(thread, XNTHREAD_BLOCK_BITS & ~XNRELAX)) + __xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HARDEN); + + xnlock_put_irqrestore(&nklock, s); +#endif /* CONFIG_SMP */ + + return KEVENT_PROPAGATE; +} + +#ifdef CONFIG_SMP +void cobalt_adjust_affinity(struct task_struct *task) /* nklocked, IRQs off */ +{ + struct xnthread *thread = xnthread_from_task(task); + struct xnsched *sched; + int cpu = task_cpu(task); + + /* + * To maintain consistency between both Cobalt and host + * schedulers, reflecting a thread migration to another CPU + * into the Cobalt scheduler state must happen from secondary + * mode only, on behalf of the migrated thread itself once it + * runs on the target CPU. + * + * This means that the Cobalt scheduler state regarding the + * CPU information lags behind the host scheduler state until + * the migrated thread switches back to primary mode + * (i.e. task_cpu(p) != xnsched_cpu(xnthread_from_task(p)->sched)). + * This is ok since Cobalt does not schedule such thread until then. + * + * check_affinity() detects when a Cobalt thread switching + * back to primary mode did move to another CPU earlier while + * in secondary mode. If so, do the fixups to reflect the + * change. + */ + if (!xnsched_threading_cpu(cpu)) { + /* + * The thread is about to switch to primary mode on a + * non-rt CPU, which is damn wrong and hopeless. + * Whine and cancel that thread. + */ + printk(XENO_WARNING "thread %s[%d] switched to non-rt CPU%d, aborted.\n", + thread->name, xnthread_host_pid(thread), cpu); + /* + * Can't call xnthread_cancel() from a migration + * point, that would break. Since we are on the wakeup + * path to hardening, just raise XNCANCELD to catch it + * in xnthread_harden(). + */ + xnthread_set_info(thread, XNCANCELD); + return; + } + + sched = xnsched_struct(cpu); + if (sched == thread->sched) + return; + + /* + * The current thread moved to a supported real-time CPU, + * which is not part of its original affinity mask + * though. Assume user wants to extend this mask. + */ + if (!cpumask_test_cpu(cpu, &thread->affinity)) + cpumask_set_cpu(cpu, &thread->affinity); + + xnthread_run_handler_stack(thread, move_thread, cpu); + xnthread_migrate_passive(thread, sched); +} +#endif /* CONFIG_SMP */ + +static void __handle_taskexit_event(struct task_struct *p) +{ + struct cobalt_ppd *sys_ppd; + struct xnthread *thread; + spl_t s; + + /* + * We are called for both kernel and user shadows over the + * root thread. 
+ */ + secondary_mode_only(); + + thread = xnthread_current(); + XENO_BUG_ON(COBALT, thread == NULL); + trace_cobalt_shadow_unmap(thread); + + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_state(thread, XNSSTEP)) + cobalt_unregister_debugged_thread(thread); + + xnsched_run(); + + xnlock_put_irqrestore(&nklock, s); + + xnthread_run_handler_stack(thread, exit_thread); + + if (xnthread_test_state(thread, XNUSER)) { + cobalt_umm_free(&cobalt_kernel_ppd.umm, thread->u_window); + thread->u_window = NULL; + sys_ppd = cobalt_ppd_get(0); + if (atomic_dec_and_test(&sys_ppd->refcnt)) + cobalt_remove_process(cobalt_current_process()); + } +} + +int cobalt_handle_user_return(struct task_struct *task) +{ + struct xnthread *thread; + spl_t s; + int err; + + thread = xnthread_from_task(task); + if (thread == NULL) + return KEVENT_PROPAGATE; + + if (xnthread_test_info(thread, XNCONTHI)) { + xnlock_get_irqsave(&nklock, s); + xnthread_clear_info(thread, XNCONTHI); + xnlock_put_irqrestore(&nklock, s); + + err = xnthread_harden(); + + /* + * XNCONTHI may or may not have been re-applied if + * harden bailed out due to pending signals. Make sure + * it is set in that case. + */ + if (err == -ERESTARTSYS) { + xnlock_get_irqsave(&nklock, s); + xnthread_set_info(thread, XNCONTHI); + xnlock_put_irqrestore(&nklock, s); + } + } + + return KEVENT_PROPAGATE; +} + +static void detach_current(void) +{ + struct cobalt_threadinfo *p = pipeline_current(); + + p->thread = NULL; + p->process = NULL; +} + +int cobalt_handle_taskexit_event(struct task_struct *task) /* task == current */ +{ + __handle_taskexit_event(task); + + /* + * __xnthread_cleanup() -> ... -> finalize_thread + * handler. From that point, the TCB is dropped. Be careful of + * not treading on stale memory within @thread. + */ + __xnthread_cleanup(xnthread_current()); + + detach_current(); + + return KEVENT_PROPAGATE; +} + +int cobalt_handle_cleanup_event(struct mm_struct *mm) +{ + struct cobalt_process *old, *process; + struct cobalt_ppd *sys_ppd; + struct xnthread *curr; + + /* + * We are NOT called for exiting kernel shadows. + * cobalt_current_process() is cleared if we get there after + * handle_task_exit(), so we need to restore this context + * pointer temporarily. + */ + process = cobalt_search_process(mm); + old = cobalt_set_process(process); + sys_ppd = cobalt_ppd_get(0); + if (sys_ppd != &cobalt_kernel_ppd) { + bool running_exec; + + /* + * Detect a userland shadow running exec(), i.e. still + * attached to the current linux task (no prior + * detach_current). In this case, we emulate a task + * exit, since the Xenomai binding shall not survive + * the exec() syscall. Since the process will keep on + * running though, we have to disable the event + * notifier manually for it. + */ + curr = xnthread_current(); + running_exec = curr && (current->flags & PF_EXITING) == 0; + if (running_exec) { + __handle_taskexit_event(current); + pipeline_cleanup_process(); + } + if (atomic_dec_and_test(&sys_ppd->refcnt)) + cobalt_remove_process(process); + if (running_exec) { + __xnthread_cleanup(curr); + detach_current(); + } + } + + /* + * CAUTION: Do not override a state change caused by + * cobalt_remove_process(). 
+ */ + if (cobalt_current_process() == process) + cobalt_set_process(old); + + return KEVENT_PROPAGATE; +} + +static int attach_process(struct cobalt_process *process) +{ + struct cobalt_ppd *p = &process->sys_ppd; + char *exe_path; + int ret; + + ret = cobalt_umm_init(&p->umm, CONFIG_XENO_OPT_PRIVATE_HEAPSZ * 1024, + post_ppd_release); + if (ret) + return ret; + + cobalt_umm_set_name(&p->umm, "private heap[%d]", task_pid_nr(current)); + + ret = pipeline_attach_process(process); + if (ret) + goto fail_pipeline; + + exe_path = get_exe_path(current); + if (IS_ERR(exe_path)) { + printk(XENO_WARNING + "%s[%d] can't find exe path\n", + current->comm, task_pid_nr(current)); + exe_path = NULL; /* Not lethal, but weird. */ + } + p->exe_path = exe_path; + xntree_init(&p->fds); + atomic_set(&p->refcnt, 1); + + ret = process_hash_enter(process); + if (ret) + goto fail_hash; + + return 0; +fail_hash: + pipeline_detach_process(process); + if (p->exe_path) + kfree(p->exe_path); +fail_pipeline: + cobalt_umm_destroy(&p->umm); + + return ret; +} + +static void *cobalt_process_attach(void) +{ + struct cobalt_process *process; + int ret; + + process = kzalloc(sizeof(*process), GFP_KERNEL); + if (process == NULL) + return ERR_PTR(-ENOMEM); + + ret = attach_process(process); + if (ret) { + kfree(process); + return ERR_PTR(ret); + } + + INIT_LIST_HEAD(&process->resources.condq); + INIT_LIST_HEAD(&process->resources.mutexq); + INIT_LIST_HEAD(&process->resources.semq); + INIT_LIST_HEAD(&process->resources.monitorq); + INIT_LIST_HEAD(&process->resources.eventq); + INIT_LIST_HEAD(&process->resources.schedq); + INIT_LIST_HEAD(&process->sigwaiters); + INIT_LIST_HEAD(&process->thread_list); + xntree_init(&process->usems); + bitmap_fill(process->timers_map, CONFIG_XENO_OPT_NRTIMERS); + cobalt_set_process(process); + + return process; +} + +static void detach_process(struct cobalt_process *process) +{ + struct cobalt_ppd *p = &process->sys_ppd; + + if (p->exe_path) + kfree(p->exe_path); + + rtdm_fd_cleanup(p); + process_hash_remove(process); + /* + * CAUTION: the process descriptor might be immediately + * released as a result of calling cobalt_umm_destroy(), so we + * must do this last, not to tread on stale memory. 
+ */ + cobalt_umm_destroy(&p->umm); +} + +static void __reclaim_resource(struct cobalt_process *process, + void (*reclaim)(struct cobalt_resnode *node, spl_t s), + struct list_head *local, + struct list_head *global) +{ + struct cobalt_resnode *node, *tmp; + LIST_HEAD(stash); + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (list_empty(global)) + goto flush_local; + + list_for_each_entry_safe(node, tmp, global, next) { + if (node->owner == process) { + list_del(&node->next); + list_add(&node->next, &stash); + } + } + + list_for_each_entry_safe(node, tmp, &stash, next) { + reclaim(node, s); + xnlock_get_irqsave(&nklock, s); + } + + XENO_BUG_ON(COBALT, !list_empty(&stash)); + +flush_local: + if (list_empty(local)) + goto out; + + list_for_each_entry_safe(node, tmp, local, next) { + reclaim(node, s); + xnlock_get_irqsave(&nklock, s); + } +out: + xnsched_run(); + xnlock_put_irqrestore(&nklock, s); +} + +#define cobalt_reclaim_resource(__process, __reclaim, __type) \ + __reclaim_resource(__process, __reclaim, \ + &(__process)->resources.__type ## q, \ + &cobalt_global_resources.__type ## q) + +static void cobalt_process_detach(void *arg) +{ + struct cobalt_process *process = arg; + + cobalt_nsem_reclaim(process); + cobalt_timer_reclaim(process); + cobalt_sched_reclaim(process); + cobalt_reclaim_resource(process, cobalt_cond_reclaim, cond); + cobalt_reclaim_resource(process, cobalt_mutex_reclaim, mutex); + cobalt_reclaim_resource(process, cobalt_event_reclaim, event); + cobalt_reclaim_resource(process, cobalt_monitor_reclaim, monitor); + cobalt_reclaim_resource(process, cobalt_sem_reclaim, sem); + detach_process(process); + /* + * The cobalt_process descriptor release may be deferred until + * the last mapping on the private heap is gone. However, this + * is potentially stale memory already. + */ +} + +struct xnthread_personality cobalt_personality = { + .name = "cobalt", + .magic = 0, + .ops = { + .attach_process = cobalt_process_attach, + .detach_process = cobalt_process_detach, + .map_thread = cobalt_thread_map, + .exit_thread = cobalt_thread_exit, + .finalize_thread = cobalt_thread_finalize, + }, +}; +EXPORT_SYMBOL_GPL(cobalt_personality); + +__init int cobalt_init(void) +{ + unsigned int i, size; + int ret; + + size = sizeof(*process_hash) * PROCESS_HASH_SIZE; + process_hash = kmalloc(size, GFP_KERNEL); + if (process_hash == NULL) { + printk(XENO_ERR "cannot allocate processes hash table\n"); + return -ENOMEM; + } + + ret = xndebug_init(); + if (ret) + goto fail_debug; + + for (i = 0; i < PROCESS_HASH_SIZE; i++) + INIT_HLIST_HEAD(&process_hash[i]); + + xnsynch_init(&yield_sync, XNSYNCH_FIFO, NULL); + + ret = cobalt_memdev_init(); + if (ret) + goto fail_memdev; + + ret = cobalt_register_personality(&cobalt_personality); + if (ret) + goto fail_register; + + ret = cobalt_signal_init(); + if (ret) + goto fail_siginit; + + ret = pipeline_trap_kevents(); + if (ret) + goto fail_kevents; + + if (gid_arg != -1) + printk(XENO_INFO "allowing access to group %d\n", gid_arg); + + return 0; +fail_kevents: + cobalt_signal_cleanup(); +fail_siginit: + cobalt_unregister_personality(0); +fail_register: + cobalt_memdev_cleanup(); +fail_memdev: + xnsynch_destroy(&yield_sync); + xndebug_cleanup(); +fail_debug: + kfree(process_hash); + + return ret; +} --- linux/kernel/xenomai/posix/nsem.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/nsem.c 2022-03-21 12:58:29.019892569 +0100 @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2013 Gilles Chanteperdrix . 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include "internal.h" +#include "sem.h" +#include "thread.h" +#include + +DEFINE_PRIVATE_XNLOCK(named_sem_lock); + +struct cobalt_named_sem { + struct cobalt_sem *sem; + struct cobalt_sem_shadow __user *usem; + unsigned int refs; + struct xnid id; +}; + +static struct cobalt_named_sem * +sem_search(struct cobalt_process *process, xnhandle_t handle) +{ + struct xnid *i; + + i = xnid_fetch(&process->usems, handle); + if (i == NULL) + return NULL; + + return container_of(i, struct cobalt_named_sem, id); +} + +static struct cobalt_sem_shadow __user * +sem_open(struct cobalt_process *process, + struct cobalt_sem_shadow __user *ushadow, + struct filename *filename, int oflags, mode_t mode, + unsigned int value) +{ + const char *name = filename->name; + struct cobalt_sem_shadow shadow; + struct cobalt_named_sem *u, *v; + struct cobalt_sem *sem; + xnhandle_t handle; + spl_t s; + int rc; + + if (name[0] != '/' || name[1] == '\0') + return ERR_PTR(-EINVAL); + + retry_bind: + rc = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle); + switch (rc) { + case 0: + /* Found */ + if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) + return ERR_PTR(-EEXIST); + + xnlock_get_irqsave(&named_sem_lock, s); + u = sem_search(process, handle); + if (u) { + ++u->refs; + xnlock_put_irqrestore(&named_sem_lock, s); + return u->usem; + } + xnlock_put_irqrestore(&named_sem_lock, s); + + xnlock_get_irqsave(&nklock, s); + sem = xnregistry_lookup(handle, NULL); + if (sem && sem->magic != COBALT_SEM_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return ERR_PTR(-EINVAL); + } + + if (sem) { + ++sem->refs; + xnlock_put_irqrestore(&nklock, s); + } else { + xnlock_put_irqrestore(&nklock, s); + goto retry_bind; + } + + __cobalt_sem_shadow_init(sem, COBALT_NAMED_SEM_MAGIC, &shadow); + break; + + case -EWOULDBLOCK: + /* Not found */ + if ((oflags & O_CREAT) == 0) + return ERR_PTR(-ENOENT); + + shadow.magic = 0; + sem = __cobalt_sem_init(&name[1], &shadow, + SEM_PSHARED | SEM_NAMED, value); + if (IS_ERR(sem)) { + rc = PTR_ERR(sem); + if (rc == -EEXIST) + goto retry_bind; + return ERR_PTR(rc); + } + + sem->pathname = filename; + handle = shadow.handle; + break; + + default: + return ERR_PTR(rc); + } + + if (cobalt_copy_to_user(ushadow, &shadow, sizeof(shadow))) { + __cobalt_sem_destroy(handle); + return ERR_PTR(-EFAULT); + } + + u = xnmalloc(sizeof(*u)); + if (u == NULL) { + __cobalt_sem_destroy(handle); + return ERR_PTR(-ENOMEM); + } + + u->sem = sem; + u->usem = ushadow; + u->refs = 1; + + xnlock_get_irqsave(&named_sem_lock, s); + v = sem_search(process, handle); + if (v) { + ++v->refs; + xnlock_put_irqrestore(&named_sem_lock, s); + xnlock_get_irqsave(&nklock, s); + --sem->refs; + xnlock_put_irqrestore(&nklock, s); + putname(filename); + 
xnfree(u); + u = v; + } else { + xnid_enter(&process->usems, &u->id, handle); + xnlock_put_irqrestore(&named_sem_lock, s); + } + + trace_cobalt_psem_open(name, handle, oflags, mode, value); + + return u->usem; +} + +static int sem_close(struct cobalt_process *process, xnhandle_t handle) +{ + struct cobalt_named_sem *u; + spl_t s; + int err; + + xnlock_get_irqsave(&named_sem_lock, s); + u = sem_search(process, handle); + if (u == NULL) { + err = -ENOENT; + goto err_unlock; + } + + if (--u->refs) { + err = 0; + goto err_unlock; + } + + xnid_remove(&process->usems, &u->id); + xnlock_put_irqrestore(&named_sem_lock, s); + + __cobalt_sem_destroy(handle); + + xnfree(u); + return 1; + + err_unlock: + xnlock_put_irqrestore(&named_sem_lock, s); + return err; +} + +struct cobalt_sem_shadow __user * +__cobalt_sem_open(struct cobalt_sem_shadow __user *usm, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value) +{ + struct cobalt_process *process; + struct filename *filename; + + process = cobalt_current_process(); + if (process == NULL) + return ERR_PTR(-EPERM); + + filename = getname(u_name); + if (IS_ERR(filename)) + return ERR_CAST(filename); + + usm = sem_open(process, usm, filename, oflags, mode, value); + if (IS_ERR(usm)) { + trace_cobalt_psem_open_failed(filename->name, oflags, mode, + value, PTR_ERR(usm)); + putname(filename); + } + + return usm; +} + +COBALT_SYSCALL(sem_open, lostage, + (struct cobalt_sem_shadow __user *__user *u_addrp, + const char __user *u_name, + int oflags, mode_t mode, unsigned int value)) +{ + struct cobalt_sem_shadow __user *usm; + + if (__xn_get_user(usm, u_addrp)) + return -EFAULT; + + usm = __cobalt_sem_open(usm, u_name, oflags, mode, value); + if (IS_ERR(usm)) + return PTR_ERR(usm); + + return __xn_put_user(usm, u_addrp) ? -EFAULT : 0; +} + +COBALT_SYSCALL(sem_close, lostage, + (struct cobalt_sem_shadow __user *usm)) +{ + struct cobalt_process *process; + xnhandle_t handle; + + process = cobalt_current_process(); + if (process == NULL) + return -EPERM; + + handle = cobalt_get_handle_from_user(&usm->handle); + trace_cobalt_psem_close(handle); + + return sem_close(process, handle); +} + +static inline int sem_unlink(const char *name) +{ + xnhandle_t handle; + int ret; + + if (name[0] != '/') + return -EINVAL; + + ret = xnregistry_bind(name + 1, XN_NONBLOCK, XN_RELATIVE, &handle); + if (ret == -EWOULDBLOCK) + return -ENOENT; + + if (__cobalt_sem_destroy(handle) == -EBUSY) + xnregistry_unlink(xnregistry_key(handle)); + + return 0; +} + +COBALT_SYSCALL(sem_unlink, lostage, + (const char __user *u_name)) +{ + struct filename *filename; + int ret; + + filename = getname(u_name); + if (IS_ERR(filename)) + return PTR_ERR(filename); + + trace_cobalt_psem_unlink(filename->name); + ret = sem_unlink(filename->name); + putname(filename); + + return ret; +} + +static void reclaim_named_sem(void *arg, struct xnid *i) +{ + struct cobalt_process *process = arg; + struct cobalt_named_sem *u; + + u = container_of(i, struct cobalt_named_sem, id); + u->refs = 1; + sem_close(process, xnid_key(i)); +} + +void cobalt_nsem_reclaim(struct cobalt_process *process) +{ + xntree_cleanup(&process->usems, process, reclaim_named_sem); +} --- linux/kernel/xenomai/posix/thread.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/thread.c 2022-03-21 12:58:29.015892608 +0100 @@ -0,0 +1,954 @@ +/* + * Written by Gilles Chanteperdrix . 
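At the POSIX level the handlers above back the usual named-semaphore calls; a small userland sketch, assuming libcobalt wraps the standard API (the semaphore name is arbitrary):

#include <fcntl.h>
#include <semaphore.h>

static int post_named_sem(void)
{
        sem_t *sem = sem_open("/mydrv-sem", O_CREAT, 0666, 0);

        if (sem == SEM_FAILED)
                return -1;

        sem_post(sem);                  /* wake one waiter */
        sem_close(sem);                 /* drop this process' reference */
        sem_unlink("/mydrv-sem");       /* destroyed once the last user closes it */

        return 0;
}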
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include "internal.h" +#include "thread.h" +#include "sched.h" +#include "signal.h" +#include "timer.h" +#include "clock.h" +#include "sem.h" +#define CREATE_TRACE_POINTS +#include + +xnticks_t cobalt_time_slice = CONFIG_XENO_OPT_RR_QUANTUM * 1000; + +#define PTHREAD_HSLOTS (1 << 8) /* Must be a power of 2 */ + +/* Process-local index, pthread_t x mm_struct (cobalt_local_hkey). */ +struct local_thread_hash { + pid_t pid; + struct cobalt_thread *thread; + struct cobalt_local_hkey hkey; + struct local_thread_hash *next; +}; + +/* System-wide index on task_pid_nr(). */ +struct global_thread_hash { + pid_t pid; + struct cobalt_thread *thread; + struct global_thread_hash *next; +}; + +static struct local_thread_hash *local_index[PTHREAD_HSLOTS]; + +static struct global_thread_hash *global_index[PTHREAD_HSLOTS]; + +static inline struct local_thread_hash * +thread_hash(const struct cobalt_local_hkey *hkey, + struct cobalt_thread *thread, pid_t pid) +{ + struct global_thread_hash **ghead, *gslot; + struct local_thread_hash **lhead, *lslot; + u32 hash; + void *p; + spl_t s; + + p = xnmalloc(sizeof(*lslot) + sizeof(*gslot)); + if (p == NULL) + return NULL; + + lslot = p; + lslot->hkey = *hkey; + lslot->thread = thread; + lslot->pid = pid; + hash = jhash2((u32 *)&lslot->hkey, + sizeof(lslot->hkey) / sizeof(u32), 0); + lhead = &local_index[hash & (PTHREAD_HSLOTS - 1)]; + + gslot = p + sizeof(*lslot); + gslot->pid = pid; + gslot->thread = thread; + hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0); + ghead = &global_index[hash & (PTHREAD_HSLOTS - 1)]; + + xnlock_get_irqsave(&nklock, s); + lslot->next = *lhead; + *lhead = lslot; + gslot->next = *ghead; + *ghead = gslot; + xnlock_put_irqrestore(&nklock, s); + + return lslot; +} + +static inline void thread_unhash(const struct cobalt_local_hkey *hkey) +{ + struct global_thread_hash **gtail, *gslot; + struct local_thread_hash **ltail, *lslot; + pid_t pid; + u32 hash; + spl_t s; + + hash = jhash2((u32 *) hkey, sizeof(*hkey) / sizeof(u32), 0); + ltail = &local_index[hash & (PTHREAD_HSLOTS - 1)]; + + xnlock_get_irqsave(&nklock, s); + + lslot = *ltail; + while (lslot && + (lslot->hkey.u_pth != hkey->u_pth || + lslot->hkey.mm != hkey->mm)) { + ltail = &lslot->next; + lslot = *ltail; + } + + if (lslot == NULL) { + xnlock_put_irqrestore(&nklock, s); + return; + } + + *ltail = lslot->next; + pid = lslot->pid; + hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0); + gtail = &global_index[hash & (PTHREAD_HSLOTS - 1)]; + gslot = *gtail; + while (gslot && gslot->pid != pid) { + gtail = &gslot->next; + gslot = *gtail; + } + /* gslot must be found here. 
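Both hash slots were inserted atomically by thread_hash() while holding nklock, so a hit on the local index implies a matching global entry.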
*/ + XENO_BUG_ON(COBALT, !(gslot && gtail)); + *gtail = gslot->next; + + xnlock_put_irqrestore(&nklock, s); + + xnfree(lslot); +} + +static struct cobalt_thread * +thread_lookup(const struct cobalt_local_hkey *hkey) +{ + struct local_thread_hash *lslot; + struct cobalt_thread *thread; + u32 hash; + spl_t s; + + hash = jhash2((u32 *)hkey, sizeof(*hkey) / sizeof(u32), 0); + lslot = local_index[hash & (PTHREAD_HSLOTS - 1)]; + + xnlock_get_irqsave(&nklock, s); + + while (lslot != NULL && + (lslot->hkey.u_pth != hkey->u_pth || lslot->hkey.mm != hkey->mm)) + lslot = lslot->next; + + thread = lslot ? lslot->thread : NULL; + + xnlock_put_irqrestore(&nklock, s); + + return thread; +} + +struct cobalt_thread *cobalt_thread_find(pid_t pid) /* nklocked, IRQs off */ +{ + struct global_thread_hash *gslot; + u32 hash; + + hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0); + + gslot = global_index[hash & (PTHREAD_HSLOTS - 1)]; + while (gslot && gslot->pid != pid) + gslot = gslot->next; + + return gslot ? gslot->thread : NULL; +} +EXPORT_SYMBOL_GPL(cobalt_thread_find); + +struct cobalt_thread *cobalt_thread_find_local(pid_t pid) /* nklocked, IRQs off */ +{ + struct cobalt_thread *thread; + + thread = cobalt_thread_find(pid); + if (thread == NULL || thread->hkey.mm != current->mm) + return NULL; + + return thread; +} +EXPORT_SYMBOL_GPL(cobalt_thread_find_local); + +struct cobalt_thread *cobalt_thread_lookup(unsigned long pth) /* nklocked, IRQs off */ +{ + struct cobalt_local_hkey hkey; + + hkey.u_pth = pth; + hkey.mm = current->mm; + return thread_lookup(&hkey); +} +EXPORT_SYMBOL_GPL(cobalt_thread_lookup); + +void cobalt_thread_map(struct xnthread *curr) +{ + struct cobalt_thread *thread; + + thread = container_of(curr, struct cobalt_thread, threadbase); + thread->process = cobalt_current_process(); + XENO_BUG_ON(COBALT, thread->process == NULL); +} + +struct xnthread_personality *cobalt_thread_exit(struct xnthread *curr) +{ + struct cobalt_thread *thread; + spl_t s; + + thread = container_of(curr, struct cobalt_thread, threadbase); + /* + * Unhash first, to prevent further access to the TCB from + * userland. 
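+ * Once dropped from the hash tables, cobalt_thread_lookup() and cobalt_thread_find() can no longer return this thread.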
+ */ + thread_unhash(&thread->hkey); + xnlock_get_irqsave(&nklock, s); + cobalt_mark_deleted(thread); + list_del(&thread->next); + xnlock_put_irqrestore(&nklock, s); + cobalt_signal_flush(thread); + xnsynch_destroy(&thread->monitor_synch); + xnsynch_destroy(&thread->sigwait); + + return NULL; +} + +struct xnthread_personality *cobalt_thread_finalize(struct xnthread *zombie) +{ + struct cobalt_thread *thread; + + thread = container_of(zombie, struct cobalt_thread, threadbase); + xnfree(thread); + + return NULL; +} + +int __cobalt_thread_setschedparam_ex(struct cobalt_thread *thread, int policy, + const struct sched_param_ex *param_ex) +{ + struct xnsched_class *sched_class; + union xnsched_policy_param param; + xnticks_t tslice; + int ret = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC, + struct cobalt_thread)) { + ret = -ESRCH; + goto out; + } + + tslice = thread->threadbase.rrperiod; + sched_class = cobalt_sched_policy_param(¶m, policy, + param_ex, &tslice); + if (sched_class == NULL) { + ret = -EINVAL; + goto out; + } + xnthread_set_slice(&thread->threadbase, tslice); + if (cobalt_call_extension(thread_setsched, &thread->extref, ret, + sched_class, ¶m) && ret) + goto out; + ret = xnthread_set_schedparam(&thread->threadbase, + sched_class, ¶m); + xnsched_run(); +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +int __cobalt_thread_getschedparam_ex(struct cobalt_thread *thread, + int *policy_r, + struct sched_param_ex *param_ex) +{ + struct xnsched_class *base_class; + struct xnthread *base_thread; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC, + struct cobalt_thread)) { + xnlock_put_irqrestore(&nklock, s); + return -ESRCH; + } + + base_thread = &thread->threadbase; + base_class = base_thread->base_class; + *policy_r = base_class->policy; + + param_ex->sched_priority = xnthread_base_priority(base_thread); + if (param_ex->sched_priority == 0) /* SCHED_FIFO/SCHED_WEAK */ + *policy_r = SCHED_NORMAL; + + if (base_class == &xnsched_class_rt) { + if (xnthread_test_state(base_thread, XNRRB)) { + u_ns2ts(¶m_ex->sched_rr_quantum, base_thread->rrperiod); + *policy_r = SCHED_RR; + } + goto out; + } + +#ifdef CONFIG_XENO_OPT_SCHED_WEAK + if (base_class == &xnsched_class_weak) { + if (*policy_r != SCHED_WEAK) + param_ex->sched_priority = -param_ex->sched_priority; + goto out; + } +#endif +#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC + if (base_class == &xnsched_class_sporadic) { + param_ex->sched_ss_low_priority = base_thread->pss->param.low_prio; + u_ns2ts(¶m_ex->sched_ss_repl_period, base_thread->pss->param.repl_period); + u_ns2ts(¶m_ex->sched_ss_init_budget, base_thread->pss->param.init_budget); + param_ex->sched_ss_max_repl = base_thread->pss->param.max_repl; + goto out; + } +#endif +#ifdef CONFIG_XENO_OPT_SCHED_TP + if (base_class == &xnsched_class_tp) { + param_ex->sched_tp_partition = + base_thread->tps - base_thread->sched->tp.partitions; + goto out; + } +#endif +#ifdef CONFIG_XENO_OPT_SCHED_QUOTA + if (base_class == &xnsched_class_quota) { + param_ex->sched_quota_group = base_thread->quota->tgid; + goto out; + } +#endif + +out: + xnlock_put_irqrestore(&nklock, s); + + return 0; +} + +static int pthread_create(struct cobalt_thread **thread_p, + int policy, + const struct sched_param_ex *param_ex, + struct task_struct *task) +{ + struct cobalt_process *process = cobalt_current_process(); + struct xnsched_class *sched_class; + union xnsched_policy_param param; + struct 
xnthread_init_attr iattr; + struct cobalt_thread *thread; + xnticks_t tslice; + int ret, n; + spl_t s; + + thread = xnmalloc(sizeof(*thread)); + if (thread == NULL) + return -EAGAIN; + + tslice = cobalt_time_slice; + sched_class = cobalt_sched_policy_param(¶m, policy, + param_ex, &tslice); + if (sched_class == NULL) { + xnfree(thread); + return -EINVAL; + } + + iattr.name = task->comm; + iattr.flags = XNUSER|XNFPU; + iattr.personality = &cobalt_personality; + iattr.affinity = CPU_MASK_ALL; + ret = xnthread_init(&thread->threadbase, &iattr, sched_class, ¶m); + if (ret) { + xnfree(thread); + return ret; + } + + thread->magic = COBALT_THREAD_MAGIC; + xnsynch_init(&thread->monitor_synch, XNSYNCH_FIFO, NULL); + + xnsynch_init(&thread->sigwait, XNSYNCH_FIFO, NULL); + sigemptyset(&thread->sigpending); + for (n = 0; n < _NSIG; n++) + INIT_LIST_HEAD(thread->sigqueues + n); + + xnthread_set_slice(&thread->threadbase, tslice); + cobalt_set_extref(&thread->extref, NULL, NULL); + + /* + * We need an anonymous registry entry to obtain a handle for + * fast mutex locking. + */ + ret = xnthread_register(&thread->threadbase, ""); + if (ret) { + xnsynch_destroy(&thread->monitor_synch); + xnsynch_destroy(&thread->sigwait); + __xnthread_discard(&thread->threadbase); + xnfree(thread); + return ret; + } + + xnlock_get_irqsave(&nklock, s); + list_add_tail(&thread->next, process ? &process->thread_list + : &cobalt_global_thread_list); + xnlock_put_irqrestore(&nklock, s); + + thread->hkey.u_pth = 0; + thread->hkey.mm = NULL; + + *thread_p = thread; + + return 0; +} + +static void pthread_discard(struct cobalt_thread *thread) +{ + spl_t s; + + xnsynch_destroy(&thread->monitor_synch); + xnsynch_destroy(&thread->sigwait); + + xnlock_get_irqsave(&nklock, s); + list_del(&thread->next); + xnlock_put_irqrestore(&nklock, s); + __xnthread_discard(&thread->threadbase); + xnfree(thread); +} + +static inline int pthread_setmode_np(int clrmask, int setmask, int *mode_r) +{ + const int valid_flags = XNLOCK|XNWARN|XNTRAPLB; + int old; + + /* + * The conforming mode bit is actually zero, since jumping to + * this code entailed switching to primary mode already. + */ + if ((clrmask & ~valid_flags) != 0 || (setmask & ~valid_flags) != 0) + return -EINVAL; + + old = xnthread_set_mode(clrmask, setmask); + if (mode_r) + *mode_r = old; + + if ((clrmask & ~setmask) & XNLOCK) + /* Reschedule if the scheduler has been unlocked. 
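Threads made ready while the lock was held may then preempt the caller right away.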
*/ + xnsched_run(); + + return 0; +} + +static struct cobalt_thread *thread_lookup_or_shadow(unsigned long pth, + __u32 __user *u_winoff, + int *promoted_r) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + + *promoted_r = 0; + + hkey.u_pth = pth; + hkey.mm = current->mm; + + thread = thread_lookup(&hkey); + if (thread == NULL) { + if (u_winoff == NULL) + return ERR_PTR(-ESRCH); + + thread = cobalt_thread_shadow(&hkey, u_winoff); + if (!IS_ERR(thread)) + *promoted_r = 1; + } + + return thread; +} + +int cobalt_thread_setschedparam_ex(unsigned long pth, + int policy, + const struct sched_param_ex *param_ex, + __u32 __user *u_winoff, + int __user *u_promoted) +{ + struct cobalt_thread *thread; + int ret, promoted; + + trace_cobalt_pthread_setschedparam(pth, policy, param_ex); + + thread = thread_lookup_or_shadow(pth, u_winoff, &promoted); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex); + if (ret) + return ret; + + return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted)); +} + +COBALT_SYSCALL(thread_setschedparam_ex, conforming, + (unsigned long pth, + int policy, + const struct sched_param_ex __user *u_param, + __u32 __user *u_winoff, + int __user *u_promoted)) +{ + struct sched_param_ex param_ex; + + if (cobalt_copy_from_user(¶m_ex, u_param, sizeof(param_ex))) + return -EFAULT; + + return cobalt_thread_setschedparam_ex(pth, policy, ¶m_ex, + u_winoff, u_promoted); +} + +int cobalt_thread_getschedparam_ex(unsigned long pth, + int *policy_r, + struct sched_param_ex *param_ex) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + int ret; + + hkey.u_pth = pth; + hkey.mm = current->mm; + thread = thread_lookup(&hkey); + if (thread == NULL) + return -ESRCH; + + ret = __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex); + if (ret) + return ret; + + trace_cobalt_pthread_getschedparam(pth, *policy_r, param_ex); + + return 0; +} + +COBALT_SYSCALL(thread_getschedparam_ex, current, + (unsigned long pth, + int __user *u_policy, + struct sched_param_ex __user *u_param)) +{ + struct sched_param_ex param_ex; + int ret, policy; + + ret = cobalt_thread_getschedparam_ex(pth, &policy, ¶m_ex); + if (ret) + return ret; + + ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy)); + if (ret) + return ret; + + return cobalt_copy_to_user(u_param, ¶m_ex, sizeof(param_ex)); +} + +int cobalt_thread_setschedprio(unsigned long pth, + int prio, + __u32 __user *u_winoff, + int __user *u_promoted) +{ + struct sched_param_ex param_ex; + struct cobalt_thread *thread; + int ret, policy, promoted; + + trace_cobalt_pthread_setschedprio(pth, prio); + + thread = thread_lookup_or_shadow(pth, u_winoff, &promoted); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + ret = __cobalt_thread_getschedparam_ex(thread, &policy, ¶m_ex); + if (ret) + return ret; + + param_ex.sched_priority = prio; + + ret = __cobalt_thread_setschedparam_ex(thread, policy, ¶m_ex); + if (ret) + return ret; + + return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted)); +} + +COBALT_SYSCALL(thread_setschedprio, conforming, + (unsigned long pth, + int prio, + __u32 __user *u_winoff, + int __user *u_promoted)) +{ + return cobalt_thread_setschedprio(pth, prio, u_winoff, u_promoted); +} + +int __cobalt_thread_create(unsigned long pth, int policy, + struct sched_param_ex *param_ex, + int xid, __u32 __user *u_winoff) +{ + struct cobalt_thread *thread = NULL; + struct task_struct *p = current; + struct cobalt_local_hkey hkey; + int 
ret; + + trace_cobalt_pthread_create(pth, policy, param_ex); + + /* + * We have been passed the pthread_t identifier the user-space + * Cobalt library has assigned to our caller; we'll index our + * internal pthread_t descriptor in kernel space on it. + */ + hkey.u_pth = pth; + hkey.mm = p->mm; + + ret = pthread_create(&thread, policy, param_ex, p); + if (ret) + return ret; + + ret = cobalt_map_user(&thread->threadbase, u_winoff); + if (ret) { + pthread_discard(thread); + return ret; + } + + if (!thread_hash(&hkey, thread, task_pid_vnr(p))) { + ret = -EAGAIN; + goto fail; + } + + thread->hkey = hkey; + + if (xid > 0 && cobalt_push_personality(xid) == NULL) { + ret = -EINVAL; + goto fail; + } + + return xnthread_harden(); +fail: + xnthread_cancel(&thread->threadbase); + + return ret; +} + +COBALT_SYSCALL(thread_create, init, + (unsigned long pth, int policy, + struct sched_param_ex __user *u_param, + int xid, + __u32 __user *u_winoff)) +{ + struct sched_param_ex param_ex; + int ret; + + ret = cobalt_copy_from_user(¶m_ex, u_param, sizeof(param_ex)); + if (ret) + return ret; + + return __cobalt_thread_create(pth, policy, ¶m_ex, xid, u_winoff); +} + +struct cobalt_thread * +cobalt_thread_shadow(struct cobalt_local_hkey *hkey, + __u32 __user *u_winoff) +{ + struct cobalt_thread *thread = NULL; + struct sched_param_ex param_ex; + int ret; + + if (xnthread_current()) + return ERR_PTR(-EBUSY); + + param_ex.sched_priority = 0; + trace_cobalt_pthread_create(hkey->u_pth, SCHED_NORMAL, ¶m_ex); + ret = pthread_create(&thread, SCHED_NORMAL, ¶m_ex, current); + if (ret) + return ERR_PTR(ret); + + ret = cobalt_map_user(&thread->threadbase, u_winoff); + if (ret) { + pthread_discard(thread); + return ERR_PTR(ret); + } + + if (!thread_hash(hkey, thread, task_pid_vnr(current))) { + ret = -EAGAIN; + goto fail; + } + + thread->hkey = *hkey; + + xnthread_harden(); + + return thread; +fail: + xnthread_cancel(&thread->threadbase); + + return ERR_PTR(ret); +} + +COBALT_SYSCALL(thread_setmode, primary, + (int clrmask, int setmask, int __user *u_mode_r)) +{ + int ret, old; + + trace_cobalt_pthread_setmode(clrmask, setmask); + + ret = pthread_setmode_np(clrmask, setmask, &old); + if (ret) + return ret; + + if (u_mode_r && cobalt_copy_to_user(u_mode_r, &old, sizeof(old))) + return -EFAULT; + + return 0; +} + +COBALT_SYSCALL(thread_setname, current, + (unsigned long pth, const char __user *u_name)) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + char name[XNOBJECT_NAME_LEN]; + struct task_struct *p; + spl_t s; + + if (cobalt_strncpy_from_user(name, u_name, + sizeof(name) - 1) < 0) + return -EFAULT; + + name[sizeof(name) - 1] = '\0'; + hkey.u_pth = pth; + hkey.mm = current->mm; + + trace_cobalt_pthread_setname(pth, name); + + xnlock_get_irqsave(&nklock, s); + + thread = thread_lookup(&hkey); + if (thread == NULL) { + xnlock_put_irqrestore(&nklock, s); + return -ESRCH; + } + + ksformat(thread->threadbase.name, + XNOBJECT_NAME_LEN - 1, "%s", name); + p = xnthread_host_task(&thread->threadbase); + get_task_struct(p); + + xnlock_put_irqrestore(&nklock, s); + + knamecpy(p->comm, name); + put_task_struct(p); + + return 0; +} + +COBALT_SYSCALL(thread_kill, conforming, + (unsigned long pth, int sig)) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + int ret; + spl_t s; + + trace_cobalt_pthread_kill(pth, sig); + + xnlock_get_irqsave(&nklock, s); + + hkey.u_pth = pth; + hkey.mm = current->mm; + thread = thread_lookup(&hkey); + if (thread == NULL) + ret = -ESRCH; + else + ret = 
__cobalt_kill(thread, sig, 0); + + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(thread_join, primary, (unsigned long pth)) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + spl_t s; + + trace_cobalt_pthread_join(pth); + + xnlock_get_irqsave(&nklock, s); + + hkey.u_pth = pth; + hkey.mm = current->mm; + thread = thread_lookup(&hkey); + + xnlock_put_irqrestore(&nklock, s); + + if (thread == NULL) + return -ESRCH; + + return xnthread_join(&thread->threadbase, false); +} + +COBALT_SYSCALL(thread_getpid, current, (unsigned long pth)) +{ + struct cobalt_local_hkey hkey; + struct cobalt_thread *thread; + pid_t pid; + spl_t s; + + trace_cobalt_pthread_pid(pth); + + xnlock_get_irqsave(&nklock, s); + + hkey.u_pth = pth; + hkey.mm = current->mm; + thread = thread_lookup(&hkey); + if (thread == NULL) + pid = -ESRCH; + else + pid = xnthread_host_pid(&thread->threadbase); + + xnlock_put_irqrestore(&nklock, s); + + return pid; +} + +COBALT_SYSCALL(thread_getstat, current, + (pid_t pid, struct cobalt_threadstat __user *u_stat)) +{ + struct cobalt_threadstat stat; + struct cobalt_thread *p; + struct xnthread *thread; + xnticks_t xtime; + spl_t s; + + trace_cobalt_pthread_stat(pid); + + if (pid == 0) { + thread = xnthread_current(); + if (thread == NULL) + return -EPERM; + xnlock_get_irqsave(&nklock, s); + } else { + xnlock_get_irqsave(&nklock, s); + p = cobalt_thread_find(pid); + if (p == NULL) { + xnlock_put_irqrestore(&nklock, s); + return -ESRCH; + } + thread = &p->threadbase; + } + + /* We have to hold the nklock to keep most values consistent. */ + stat.cpu = xnsched_cpu(thread->sched); + stat.cprio = xnthread_current_priority(thread); + xtime = xnstat_exectime_get_total(&thread->stat.account); + if (thread->sched->curr == thread) + xtime += xnstat_exectime_now() - + xnstat_exectime_get_last_switch(thread->sched); + stat.xtime = xnclock_ticks_to_ns(&nkclock, xtime); + stat.msw = xnstat_counter_get(&thread->stat.ssw); + stat.csw = xnstat_counter_get(&thread->stat.csw); + stat.xsc = xnstat_counter_get(&thread->stat.xsc); + stat.pf = xnstat_counter_get(&thread->stat.pf); + stat.status = xnthread_get_state(thread); + if (thread->lock_count > 0) + stat.status |= XNLOCK; + stat.timeout = xnthread_get_timeout(thread, + xnclock_read_monotonic(&nkclock)); + strcpy(stat.name, thread->name); + strcpy(stat.personality, thread->personality->name); + xnlock_put_irqrestore(&nklock, s); + + return cobalt_copy_to_user(u_stat, &stat, sizeof(stat)); +} + +#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION + +int cobalt_thread_extend(struct cobalt_extension *ext, + void *priv) +{ + struct cobalt_thread *thread = cobalt_current_thread(); + struct xnthread_personality *prev; + + trace_cobalt_pthread_extend(thread->hkey.u_pth, ext->core.name); + + prev = cobalt_push_personality(ext->core.xid); + if (prev == NULL) + return -EINVAL; + + cobalt_set_extref(&thread->extref, ext, priv); + + return 0; +} +EXPORT_SYMBOL_GPL(cobalt_thread_extend); + +void cobalt_thread_restrict(void) +{ + struct cobalt_thread *thread = cobalt_current_thread(); + + trace_cobalt_pthread_restrict(thread->hkey.u_pth, + thread->threadbase.personality->name); + cobalt_pop_personality(&cobalt_personality); + cobalt_set_extref(&thread->extref, NULL, NULL); +} +EXPORT_SYMBOL_GPL(cobalt_thread_restrict); + +#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */ + +const char *cobalt_trace_parse_sched_params(struct trace_seq *p, int policy, + struct sched_param_ex *params) +{ + const char *ret = trace_seq_buffer_ptr(p); + + switch 
(policy) { + case SCHED_QUOTA: + trace_seq_printf(p, "priority=%d, group=%d", + params->sched_priority, + params->sched_quota_group); + break; + case SCHED_TP: + trace_seq_printf(p, "priority=%d, partition=%d", + params->sched_priority, + params->sched_tp_partition); + break; + case SCHED_NORMAL: + break; + case SCHED_SPORADIC: + trace_seq_printf(p, "priority=%d, low_priority=%d, " + "budget=(%ld.%09ld), period=(%ld.%09ld), " + "maxrepl=%d", + params->sched_priority, + params->sched_ss_low_priority, + params->sched_ss_init_budget.tv_sec, + params->sched_ss_init_budget.tv_nsec, + params->sched_ss_repl_period.tv_sec, + params->sched_ss_repl_period.tv_nsec, + params->sched_ss_max_repl); + break; + case SCHED_RR: + case SCHED_FIFO: + case SCHED_COBALT: + case SCHED_WEAK: + default: + trace_seq_printf(p, "priority=%d", params->sched_priority); + break; + } + trace_seq_putc(p, '\0'); + + return ret; +} --- linux/kernel/xenomai/posix/Makefile 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/Makefile 2022-03-21 12:58:29.012892638 +0100 @@ -0,0 +1,38 @@ + +ccflags-y += -I$(srctree)/kernel + +obj-$(CONFIG_XENOMAI) += xenomai.o + +xenomai-y := \ + clock.o \ + cond.o \ + corectl.o \ + event.o \ + io.o \ + memory.o \ + monitor.o \ + mqueue.o \ + mutex.o \ + nsem.o \ + process.o \ + sched.o \ + sem.o \ + signal.o \ + syscall.o \ + thread.o \ + timer.o \ + timerfd.o + +syscall_entries := $(srctree)/$(src)/gen-syscall-entries.sh + +quiet_cmd_syscall_entries = GEN $@ + cmd_syscall_entries = $(CONFIG_SHELL) '$(syscall_entries)' $(filter-out FORCE,$^) > $@ + +$(obj)/syscall_entries.h: $(syscall_entries) $(wildcard $(srctree)/$(src)/*.c) FORCE + $(call if_changed,syscall_entries) + +target += syscall_entries.h + +$(obj)/syscall.o: $(obj)/syscall_entries.h + +xenomai-$(CONFIG_XENO_ARCH_SYS3264) += compat.o syscall32.o --- linux/kernel/xenomai/posix/syscall.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/syscall.c 2022-03-21 12:58:29.008892677 +0100 @@ -0,0 +1,797 @@ +/* + * Copyright (C) 2005 Philippe Gerum + * Copyright (C) 2005 Gilles Chanteperdrix + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" +#include "thread.h" +#include "sched.h" +#include "mutex.h" +#include "cond.h" +#include "mqueue.h" +#include "sem.h" +#include "signal.h" +#include "timer.h" +#include "monitor.h" +#include "clock.h" +#include "event.h" +#include "timerfd.h" +#include "io.h" +#include "corectl.h" +#include "../debug.h" +#include + +/* Syscall must run into the Linux domain. */ +#define __xn_exec_lostage 0x1 +/* Syscall must run into the Xenomai domain. */ +#define __xn_exec_histage 0x2 +/* Shadow syscall: caller must be mapped. 
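i.e. the calling task must already be attached to the Cobalt core as a shadow thread.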
*/ +#define __xn_exec_shadow 0x4 +/* Switch back toggle; caller must return to its original mode. */ +#define __xn_exec_switchback 0x8 +/* Exec in current domain. */ +#define __xn_exec_current 0x10 +/* Exec in conforming domain, Xenomai or Linux. */ +#define __xn_exec_conforming 0x20 +/* Attempt syscall restart in the opposite domain upon -ENOSYS. */ +#define __xn_exec_adaptive 0x40 +/* Do not restart syscall upon signal receipt. */ +#define __xn_exec_norestart 0x80 +/* Shorthand for shadow init syscall. */ +#define __xn_exec_init __xn_exec_lostage +/* Shorthand for shadow syscall in Xenomai space. */ +#define __xn_exec_primary (__xn_exec_shadow|__xn_exec_histage) +/* Shorthand for shadow syscall in Linux space. */ +#define __xn_exec_secondary (__xn_exec_shadow|__xn_exec_lostage) +/* Shorthand for syscall in Linux space with switchback if shadow. */ +#define __xn_exec_downup (__xn_exec_lostage|__xn_exec_switchback) +/* Shorthand for non-restartable primary syscall. */ +#define __xn_exec_nonrestartable (__xn_exec_primary|__xn_exec_norestart) +/* Domain probing syscall starting in conforming mode. */ +#define __xn_exec_probing (__xn_exec_conforming|__xn_exec_adaptive) +/* Hand over mode selection to syscall. */ +#define __xn_exec_handover (__xn_exec_current|__xn_exec_adaptive) + +typedef long (*cobalt_syshand)(unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5); + +static void prepare_for_signal(struct task_struct *p, + struct xnthread *thread, + struct pt_regs *regs, + int sysflags) +{ + int notify = 0; + spl_t s; + + xnlock_get_irqsave(&nklock, s); + + if (xnthread_test_info(thread, XNKICKED)) { + if (signal_pending(p)) { + __xn_error_return(regs, + (sysflags & __xn_exec_norestart) ? + -EINTR : -ERESTARTSYS); + notify = !xnthread_test_state(thread, XNSSTEP); + xnthread_clear_info(thread, XNBREAK); + } + xnthread_clear_info(thread, XNKICKED); + } + + xnlock_put_irqrestore(&nklock, s); + + xnthread_test_cancel(); + + xnthread_relax(notify, SIGDEBUG_MIGRATE_SIGNAL); +} + +static COBALT_SYSCALL(migrate, current, (int domain)) +{ + struct xnthread *thread = xnthread_current(); + + if (is_secondary_domain()) { + if (domain == COBALT_PRIMARY) { + if (thread == NULL) + return -EPERM; + /* + * Paranoid: a corner case where userland + * fiddles with SIGSHADOW while the target + * thread is still waiting to be started. + */ + if (xnthread_test_state(thread, XNDORMANT)) + return 0; + + return xnthread_harden() ? : 1; + } + return 0; + } + + /* We are running on the head stage, apply relax request. 
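i.e. demote the caller to the in-band stage when COBALT_SECONDARY is requested.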
*/ + if (domain == COBALT_SECONDARY) { + xnthread_relax(0, 0); + return 1; + } + + return 0; +} + +static COBALT_SYSCALL(trace, current, + (int op, unsigned long a1, + unsigned long a2, unsigned long a3)) +{ + int ret = -EINVAL; + + switch (op) { + case __xntrace_op_max_begin: + ret = xntrace_max_begin(a1); + break; + + case __xntrace_op_max_end: + ret = xntrace_max_end(a1); + break; + + case __xntrace_op_max_reset: + ret = xntrace_max_reset(); + break; + + case __xntrace_op_user_start: + ret = xntrace_user_start(); + break; + + case __xntrace_op_user_stop: + ret = xntrace_user_stop(a1); + break; + + case __xntrace_op_user_freeze: + ret = xntrace_user_freeze(a1, a2); + break; + + case __xntrace_op_special: + ret = xntrace_special(a1 & 0xFF, a2); + break; + + case __xntrace_op_special_u64: + ret = xntrace_special_u64(a1 & 0xFF, + (((u64) a2) << 32) | a3); + break; + + case __xntrace_op_latpeak_freeze: + xntrace_latpeak_freeze(a1); + ret = 0; + break; + + } + return ret; +} + +static COBALT_SYSCALL(ftrace_puts, current, + (const char __user *str)) +{ + char buf[256]; + unsigned len; + + len = cobalt_strncpy_from_user(buf, str, sizeof(buf)); + if (len < 0) + return -EFAULT; + +#ifdef CONFIG_TRACING + __trace_puts(_THIS_IP_, buf, len); +#endif + + return 0; +} + +static COBALT_SYSCALL(archcall, current, + (unsigned long a1, unsigned long a2, + unsigned long a3, unsigned long a4, + unsigned long a5)) +{ + return xnarch_local_syscall(a1, a2, a3, a4, a5); +} + +static COBALT_SYSCALL(get_current, current, + (xnhandle_t __user *u_handle)) +{ + struct xnthread *cur = xnthread_current(); + + if (cur == NULL) + return -EPERM; + + return cobalt_copy_to_user(u_handle, &cur->handle, + sizeof(*u_handle)); +} + +static COBALT_SYSCALL(backtrace, lostage, + (int nr, unsigned long __user *u_backtrace, int reason)) +{ + unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH]; + int ret; + + /* + * In case backtrace() in userland is broken or fails. We may + * want to know about this in kernel space however, for future + * use. + */ + if (nr <= 0) + return 0; + /* + * We may omit the older frames if we can't store the full + * backtrace. + */ + if (nr > SIGSHADOW_BACKTRACE_DEPTH) + nr = SIGSHADOW_BACKTRACE_DEPTH; + /* + * Fetch the backtrace array, filled with PC values as seen + * from the relaxing thread in user-space. This can't fail + */ + ret = cobalt_copy_from_user(backtrace, u_backtrace, nr * sizeof(long)); + if (ret) + return ret; + + xndebug_trace_relax(nr, backtrace, reason); + + return 0; +} + +static COBALT_SYSCALL(serialdbg, current, + (const char __user *u_msg, int len)) +{ + char buf[128]; + int n; + + while (len > 0) { + n = len; + if (n > sizeof(buf)) + n = sizeof(buf); + if (cobalt_copy_from_user(buf, u_msg, n)) + return -EFAULT; + raw_printk("%.*s", n, buf); + u_msg += n; + len -= n; + } + + return 0; +} + +static void stringify_feature_set(unsigned long fset, char *buf, int size) +{ + unsigned long feature; + int nc, nfeat; + + *buf = '\0'; + + for (feature = 1, nc = nfeat = 0; fset != 0 && size > 0; feature <<= 1) { + if (fset & feature) { + nc = ksformat(buf, size, "%s%s", + nfeat > 0 ? 
" " : "", + get_feature_label(feature)); + nfeat++; + size -= nc; + buf += nc; + fset &= ~feature; + } + } +} + +static COBALT_SYSCALL(bind, lostage, + (struct cobalt_bindreq __user *u_breq)) +{ + unsigned long featreq, featmis; + struct cobalt_bindreq breq; + struct cobalt_featinfo *f; + int abirev; + + if (cobalt_copy_from_user(&breq, u_breq, sizeof(breq))) + return -EFAULT; + + f = &breq.feat_ret; + featreq = breq.feat_req; + if (!realtime_core_running() && (featreq & __xn_feat_control) == 0) + return -EAGAIN; + + /* + * Calculate the missing feature set: + * kernel_unavailable_set & user_mandatory_set. + */ + featmis = (~XENOMAI_FEAT_DEP & (featreq & XENOMAI_FEAT_MAN)); + abirev = breq.abi_rev; + + /* + * Pass back the supported feature set and the ABI revision + * level to user-space. + */ + f->feat_all = XENOMAI_FEAT_DEP; + stringify_feature_set(XENOMAI_FEAT_DEP, f->feat_all_s, + sizeof(f->feat_all_s)); + f->feat_man = featreq & XENOMAI_FEAT_MAN; + stringify_feature_set(f->feat_man, f->feat_man_s, + sizeof(f->feat_man_s)); + f->feat_mis = featmis; + stringify_feature_set(featmis, f->feat_mis_s, + sizeof(f->feat_mis_s)); + f->feat_req = featreq; + stringify_feature_set(featreq, f->feat_req_s, + sizeof(f->feat_req_s)); + f->feat_abirev = XENOMAI_ABI_REV; + collect_arch_features(f); + + pipeline_collect_features(f); + f->vdso_offset = cobalt_umm_offset(&cobalt_ppd_get(1)->umm, nkvdso); + + if (cobalt_copy_to_user(u_breq, &breq, sizeof(breq))) + return -EFAULT; + + /* + * If some mandatory features the user-space code relies on + * are missing at kernel level, we cannot go further. + */ + if (featmis) + return -EINVAL; + + if (!check_abi_revision(abirev)) + return -ENOEXEC; + + return cobalt_bind_core(featreq); +} + +static COBALT_SYSCALL(extend, lostage, (unsigned int magic)) +{ + return cobalt_bind_personality(magic); +} + +static int CoBaLt_ni(void) +{ + return -ENOSYS; +} + +/* + * We have a single syscall table for all ABI models, i.e. 64bit + * native + 32bit emulation) or plain 32bit. + * + * The syscall table is set up in a single step, based on three + * subsequent sources of initializers: + * + * - first, all syscall entries are defaulted to a placeholder + * returning -ENOSYS (__COBALT_CALL_NI), as the table may be sparse. + * + * - then __COBALT_CALL_ENTRY() produces a native call entry + * (e.g. pure 64bit call handler for a 64bit architecture, 32bit + * handler for a 32bit architecture), optionally followed by a set of + * 32bit syscall entries offset by an arch-specific base index, which + * default to the native calls. These nitty-gritty details are defined + * by . 32bit architectures - or 64bit ones + * for which we don't support any 32bit ABI model - will simply define + * __COBALT_CALL32_ENTRY() as an empty macro. + * + * - finally, 32bit thunk entries are generated by including + * , overriding the default handlers + * installed during the previous step. + * + * For instance, with CONFIG_IA32_EMULATION support enabled in an + * x86_64 kernel, sc_cobalt_mq_timedreceive would appear twice in the + * table, as: + * + * [sc_cobalt_mq_timedreceive] = CoBaLt_mq_timedreceive, + * ... + * [sc_cobalt_mq_timedreceive + __COBALT_IA32_BASE] = CoBaLt32emu_mq_timedreceive, + * + * CoBaLt32emu_mq_timedreceive() would do the required thunking for + * dealing with the 32<->64bit conversion of arguments. 
On the other + * hand, sc_cobalt_sched_yield - which do not require any thunk - + * would also appear twice, but both entries would point at the native + * syscall implementation: + * + * [sc_cobalt_sched_yield] = CoBaLt_sched_yield, + * ... + * [sc_cobalt_sched_yield + __COBALT_IA32_BASE] = CoBaLt_sched_yield, + * + * Accordingly, applications targeting the ia32 model issue syscalls + * in the range [__COBALT_IA32_BASE..__COBALT_IA32_BASE + + * __NR_COBALT_SYSCALLS-1], whilst native (32/64bit) ones issue + * syscalls in the range [0..__NR_COBALT_SYSCALLS-1]. + * + * In short, this is an incremental process where the arch-specific + * code can override the 32bit syscall entries, pointing at the thunk + * routines it may need for handing 32bit calls over their respective + * 64bit implementation. + * + * By convention, there is NO pure 32bit syscall, which means that + * each 32bit syscall defined by a compat ABI interface MUST match a + * native (64bit) syscall. This is important as we share the call + * modes (i.e. __xn_exec_ bits) between all ABI models. + * + * --rpm + */ +#define __syshand__(__name) ((cobalt_syshand)(CoBaLt_ ## __name)) + +#define __COBALT_NI __syshand__(ni) + +#define __COBALT_CALL_NI \ + [0 ... __NR_COBALT_SYSCALLS-1] = __COBALT_NI, \ + __COBALT_CALL32_INITHAND(__COBALT_NI) + +#define __COBALT_CALL_NFLAGS \ + [0 ... __NR_COBALT_SYSCALLS-1] = 0, \ + __COBALT_CALL32_INITMODE(0) + +#define __COBALT_CALL_ENTRY(__name) \ + [sc_cobalt_ ## __name] = __syshand__(__name), \ + __COBALT_CALL32_ENTRY(__name, __syshand__(__name)) + +#define __COBALT_MODE(__name, __mode) \ + [sc_cobalt_ ## __name] = __xn_exec_##__mode, + +#ifdef CONFIG_XENO_ARCH_SYS3264 +#include "syscall32.h" +#endif + +#include "syscall_entries.h" + +static const cobalt_syshand cobalt_syscalls[] = { + __COBALT_CALL_NI + __COBALT_CALL_ENTRIES +#ifdef CONFIG_XENO_ARCH_SYS3264 +#include +#endif +}; + +static const int cobalt_sysmodes[] = { + __COBALT_CALL_NFLAGS + __COBALT_CALL_MODES +}; + +static inline int allowed_syscall(struct cobalt_process *process, + struct xnthread *thread, + int sysflags, int nr) +{ + if (nr == sc_cobalt_bind) + return 1; + + if (process == NULL) + return 0; + + if (thread == NULL && (sysflags & __xn_exec_shadow)) + return 0; + + return cap_raised(current_cap(), CAP_SYS_NICE); +} + +int handle_head_syscall(bool caller_is_relaxed, struct pt_regs *regs) +{ + struct cobalt_process *process; + int switched, sigs, sysflags; + struct xnthread *thread; + cobalt_syshand handler; + struct task_struct *p; + unsigned long args[6]; + unsigned int nr, code; + long ret; + + if (!__xn_syscall_p(regs)) + goto linux_syscall; + + thread = xnthread_current(); + code = __xn_syscall(regs); + if (code >= ARRAY_SIZE(cobalt_syscalls)) + goto bad_syscall; + + nr = code & (__NR_COBALT_SYSCALLS - 1); + + trace_cobalt_head_sysentry(code); + + process = cobalt_current_process(); + if (process == NULL) { + process = cobalt_search_process(current->mm); + cobalt_set_process(process); + } + + handler = cobalt_syscalls[code]; + sysflags = cobalt_sysmodes[nr]; + + /* + * Executing Cobalt services requires CAP_SYS_NICE, except for + * sc_cobalt_bind which does its own checks. + */ + if (unlikely(!allowed_syscall(process, thread, sysflags, nr))) { + /* + * Exclude get_current from reporting, it is used to probe the + * execution context. 
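+ * Such denials are expected there and not worth a warning.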
+ */ + if (XENO_DEBUG(COBALT) && nr != sc_cobalt_get_current) + printk(XENO_WARNING + "syscall <%d> denied to %s[%d]\n", + nr, current->comm, task_pid_nr(current)); + __xn_error_return(regs, -EPERM); + goto ret_handled; + } + + if (sysflags & __xn_exec_conforming) + /* + * If the conforming exec bit is set, turn the exec + * bitmask for the syscall into the most appropriate + * setup for the caller, i.e. Xenomai domain for + * shadow threads, Linux otherwise. + */ + sysflags |= (thread ? __xn_exec_histage : __xn_exec_lostage); + + /* + * Here we have to dispatch the syscall execution properly, + * depending on: + * + * o Whether the syscall must be run into the Linux or Xenomai + * domain, or indifferently in the current Xenomai domain. + * + * o Whether the caller currently runs in the Linux or Xenomai + * domain. + */ +restart: + /* + * Process adaptive syscalls by restarting them in the + * opposite domain upon receiving -ENOSYS from the syscall + * handler. + */ + switched = 0; + if (sysflags & __xn_exec_lostage) { + /* + * The syscall must run from the Linux domain. + */ + if (!caller_is_relaxed) { + /* + * Request originates from the Xenomai domain: + * relax the caller then invoke the syscall + * handler right after. + */ + xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL); + switched = 1; + } else + /* + * Request originates from the Linux domain: + * propagate the event to our Linux-based + * handler, so that the syscall is executed + * from there. + */ + return KEVENT_PROPAGATE; + } else if (sysflags & (__xn_exec_histage | __xn_exec_current)) { + /* + * Syscall must run either from the Xenomai domain, or + * from the calling domain. + * + * If the request originates from the Linux domain, + * hand it over to our secondary-mode dispatcher. + * Otherwise, invoke the syscall handler immediately. + */ + if (caller_is_relaxed) + return KEVENT_PROPAGATE; + } + + /* + * 'thread' has to be valid from that point: all syscalls + * regular threads may call have been pipelined to the root + * handler (lostage ones), or rejected by allowed_syscall(). + */ + + p = current; + pipeline_get_syscall_args(p, regs, args); + + ret = handler(args[0], args[1], args[2], args[3], args[4]); + if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) { + if (switched) { + ret = xnthread_harden(); + if (ret) { + switched = 0; + goto done; + } + } else /* Mark the primary -> secondary transition. */ + xnthread_set_localinfo(thread, XNDESCENT); + sysflags ^= + (__xn_exec_lostage | __xn_exec_histage | + __xn_exec_adaptive); + goto restart; + } +done: + __xn_status_return(regs, ret); + sigs = 0; + if (!xnsched_root_p()) { + if (signal_pending(p) || + xnthread_test_info(thread, XNKICKED)) { + sigs = 1; + prepare_for_signal(p, thread, regs, sysflags); + } else if (xnthread_test_state(thread, XNWEAK) && + thread->res_count == 0) { + if (switched) + switched = 0; + else + xnthread_relax(0, 0); + } + } + if (!sigs && (sysflags & __xn_exec_switchback) && switched) + /* -EPERM will be trapped later if needed. */ + xnthread_harden(); + +ret_handled: + /* Update the stats and userland-visible state. 
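The latter is synced to the shared window so userland observes the current thread mode.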
*/ + if (thread) { + xnthread_clear_localinfo(thread, XNDESCENT); + xnstat_counter_inc(&thread->stat.xsc); + xnthread_sync_window(thread); + } + + trace_cobalt_head_sysexit(__xn_reg_rval(regs)); + + return KEVENT_STOP; + +linux_syscall: + if (xnsched_root_p()) + /* + * The call originates from the Linux domain, either + * from a relaxed shadow or from a regular Linux task; + * just propagate the event so that we will fall back + * to handle_root_syscall(). + */ + return KEVENT_PROPAGATE; + + if (!__xn_rootcall_p(regs, &code)) + goto bad_syscall; + + if (pipeline_handle_vdso_fallback(code, regs)) + return KEVENT_STOP; + + /* + * We know this is a Cobalt thread since it runs over the head + * domain, however the current syscall should be handled by + * the host kernel instead. Before this happens, we have to + * re-enter the root domain. + */ + xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL); + + return KEVENT_PROPAGATE; + +bad_syscall: + printk(XENO_WARNING "bad syscall <%#x>\n", code); + + __xn_error_return(regs, -ENOSYS); + + return KEVENT_STOP; +} + +int handle_root_syscall(struct pt_regs *regs) +{ + int sysflags, switched, sigs; + struct xnthread *thread; + cobalt_syshand handler; + struct task_struct *p; + unsigned long args[6]; + unsigned int nr, code; + long ret; + + /* + * Catch cancellation requests pending for user shadows + * running mostly in secondary mode, i.e. XNWEAK. In that + * case, we won't run prepare_for_signal() that frequently, so + * check for cancellation here. + */ + xnthread_test_cancel(); + + if (!__xn_syscall_p(regs)) + /* Fall back to Linux syscall handling. */ + return KEVENT_PROPAGATE; + + thread = xnthread_current(); + /* code has already been checked in the head domain handler. */ + code = __xn_syscall(regs); + nr = code & (__NR_COBALT_SYSCALLS - 1); + + trace_cobalt_root_sysentry(code); + + /* Processing a Xenomai syscall. */ + + handler = cobalt_syscalls[code]; + sysflags = cobalt_sysmodes[nr]; + + if (thread && (sysflags & __xn_exec_conforming)) + sysflags |= __xn_exec_histage; +restart: + /* + * Process adaptive syscalls by restarting them in the + * opposite domain upon receiving -ENOSYS from the syscall + * handler. + */ + switched = 0; + if (sysflags & __xn_exec_histage) { + /* + * This request originates from the Linux domain but + * should run into the Xenomai domain: harden the + * caller before invoking the syscall handler. + */ + ret = xnthread_harden(); + if (ret) { + __xn_error_return(regs, ret); + goto ret_handled; + } + switched = 1; + } else { + /* + * We want to run the syscall in the current Linux + * domain. This is a slow path, so proceed with any + * pending schedparam update on the fly. + */ + if (thread) + xnthread_propagate_schedparam(thread); + } + + p = current; + pipeline_get_syscall_args(p, regs, args); + + ret = handler(args[0], args[1], args[2], args[3], args[4]); + if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) { + sysflags ^= __xn_exec_histage; + if (switched) { + xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL); + sysflags &= ~__xn_exec_adaptive; + /* Mark the primary -> secondary transition. */ + xnthread_set_localinfo(thread, XNDESCENT); + } + goto restart; + } + + __xn_status_return(regs, ret); + + sigs = 0; + if (!xnsched_root_p()) { + /* + * We may have gained a shadow TCB from the syscall we + * just invoked, so make sure to fetch it. 
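+ * (e.g. sc_cobalt_thread_create promotes the caller to a shadow).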
+ */ + thread = xnthread_current(); + if (signal_pending(p)) { + sigs = 1; + prepare_for_signal(p, thread, regs, sysflags); + } else if (xnthread_test_state(thread, XNWEAK) && + thread->res_count == 0) + sysflags |= __xn_exec_switchback; + } + if (!sigs && (sysflags & __xn_exec_switchback) + && (switched || xnsched_primary_p())) + xnthread_relax(0, 0); + +ret_handled: + /* Update the stats and userland-visible state. */ + if (thread) { + xnthread_clear_localinfo(thread, XNDESCENT|XNHICCUP); + xnstat_counter_inc(&thread->stat.xsc); + xnthread_sync_window(thread); + } + + trace_cobalt_root_sysexit(__xn_reg_rval(regs)); + + return KEVENT_STOP; +} + +long cobalt_restart_syscall_placeholder(struct restart_block *param) +{ + return -EINVAL; +} --- linux/kernel/xenomai/posix/event.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/event.c 2022-03-21 12:58:29.005892706 +0100 @@ -0,0 +1,415 @@ +/* + * Copyright (C) 2012 Philippe Gerum + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include "internal.h" +#include "thread.h" +#include "clock.h" +#include "event.h" +#include +#include + +/* + * Cobalt event notification services + * + * An event flag group is a synchronization object represented by a + * regular native integer; every available bit in such word can be + * used to map a user-defined event flag. When a flag is set, the + * associated event is said to have occurred. + * + * Xenomai threads and interrupt handlers can use event flags to + * signal the occurrence of events to other threads; those threads can + * either wait for the events to occur in a conjunctive manner (all + * awaited events must have occurred to wake up), or in a disjunctive + * way (at least one of the awaited events must have occurred to wake + * up). + * + * We expose this non-POSIX feature through the internal API, as a + * fast IPC mechanism available to the Copperplate interface. 
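+ * For instance, a waiter passing bits=0x5 with COBALT_EVENT_ANY resumes as soon as bit 0 or bit 2 is raised in the group, whereas the default conjunctive mode requires both bits to be set.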
+ */ + +struct event_wait_context { + struct xnthread_wait_context wc; + unsigned int value; + int mode; +}; + +COBALT_SYSCALL(event_init, current, + (struct cobalt_event_shadow __user *u_event, + unsigned int value, int flags)) +{ + struct cobalt_event_shadow shadow; + struct cobalt_event_state *state; + int pshared, synflags, ret; + struct cobalt_event *event; + struct cobalt_umm *umm; + unsigned long stateoff; + spl_t s; + + trace_cobalt_event_init(u_event, value, flags); + + event = xnmalloc(sizeof(*event)); + if (event == NULL) + return -ENOMEM; + + pshared = (flags & COBALT_EVENT_SHARED) != 0; + umm = &cobalt_ppd_get(pshared)->umm; + state = cobalt_umm_alloc(umm, sizeof(*state)); + if (state == NULL) { + xnfree(event); + return -EAGAIN; + } + + ret = xnregistry_enter_anon(event, &event->resnode.handle); + if (ret) { + cobalt_umm_free(umm, state); + xnfree(event); + return ret; + } + + event->state = state; + event->flags = flags; + synflags = (flags & COBALT_EVENT_PRIO) ? XNSYNCH_PRIO : XNSYNCH_FIFO; + xnsynch_init(&event->synch, synflags, NULL); + state->value = value; + state->flags = 0; + state->nwaiters = 0; + stateoff = cobalt_umm_offset(umm, state); + XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff); + + xnlock_get_irqsave(&nklock, s); + cobalt_add_resource(&event->resnode, event, pshared); + event->magic = COBALT_EVENT_MAGIC; + xnlock_put_irqrestore(&nklock, s); + + shadow.flags = flags; + shadow.handle = event->resnode.handle; + shadow.state_offset = (__u32)stateoff; + + return cobalt_copy_to_user(u_event, &shadow, sizeof(*u_event)); +} + +int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct timespec64 *ts) +{ + unsigned int rbits = 0, testval; + xnticks_t timeout = XN_INFINITE; + struct cobalt_event_state *state; + xntmode_t tmode = XN_RELATIVE; + struct event_wait_context ewc; + struct cobalt_event *event; + xnhandle_t handle; + int ret = 0, info; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_event->handle); + + if (ts) { + if (!timespec64_valid(ts)) + return -EINVAL; + + timeout = ts2ns(ts); + if (timeout) { + timeout++; + tmode = XN_ABSOLUTE; + } else + timeout = XN_NONBLOCK; + trace_cobalt_event_timedwait(u_event, bits, mode, ts); + } else + trace_cobalt_event_wait(u_event, bits, mode); + + xnlock_get_irqsave(&nklock, s); + + event = xnregistry_lookup(handle, NULL); + if (event == NULL || event->magic != COBALT_EVENT_MAGIC) { + ret = -EINVAL; + goto out; + } + + state = event->state; + + if (bits == 0) { + /* + * Special case: we don't wait for any event, we only + * return the current flag group value. + */ + rbits = state->value; + goto out; + } + + state->flags |= COBALT_EVENT_PENDED; + rbits = state->value & bits; + testval = mode & COBALT_EVENT_ANY ? rbits : bits; + if (rbits && rbits == testval) + goto done; + + if (timeout == XN_NONBLOCK) { + ret = -EWOULDBLOCK; + goto done; + } + + ewc.value = bits; + ewc.mode = mode; + xnthread_prepare_wait(&ewc.wc); + state->nwaiters++; + info = xnsynch_sleep_on(&event->synch, timeout, tmode); + if (info & XNRMID) { + ret = -EIDRM; + goto out; + } + if (info & (XNBREAK|XNTIMEO)) { + state->nwaiters--; + ret = (info & XNBREAK) ? 
-EINTR : -ETIMEDOUT; + } else + rbits = ewc.value; +done: + if (!xnsynch_pended_p(&event->synch)) + state->flags &= ~COBALT_EVENT_PENDED; +out: + xnlock_put_irqrestore(&nklock, s); + + if (ret == 0 && + cobalt_copy_to_user(u_bits_r, &rbits, sizeof(rbits))) + return -EFAULT; + + return ret; +} + +int __cobalt_event_wait64(struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct __kernel_timespec __user *u_ts) +{ + struct timespec64 ts, *tsp = NULL; + int ret; + + if (u_ts) { + tsp = &ts; + ret = cobalt_get_timespec64(&ts, u_ts); + if (ret) + return ret; + } + + return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp); +} + +COBALT_SYSCALL(event_wait, primary, + (struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct __user_old_timespec __user *u_ts)) +{ + struct timespec64 ts, *tsp = NULL; + int ret; + + if (u_ts) { + tsp = &ts; + ret = cobalt_get_u_timespec(&ts, u_ts); + if (ret) + return ret; + } + + return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp); +} + +COBALT_SYSCALL(event_wait64, primary, + (struct cobalt_event_shadow __user *u_event, + unsigned int bits, + unsigned int __user *u_bits_r, + int mode, const struct __kernel_timespec __user *u_ts)) +{ + return __cobalt_event_wait64(u_event, bits, u_bits_r, mode, u_ts); +} + +COBALT_SYSCALL(event_sync, current, + (struct cobalt_event_shadow __user *u_event)) +{ + unsigned int bits, waitval, testval; + struct xnthread_wait_context *wc; + struct cobalt_event_state *state; + struct event_wait_context *ewc; + struct cobalt_event *event; + struct xnthread *p, *tmp; + xnhandle_t handle; + int ret = 0; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_event->handle); + + xnlock_get_irqsave(&nklock, s); + + event = xnregistry_lookup(handle, NULL); + if (event == NULL || event->magic != COBALT_EVENT_MAGIC) { + ret = -EINVAL; + goto out; + } + + /* + * Userland has already updated the bitmask, our job is to + * wake up any thread which could be satisfied by its current + * value. + */ + state = event->state; + bits = state->value; + + xnsynch_for_each_sleeper_safe(p, tmp, &event->synch) { + wc = xnthread_get_wait_context(p); + ewc = container_of(wc, struct event_wait_context, wc); + waitval = ewc->value & bits; + testval = ewc->mode & COBALT_EVENT_ANY ? 
waitval : ewc->value; + if (waitval && waitval == testval) { + state->nwaiters--; + ewc->value = waitval; + xnsynch_wakeup_this_sleeper(&event->synch, p); + } + } + + xnsched_run(); +out: + xnlock_put_irqrestore(&nklock, s); + + return ret; +} + +COBALT_SYSCALL(event_destroy, current, + (struct cobalt_event_shadow __user *u_event)) +{ + struct cobalt_event *event; + xnhandle_t handle; + spl_t s; + + trace_cobalt_event_destroy(u_event); + + handle = cobalt_get_handle_from_user(&u_event->handle); + + xnlock_get_irqsave(&nklock, s); + + event = xnregistry_lookup(handle, NULL); + if (event == NULL || event->magic != COBALT_EVENT_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + + cobalt_event_reclaim(&event->resnode, s); /* drops lock */ + + return 0; +} + +COBALT_SYSCALL(event_inquire, current, + (struct cobalt_event_shadow __user *u_event, + struct cobalt_event_info __user *u_info, + pid_t __user *u_waitlist, + size_t waitsz)) +{ + int nrpend = 0, nrwait = 0, nrpids, ret = 0; + unsigned long pstamp, nstamp = 0; + struct cobalt_event_info info; + struct cobalt_event *event; + pid_t *t = NULL, fbuf[16]; + struct xnthread *thread; + xnhandle_t handle; + spl_t s; + + handle = cobalt_get_handle_from_user(&u_event->handle); + + nrpids = waitsz / sizeof(pid_t); + + xnlock_get_irqsave(&nklock, s); + + for (;;) { + pstamp = nstamp; + event = xnregistry_lookup(handle, &nstamp); + if (event == NULL || event->magic != COBALT_EVENT_MAGIC) { + xnlock_put_irqrestore(&nklock, s); + return -EINVAL; + } + /* + * Allocate memory to return the wait list without + * holding any lock, then revalidate the handle. + */ + if (t == NULL) { + nrpend = 0; + if (!xnsynch_pended_p(&event->synch)) + break; + xnsynch_for_each_sleeper(thread, &event->synch) + nrpend++; + if (u_waitlist == NULL) + break; + xnlock_put_irqrestore(&nklock, s); + if (nrpids > nrpend) + nrpids = nrpend; + if (nrpend <= ARRAY_SIZE(fbuf)) + t = fbuf; /* Use fast buffer. 
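good enough for the common case of few waiters; larger wait lists fall back to a heap allocation below.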
*/ + else { + t = xnmalloc(nrpend * sizeof(pid_t)); + if (t == NULL) + return -ENOMEM; + } + xnlock_get_irqsave(&nklock, s); + } else if (pstamp == nstamp) + break; + else { + xnlock_put_irqrestore(&nklock, s); + if (t != fbuf) + xnfree(t); + t = NULL; + xnlock_get_irqsave(&nklock, s); + } + } + + info.flags = event->flags; + info.value = event->value; + info.nrwait = nrpend; + + if (xnsynch_pended_p(&event->synch) && u_waitlist != NULL) { + xnsynch_for_each_sleeper(thread, &event->synch) { + if (nrwait >= nrpids) + break; + t[nrwait++] = xnthread_host_pid(thread); + } + } + + xnlock_put_irqrestore(&nklock, s); + + ret = cobalt_copy_to_user(u_info, &info, sizeof(info)); + if (ret == 0 && nrwait > 0) + ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t)); + + if (t && t != fbuf) + xnfree(t); + + return ret ?: nrwait; +} + +void cobalt_event_reclaim(struct cobalt_resnode *node, spl_t s) +{ + struct cobalt_event *event; + struct cobalt_umm *umm; + int pshared; + + event = container_of(node, struct cobalt_event, resnode); + xnregistry_remove(node->handle); + cobalt_del_resource(node); + xnsynch_destroy(&event->synch); + pshared = (event->flags & COBALT_EVENT_SHARED) != 0; + xnlock_put_irqrestore(&nklock, s); + + umm = &cobalt_ppd_get(pshared)->umm; + cobalt_umm_free(umm, event->state); + xnfree(event); +} --- linux/kernel/xenomai/posix/compat.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-patched/kernel/xenomai/posix/compat.c 2022-03-21 12:58:29.001892745 +0100 @@ -0,0 +1,544 @@ +/* + * Copyright (C) 2014 Philippe Gerum + * + * Xenomai is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * Xenomai is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+
+int sys32_get_timespec(struct timespec64 *ts,
+		       const struct old_timespec32 __user *u_cts)
+{
+	struct old_timespec32 cts;
+
+	if (u_cts == NULL || !access_rok(u_cts, sizeof(*u_cts)))
+		return -EFAULT;
+
+	if (__xn_get_user(cts.tv_sec, &u_cts->tv_sec) ||
+	    __xn_get_user(cts.tv_nsec, &u_cts->tv_nsec))
+		return -EFAULT;
+
+	ts->tv_sec = cts.tv_sec;
+	ts->tv_nsec = cts.tv_nsec;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timespec);
+
+int sys32_put_timespec(struct old_timespec32 __user *u_cts,
+		       const struct timespec64 *ts)
+{
+	struct old_timespec32 cts;
+
+	if (u_cts == NULL || !access_wok(u_cts, sizeof(*u_cts)))
+		return -EFAULT;
+
+	cts.tv_sec = ts->tv_sec;
+	cts.tv_nsec = ts->tv_nsec;
+
+	if (__xn_put_user(cts.tv_sec, &u_cts->tv_sec) ||
+	    __xn_put_user(cts.tv_nsec, &u_cts->tv_nsec))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timespec);
+
+int sys32_get_itimerspec(struct itimerspec64 *its,
+			 const struct old_itimerspec32 __user *cits)
+{
+	int ret = sys32_get_timespec(&its->it_value, &cits->it_value);
+
+	return ret ?: sys32_get_timespec(&its->it_interval, &cits->it_interval);
+}
+EXPORT_SYMBOL_GPL(sys32_get_itimerspec);
+
+int sys32_put_itimerspec(struct old_itimerspec32 __user *cits,
+			 const struct itimerspec64 *its)
+{
+	int ret = sys32_put_timespec(&cits->it_value, &its->it_value);
+
+	return ret ?: sys32_put_timespec(&cits->it_interval, &its->it_interval);
+}
+EXPORT_SYMBOL_GPL(sys32_put_itimerspec);
+
+int sys32_get_timeval(struct __kernel_old_timeval *tv,
+		      const struct old_timeval32 __user *ctv)
+{
+	return (ctv == NULL ||
+		!access_rok(ctv, sizeof(*ctv)) ||
+		__xn_get_user(tv->tv_sec, &ctv->tv_sec) ||
+		__xn_get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timeval);
+
+int sys32_put_timeval(struct old_timeval32 __user *ctv,
+		      const struct __kernel_old_timeval *tv)
+{
+	return (ctv == NULL ||
+		!access_wok(ctv, sizeof(*ctv)) ||
+		__xn_put_user(tv->tv_sec, &ctv->tv_sec) ||
+		__xn_put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timeval);
+
+int sys32_get_timex(struct __kernel_timex *tx,
+		    const struct old_timex32 __user *ctx)
+{
+	struct __kernel_old_timeval time;
+	int ret;
+
+	memset(tx, 0, sizeof(*tx));
+
+	ret = sys32_get_timeval(&time, &ctx->time);
+	if (ret)
+		return ret;
+
+	tx->time.tv_sec = time.tv_sec;
+	tx->time.tv_usec = time.tv_usec;
+
+	if (!access_rok(ctx, sizeof(*ctx)) ||
+	    __xn_get_user(tx->modes, &ctx->modes) ||
+	    __xn_get_user(tx->offset, &ctx->offset) ||
+	    __xn_get_user(tx->freq, &ctx->freq) ||
+	    __xn_get_user(tx->maxerror, &ctx->maxerror) ||
+	    __xn_get_user(tx->esterror, &ctx->esterror) ||
+	    __xn_get_user(tx->status, &ctx->status) ||
+	    __xn_get_user(tx->constant, &ctx->constant) ||
+	    __xn_get_user(tx->precision, &ctx->precision) ||
+	    __xn_get_user(tx->tolerance, &ctx->tolerance) ||
+	    __xn_get_user(tx->tick, &ctx->tick) ||
+	    __xn_get_user(tx->ppsfreq, &ctx->ppsfreq) ||
+	    __xn_get_user(tx->jitter, &ctx->jitter) ||
+	    __xn_get_user(tx->shift, &ctx->shift) ||
+	    __xn_get_user(tx->stabil, &ctx->stabil) ||
+	    __xn_get_user(tx->jitcnt, &ctx->jitcnt) ||
+	    __xn_get_user(tx->calcnt, &ctx->calcnt) ||
+	    __xn_get_user(tx->errcnt, &ctx->errcnt) ||
+	    __xn_get_user(tx->stbcnt, &ctx->stbcnt))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timex);
+
+int sys32_put_timex(struct old_timex32 __user *ctx,
+		    const struct __kernel_timex *tx)
+{
+	struct __kernel_old_timeval time;
+	int ret;
+
+	time.tv_sec = tx->time.tv_sec;
+	time.tv_usec = tx->time.tv_usec;
+
+	ret = sys32_put_timeval(&ctx->time, &time);
+	if (ret)
+		return ret;
+
+	if (!access_wok(ctx, sizeof(*ctx)) ||
+	    __xn_put_user(tx->modes, &ctx->modes) ||
+	    __xn_put_user(tx->offset, &ctx->offset) ||
+	    __xn_put_user(tx->freq, &ctx->freq) ||
+	    __xn_put_user(tx->maxerror, &ctx->maxerror) ||
+	    __xn_put_user(tx->esterror, &ctx->esterror) ||
+	    __xn_put_user(tx->status, &ctx->status) ||
+	    __xn_put_user(tx->constant, &ctx->constant) ||
+	    __xn_put_user(tx->precision, &ctx->precision) ||
+	    __xn_put_user(tx->tolerance, &ctx->tolerance) ||
+	    __xn_put_user(tx->tick, &ctx->tick) ||
+	    __xn_put_user(tx->ppsfreq, &ctx->ppsfreq) ||
+	    __xn_put_user(tx->jitter, &ctx->jitter) ||
+	    __xn_put_user(tx->shift, &ctx->shift) ||
+	    __xn_put_user(tx->stabil, &ctx->stabil) ||
+	    __xn_put_user(tx->jitcnt, &ctx->jitcnt) ||
+	    __xn_put_user(tx->calcnt, &ctx->calcnt) ||
+	    __xn_put_user(tx->errcnt, &ctx->errcnt) ||
+	    __xn_put_user(tx->stbcnt, &ctx->stbcnt))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timex);
+
+int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds,
+		    size_t cfdsize)
+{
+	int rdpos, wrpos, rdlim = cfdsize / sizeof(compat_ulong_t);
+
+	if (cfds == NULL || !access_rok(cfds, cfdsize))
+		return -EFAULT;
+
+	for (rdpos = 0, wrpos = 0; rdpos < rdlim; rdpos++, wrpos++)
+		if (__xn_get_user(fds->fds_bits[wrpos], cfds->fds_bits + rdpos))
+			return -EFAULT;
+
+	return 0;
+}
+
+int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds,
+		    size_t fdsize)
+{
+	int rdpos, wrpos, wrlim = fdsize / sizeof(long);
+
+	if (cfds == NULL || !access_wok(cfds, wrlim * sizeof(compat_ulong_t)))
+		return -EFAULT;
+
+	for (rdpos = 0, wrpos = 0; wrpos < wrlim; rdpos++, wrpos++)
+		if (__xn_put_user(fds->fds_bits[rdpos], cfds->fds_bits + wrpos))
+			return -EFAULT;
+
+	return 0;
+}
+
+int sys32_get_param_ex(int policy,
+		       struct sched_param_ex *p,
+		       const struct compat_sched_param_ex __user *u_cp)
+{
+	struct compat_sched_param_ex cpex;
+
+	if (u_cp == NULL || cobalt_copy_from_user(&cpex, u_cp, sizeof(cpex)))
+		return
+			-EFAULT;
+
+	p->sched_priority = cpex.sched_priority;
+
+	switch (policy) {
+	case SCHED_SPORADIC:
+		p->sched_ss_low_priority = cpex.sched_ss_low_priority;
+		p->sched_ss_max_repl = cpex.sched_ss_max_repl;
+		p->sched_ss_repl_period.tv_sec = cpex.sched_ss_repl_period.tv_sec;
+		p->sched_ss_repl_period.tv_nsec = cpex.sched_ss_repl_period.tv_nsec;
+		p->sched_ss_init_budget.tv_sec = cpex.sched_ss_init_budget.tv_sec;
+		p->sched_ss_init_budget.tv_nsec = cpex.sched_ss_init_budget.tv_nsec;
+		break;
+	case SCHED_RR:
+		p->sched_rr_quantum.tv_sec = cpex.sched_rr_quantum.tv_sec;
+		p->sched_rr_quantum.tv_nsec = cpex.sched_rr_quantum.tv_nsec;
+		break;
+	case SCHED_TP:
+		p->sched_tp_partition = cpex.sched_tp_partition;
+		break;
+	case SCHED_QUOTA:
+		p->sched_quota_group = cpex.sched_quota_group;
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_param_ex);
+
+int sys32_put_param_ex(int policy,
+		       struct compat_sched_param_ex __user *u_cp,
+		       const struct sched_param_ex *p)
+{
+	struct compat_sched_param_ex cpex;
+
+	if (u_cp == NULL)
+		return -EFAULT;
+
+	cpex.sched_priority = p->sched_priority;
+
+	switch (policy) {
+	case SCHED_SPORADIC:
+		cpex.sched_ss_low_priority = p->sched_ss_low_priority;
+		cpex.sched_ss_max_repl = p->sched_ss_max_repl;
+		cpex.sched_ss_repl_period.tv_sec = p->sched_ss_repl_period.tv_sec;
+		cpex.sched_ss_repl_period.tv_nsec = p->sched_ss_repl_period.tv_nsec;
+		cpex.sched_ss_init_budget.tv_sec = p->sched_ss_init_budget.tv_sec;
+		cpex.sched_ss_init_budget.tv_nsec = p->sched_ss_init_budget.tv_nsec;
+		break;
+	case SCHED_RR:
+		cpex.sched_rr_quantum.tv_sec = p->sched_rr_quantum.tv_sec;
+		cpex.sched_rr_quantum.tv_nsec = p->sched_rr_quantum.tv_nsec;
+		break;
+	case SCHED_TP:
+		cpex.sched_tp_partition = p->sched_tp_partition;
+		break;
+	case SCHED_QUOTA:
+		cpex.sched_quota_group = p->sched_quota_group;
+		break;
+	}
+
+	return cobalt_copy_to_user(u_cp, &cpex, sizeof(cpex));
+}
+EXPORT_SYMBOL_GPL(sys32_put_param_ex);
+
+int sys32_get_mqattr(struct mq_attr *ap,
+		     const struct compat_mq_attr __user *u_cap)
+{
+	struct compat_mq_attr cattr;
+
+	if (u_cap == NULL ||
+	    cobalt_copy_from_user(&cattr, u_cap, sizeof(cattr)))
+		return -EFAULT;
+
+	ap->mq_flags = cattr.mq_flags;
+	ap->mq_maxmsg = cattr.mq_maxmsg;
+	ap->mq_msgsize = cattr.mq_msgsize;
+	ap->mq_curmsgs = cattr.mq_curmsgs;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_mqattr);
+
+int sys32_put_mqattr(struct compat_mq_attr __user *u_cap,
+		     const struct mq_attr *ap)
+{
+	struct compat_mq_attr cattr;
+
+	cattr.mq_flags = ap->mq_flags;
+	cattr.mq_maxmsg = ap->mq_maxmsg;
+	cattr.mq_msgsize = ap->mq_msgsize;
+	cattr.mq_curmsgs = ap->mq_curmsgs;
+
+	return u_cap == NULL ? -EFAULT :
+		cobalt_copy_to_user(u_cap, &cattr, sizeof(cattr));
+}
+EXPORT_SYMBOL_GPL(sys32_put_mqattr);
+
+int sys32_get_sigevent(struct sigevent *ev,
+		       const struct compat_sigevent *__user u_cev)
+{
+	struct compat_sigevent cev;
+	compat_int_t *cp;
+	int ret, *p;
+
+	if (u_cev == NULL)
+		return -EFAULT;
+
+	ret = cobalt_copy_from_user(&cev, u_cev, sizeof(cev));
+	if (ret)
+		return ret;
+
+	memset(ev, 0, sizeof(*ev));
+	ev->sigev_value.sival_ptr = compat_ptr(cev.sigev_value.sival_ptr);
+	ev->sigev_signo = cev.sigev_signo;
+	ev->sigev_notify = cev.sigev_notify;
+	/*
+	 * Extensions may define extra fields we don't know about in
+	 * the padding area, so we have to load it entirely.
+	 */
+	p = ev->_sigev_un._pad;
+	cp = cev._sigev_un._pad;
+	while (p < &ev->_sigev_un._pad[ARRAY_SIZE(ev->_sigev_un._pad)] &&
+	       cp < &cev._sigev_un._pad[ARRAY_SIZE(cev._sigev_un._pad)])
+		*p++ = *cp++;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigevent);
+
+int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset)
+{
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+
+	if (cobalt_copy_from_user(&v, u_cset, sizeof(compat_sigset_t)))
+		return -EFAULT;
+	switch (_NSIG_WORDS) {
+	case 4: set->sig[3] = v.sig[6] | (((long)v.sig[7]) << 32 );
+	case 3: set->sig[2] = v.sig[4] | (((long)v.sig[5]) << 32 );
+	case 2: set->sig[1] = v.sig[2] | (((long)v.sig[3]) << 32 );
+	case 1: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32 );
+	}
+#else
+	if (cobalt_copy_from_user(set, u_cset, sizeof(compat_sigset_t)))
+		return -EFAULT;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigset);
+
+int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set)
+{
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+	switch (_NSIG_WORDS) {
+	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+	}
+	return cobalt_copy_to_user(u_cset, &v, sizeof(*u_cset)) ? -EFAULT : 0;
+#else
+	return cobalt_copy_to_user(u_cset, set, sizeof(*u_cset)) ? -EFAULT : 0;
+#endif
+}
+EXPORT_SYMBOL_GPL(sys32_put_sigset);
+
+int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval)
+{
+	union compat_sigval cval;
+	int ret;
+
+	if (u_cval == NULL)
+		return -EFAULT;
+
+	ret = cobalt_copy_from_user(&cval, u_cval, sizeof(cval));
+	if (ret)
+		return ret;
+
+	val->sival_ptr = compat_ptr(cval.sival_ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigval);
+
+int sys32_put_siginfo(void __user *u_si, const struct siginfo *si,
+		      int overrun)
+{
+	struct compat_siginfo __user *u_p = u_si;
+	int ret;
+
+	if (u_p == NULL)
+		return -EFAULT;
+
+	ret = __xn_put_user(si->si_signo, &u_p->si_signo);
+	ret |= __xn_put_user(si->si_errno, &u_p->si_errno);
+	ret |= __xn_put_user(si->si_code, &u_p->si_code);
+
+	/*
+	 * Copy the generic/standard siginfo bits to userland.
+	 */
+	switch (si->si_code) {
+	case SI_TIMER:
+		ret |= __xn_put_user(si->si_tid, &u_p->si_tid);
+		ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr);
+		ret |= __xn_put_user(overrun, &u_p->si_overrun);
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr);
+		fallthrough;
+	case SI_USER:
+		ret |= __xn_put_user(si->si_pid, &u_p->si_pid);
+		ret |= __xn_put_user(si->si_uid, &u_p->si_uid);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sys32_put_siginfo);
+
+int sys32_get_msghdr(struct user_msghdr *msg,
+		     const struct compat_msghdr __user *u_cmsg)
+{
+	compat_uptr_t tmp1, tmp2, tmp3;
+
+	if (u_cmsg == NULL ||
+	    !access_rok(u_cmsg, sizeof(*u_cmsg)) ||
+	    __xn_get_user(tmp1, &u_cmsg->msg_name) ||
+	    __xn_get_user(msg->msg_namelen, &u_cmsg->msg_namelen) ||
+	    __xn_get_user(tmp2, &u_cmsg->msg_iov) ||
+	    __xn_get_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) ||
+	    __xn_get_user(tmp3, &u_cmsg->msg_control) ||
+	    __xn_get_user(msg->msg_controllen, &u_cmsg->msg_controllen) ||
+	    __xn_get_user(msg->msg_flags, &u_cmsg->msg_flags))
+		return -EFAULT;
+
+	if (msg->msg_namelen > sizeof(struct sockaddr_storage))
+		msg->msg_namelen = sizeof(struct sockaddr_storage);
+
+	msg->msg_name = compat_ptr(tmp1);
+	msg->msg_iov = compat_ptr(tmp2);
+	msg->msg_control = compat_ptr(tmp3);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_msghdr);
+
+int sys32_get_mmsghdr(struct mmsghdr *mmsg,
+		      const struct compat_mmsghdr __user *u_cmmsg)
+{
+	if (u_cmmsg == NULL ||
+	    !access_rok(u_cmmsg, sizeof(*u_cmmsg)) ||
+	    __xn_get_user(mmsg->msg_len, &u_cmmsg->msg_len))
+		return -EFAULT;
+
+	return sys32_get_msghdr(&mmsg->msg_hdr, &u_cmmsg->msg_hdr);
+}
+EXPORT_SYMBOL_GPL(sys32_get_mmsghdr);
+
+int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg,
+		     const struct user_msghdr *msg)
+{
+	if (u_cmsg == NULL ||
+	    !access_wok(u_cmsg, sizeof(*u_cmsg)) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_name), &u_cmsg->msg_name) ||
+	    __xn_put_user(msg->msg_namelen, &u_cmsg->msg_namelen) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_iov), &u_cmsg->msg_iov) ||
+	    __xn_put_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_control), &u_cmsg->msg_control) ||
+	    __xn_put_user(msg->msg_controllen, &u_cmsg->msg_controllen) ||
+	    __xn_put_user(msg->msg_flags, &u_cmsg->msg_flags))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_msghdr);
+
+int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg,
+		      const struct mmsghdr *mmsg)
+{
+	if (u_cmmsg == NULL ||
+	    !access_wok(u_cmmsg, sizeof(*u_cmmsg)) ||
+	    __xn_put_user(mmsg->msg_len, &u_cmmsg->msg_len))
+		return -EFAULT;
+
+	return sys32_put_msghdr(&u_cmmsg->msg_hdr, &mmsg->msg_hdr);
+}
+EXPORT_SYMBOL_GPL(sys32_put_mmsghdr);
+
+int sys32_get_iovec(struct iovec *iov,
+		    const struct compat_iovec __user *u_ciov,
+		    int ciovlen)
+{
+	const struct compat_iovec __user *p;
+	struct compat_iovec ciov;
+	int ret, n;
+
+	for (n = 0, p = u_ciov; n < ciovlen; n++, p++) {
+		ret = cobalt_copy_from_user(&ciov, p, sizeof(ciov));
+		if (ret)
+			return ret;
+		iov[n].iov_base = compat_ptr(ciov.iov_base);
+		iov[n].iov_len = ciov.iov_len;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_iovec);
+
+int sys32_put_iovec(struct compat_iovec __user *u_ciov,
+		    const struct iovec *iov,
+		    int iovlen)
+{
+	struct compat_iovec __user *p;
+	struct compat_iovec ciov;
+	int ret, n;
+
+	for (n = 0, p = u_ciov; n < iovlen; n++, p++) {
+		ciov.iov_base = ptr_to_compat(iov[n].iov_base);
+		ciov.iov_len = iov[n].iov_len;
+		ret = cobalt_copy_to_user(p, &ciov, sizeof(*p));
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_iovec);
--- linux/kernel/xenomai/posix/event.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-patched/kernel/xenomai/posix/event.h	2022-03-21 12:58:28.99889277