--- zzzz-none-000/linux-4.4.271/kernel/trace/trace.c	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/kernel/trace/trace.c	2023-04-19 10:22:30.000000000 +0000
@@ -1363,6 +1363,7 @@
 struct saved_cmdlines_buffer {
 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 	unsigned *map_cmdline_to_pid;
+	unsigned *map_cmdline_to_tgid;
 	unsigned cmdline_num;
 	int cmdline_idx;
 	char *saved_cmdlines;
@@ -1396,12 +1397,23 @@
 		return -ENOMEM;
 	}
 
+	s->map_cmdline_to_tgid = kmalloc_array(val,
+					       sizeof(*s->map_cmdline_to_tgid),
+					       GFP_KERNEL);
+	if (!s->map_cmdline_to_tgid) {
+		kfree(s->map_cmdline_to_pid);
+		kfree(s->saved_cmdlines);
+		return -ENOMEM;
+	}
+
 	s->cmdline_idx = 0;
 	s->cmdline_num = val;
 	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
 	       sizeof(s->map_pid_to_cmdline));
 	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
 	       val * sizeof(*s->map_cmdline_to_pid));
+	memset(s->map_cmdline_to_tgid, NO_CMDLINE_MAP,
+	       val * sizeof(*s->map_cmdline_to_tgid));
 
 	return 0;
 }
@@ -1570,14 +1582,17 @@
 
 	tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
 
+	preempt_disable();
 	/*
 	 * It's not the end of the world if we don't get
 	 * the lock, but we also don't want to spin
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
 	 */
-	if (!arch_spin_trylock(&trace_cmdline_lock))
+	if (!arch_spin_trylock(&trace_cmdline_lock)) {
+		preempt_enable();
 		return 0;
+	}
 
 	idx = savedcmd->map_pid_to_cmdline[tpid];
 	if (idx == NO_CMDLINE_MAP) {
@@ -1589,8 +1604,9 @@
 
 	savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
 	set_cmdline(idx, tsk->comm);
-
+	savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid;
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 
 	return 1;
 }
@@ -1633,6 +1649,35 @@
 	preempt_enable();
 }
 
+static int __find_tgid_locked(int pid)
+{
+	unsigned map;
+	int tgid;
+
+	map = savedcmd->map_pid_to_cmdline[pid];
+	if (map != NO_CMDLINE_MAP)
+		tgid = savedcmd->map_cmdline_to_tgid[map];
+	else
+		tgid = -1;
+
+	return tgid;
+}
+
+int trace_find_tgid(int pid)
+{
+	int tgid;
+
+	preempt_disable();
+	arch_spin_lock(&trace_cmdline_lock);
+
+	tgid = __find_tgid_locked(pid);
+
+	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
+
+	return tgid;
+}
+
 void tracing_record_cmdline(struct task_struct *tsk)
 {
 	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
@@ -1773,11 +1818,151 @@
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
 
+#ifdef CONFIG_SRD_TRACE
+
+#include
+#include
+#include
+
+#define SRD_NUM_ENTRIES (8 << 10)
+
+#define SRD_WITH_PARENT 0
+#define SRD_WITH_JIFFIES 0
+
+struct srd_record {
+	void *ip;
+#if SRD_WITH_PARENT
+	void *parent;
+#endif
+};
+
+struct srd_percpu {
+	uint32_t index;
+#if SRD_WITH_JIFFIES
+	uint32_t jiffies;
+#endif
+};
+
+struct silent_reboot_debug {
+	atomic_t index;
+	struct srd_record *r;
+	dma_addr_t r_pa;
+	int ncpu;
+	struct srd_percpu cpu[CONFIG_NR_CPUS];
+} *srd;
+
+#define SRD_REC_SIZE_PER_CPU (SRD_NUM_ENTRIES * sizeof(*srd->r))
+
+dma_addr_t srd_pa;
+struct srd_record *srd_rec[CONFIG_NR_CPUS];
+
+void srd_info_record(unsigned long ip, unsigned long parent_ip)
+{
+	unsigned long flags;
+	struct srd_record *r;
+	int index, cpu = smp_processor_id();
+
+	if (!srd || !srd->r)
+		return;
+
+	local_irq_save(flags);
+
+#if SRD_WITH_JIFFIES
+	srd->cpu[cpu].jiffies = jiffies;
+#endif
+	index = srd->cpu[cpu].index++;
+
+	r = srd_rec[cpu] + (index % SRD_NUM_ENTRIES);
+
+	r->ip = (void *)ip;
+#if SRD_WITH_PARENT
+	r->parent = (void *)parent_ip;
+#endif
+
+	local_irq_restore(flags);
+}
+
+#define SRD_PRINT_STR "srd: 0x%p -> 0x%p 0x%x\n" \
+		      "ip: 0x%p -> 0x%p 0x%x\n", \
+		      srd, (void *)srd_pa, \
+		      sizeof(*srd) / sizeof(uint32_t), \
+		      srd->r, (void *)srd->r_pa, \
+		      (SRD_REC_SIZE_PER_CPU * srd->ncpu) / sizeof(uint32_t)
+static ssize_t
+tracing_srd_read(struct file *filp, char __user *ubuf,
+		 size_t cnt, loff_t *ppos)
+{
+	char str[256];
+	int count;
+
+	if (srd && !*ppos) {
+		count = snprintf(str, sizeof(str), SRD_PRINT_STR);
+		return simple_read_from_buffer(ubuf, cnt, ppos, str, count);
+	} else {
+		return 0;
+	}
+}
+
+static const struct file_operations tracing_srd_fops = {
+	.read = tracing_srd_read,
+};
+
+
+void srd_buf_init(struct dentry *d_tracer)
+{
+	int n = 0, cpu;
+
+	/*
+	 * Would have been ideal to do this in tracer_alloc_buffers.
+	 * However, the dma-map subsystem is not up during early_initcall,
+	 * and we get the following error
+	 * __dma_alloc_remap: not initialised
+	 * Backtrace:
+	 * [] (dump_backtrace+0x0/0x120) from [] (dump_stack+0x20/0x24)
+	 *  r6:00001000 r5:c0d7b640 r4:ddf32000 r3:c0881f44
+	 * [] (dump_stack+0x0/0x24) from [] (arm_dma_alloc+0x3e0/0x410)
+	 * [] (arm_dma_alloc+0x0/0x410) from [] (tracer_alloc_buffers+0x130/0x2c8)
+	 * [] (tracer_alloc_buffers+0x0/0x2c8) from [] (do_one_initcall+0x40/0x17c)
+	 *  r7:00000000 r6:c071140c r5:c07368e4 r4:c07368e0
+	 * [] (do_one_initcall+0x0/0x17c) from [] (kernel_init+0x6c/0x1cc)
+	 *  r9:00000000 r8:00000000 r7:00000013 r6:c0066f28 r5:c07368e4
+	 *  r4:c07368e0
+	 * [] (kernel_init+0x0/0x1cc) from [] (do_exit+0x0/0x864)
+	 */
+
+	srd = dma_alloc_coherent(NULL, sizeof(*srd), &srd_pa, GFP_KERNEL);
+	if (!srd)
+		return;
+
+	for_each_possible_cpu(cpu)
+		n++;
+
+	srd->ncpu = n;
+	srd->r = dma_alloc_coherent(NULL, SRD_REC_SIZE_PER_CPU * n,
+				    &srd->r_pa, GFP_KERNEL);
+	if (!srd->r) {
+		dma_free_coherent(NULL, sizeof(*srd), srd, srd_pa);
+		return;
+	}
+
+	for_each_possible_cpu(cpu)
+		srd_rec[cpu] = &srd->r[cpu * SRD_NUM_ENTRIES];
+
+	printk(SRD_PRINT_STR);
+
+	trace_create_file("srd", 0444, d_tracer,
+			  srd, &tracing_srd_fops);
+}
+#endif /* CONFIG_SRD_TRACE */
+
 void
 trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
 	       int pc)
 {
+#ifdef CONFIG_SRD_TRACE
+	srd_info_record(ip, parent_ip);
+#else
 	struct trace_event_call *call = &event_function;
 	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 	struct ring_buffer_event *event;
@@ -1793,6 +1978,7 @@
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
+#endif /* CONFIG_SRD_TRACE */
 }
 
 #ifdef CONFIG_STACKTRACE
@@ -3954,10 +4140,15 @@
 {
 	char buf[64];
 	int r;
+	unsigned int n;
 
+	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
-	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
+	n = savedcmd->cmdline_num;
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
+
+	r = scnprintf(buf, sizeof(buf), "%u\n", n);
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
@@ -3966,6 +4157,7 @@
 {
 	kfree(s->saved_cmdlines);
 	kfree(s->map_cmdline_to_pid);
+	kfree(s->map_cmdline_to_tgid);
 	kfree(s);
 }
 
@@ -3982,10 +4174,12 @@
 		return -ENOMEM;
 	}
 
+	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
 	savedcmd_temp = savedcmd;
 	savedcmd = s;
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 	free_saved_cmdlines_buffer(savedcmd_temp);
 	return 0;
 }
@@ -4198,6 +4392,78 @@
 }
 
 static ssize_t
+tracing_saved_tgids_read(struct file *file, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	char *file_buf;
+	char *buf;
+	int len = 0;
+	int i;
+	int *pids;
+	int n = 0;
+
+	preempt_disable();
+	arch_spin_lock(&trace_cmdline_lock);
+
+	pids = kmalloc_array(savedcmd->cmdline_num, 2*sizeof(int), GFP_KERNEL);
+	if (!pids) {
+		arch_spin_unlock(&trace_cmdline_lock);
+		preempt_enable();
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < savedcmd->cmdline_num; i++) {
+		int pid;
+
+		pid = savedcmd->map_cmdline_to_pid[i];
+		if (pid == -1 || pid == NO_CMDLINE_MAP)
+			continue;
+
+		pids[n] = pid;
+		pids[n+1] = __find_tgid_locked(pid);
+		n += 2;
+	}
+	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
+
+	if (n == 0) {
+		kfree(pids);
+		return 0;
+	}
+
+	/* enough to hold max pair of pids + space, lr and nul */
+	len = n * 12;
+	file_buf = kmalloc(len, GFP_KERNEL);
+	if (!file_buf) {
+		kfree(pids);
+		return -ENOMEM;
+	}
+
+	buf = file_buf;
+	for (i = 0; i < n && len > 0; i += 2) {
+		int r;
+
+		r = snprintf(buf, len, "%d %d\n", pids[i], pids[i+1]);
+		buf += r;
+		len -= r;
+	}
+
+	len = simple_read_from_buffer(ubuf, cnt, ppos,
+				      file_buf, buf - file_buf);
+
+	kfree(file_buf);
+	kfree(pids);
+
+	return len;
+}
+
+static const struct file_operations tracing_saved_tgids_fops = {
+	.open = tracing_open_generic,
+	.read = tracing_saved_tgids_read,
+	.llseek = generic_file_llseek,
+};
+
+static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
@@ -7051,6 +7317,10 @@
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
 
+#ifdef CONFIG_SRD_TRACE
+	srd_buf_init(d_tracer);
+#endif /* CONFIG_SRD_TRACE */
+
 	create_trace_instances(d_tracer);
 
 	update_tracer_options(&global_trace);