{"total":16,"items":[{"citing_arxiv_id":"2605.11617","ref_index":25,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"MIST: Reliable Streaming Decision Trees for Online Class-Incremental Learning via McDiarmid Bound","primary_cat":"cs.LG","submitted_at":"2026-05-12T06:45:00+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":7.0,"formal_verification":"none","one_line_summary":"MIST fixes unreliable splits in streaming decision trees for class-incremental learning by using a K-independent McDiarmid bound on Gini impurity, Bayesian moment projection for knowledge transfer, and KLL quantile sketches for adaptive leaf predictions.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2605.11255","ref_index":12,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"HEBATRON: A Hebrew-Specialized Open-Weight Mixture-of-Experts Language Model","primary_cat":"cs.CL","submitted_at":"2026-05-11T21:27:53+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":7.0,"formal_verification":"none","one_line_summary":"Hebatron is the first open-weight Hebrew MoE LLM adapted from Nemotron-3, reaching 73.8% on Hebrew reasoning benchmarks while activating only 3B parameters per pass and supporting 65k-token context.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2605.08949","ref_index":2,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Muon-OGD: Muon-based Spectral Orthogonal Gradient Projection for LLM Continual Learning","primary_cat":"cs.LG","submitted_at":"2026-05-09T13:42:08+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":5.0,"formal_verification":"none","one_line_summary":"Muon-OGD integrates Muon-style spectral-norm geometry with orthogonal gradient constraints to improve the stability-plasticity trade-off during sequential LLM adaptation.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2605.03364","ref_index":9,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Dynamic Distillation and Gradient Consistency for Robust Long-Tailed Incremental Learning","primary_cat":"cs.CV","submitted_at":"2026-05-05T04:50:06+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":4.0,"formal_verification":"none","one_line_summary":"Gradient consistency regularization and entropy-driven dynamic distillation improve accuracy by up to 5% in long-tailed incremental learning, with strong gains in majority-to-minority task ordering.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2605.02675","ref_index":35,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Online Generalised Predictive Coding","primary_cat":"stat.ML","submitted_at":"2026-05-04T14:55:37+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":5.0,"formal_verification":"none","one_line_summary":"Online generalised predictive coding (ODEM) tracks latent states in nonlinear and chaotic generative models by separating temporal scales for fast Bayesian belief updating and slow parameter learning.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2605.02509","ref_index":12,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"MPCS: 
Neuroplastic Continual Learning via Multi-Component Plasticity and Topology-Aware EWC","primary_cat":"cs.LG","submitted_at":"2026-05-04T12:04:09+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":4.0,"formal_verification":"none","one_line_summary":"MPCS integrates eleven plasticity mechanisms and reaches a Normalized Efficiency Score of 94.2 on a 31-task benchmark, with ablations showing that removing EWC and Hebbian updates yields higher performance at lower cost.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2605.02105","ref_index":69,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Sharpness-Aware Pretraining Mitigates Catastrophic Forgetting","primary_cat":"cs.LG","submitted_at":"2026-05-04T00:02:23+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":6.0,"formal_verification":"none","one_line_summary":"Sharpness-aware pretraining and related flat-minima interventions reduce catastrophic forgetting by up to 80% after post-training across 20M-150M models and by 31-40% at 1B scale.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2605.00195","ref_index":26,"ref_count":2,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Diversity in Large Language Models under Supervised Fine-Tuning","primary_cat":"cs.LG","submitted_at":"2026-04-30T20:20:59+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":6.0,"formal_verification":"none","one_line_summary":"TOFU loss mitigates the narrowing of generative diversity in LLMs after supervised fine-tuning by addressing neglect of low-frequency patterns and forgetting of prior knowledge.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2604.27031","ref_index":7,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"NORACL: Neurogenesis for Oracle-free Resource-Adaptive Continual Learning","primary_cat":"cs.LG","submitted_at":"2026-04-29T15:04:25+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":6.0,"formal_verification":"none","one_line_summary":"NORACL dynamically grows network capacity via neurogenesis-inspired signals to achieve oracle-level continual learning performance without pre-specifying architecture size.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2604.24637","ref_index":12,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Cortex-Inspired Continual Learning: Unsupervised Instantiation and Recovery of Functional Task Networks","primary_cat":"cs.LG","submitted_at":"2026-04-27T16:06:28+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":6.0,"formal_verification":"none","one_line_summary":"FTN achieves near-zero forgetting on continual learning benchmarks by isolating task subnetworks via self-organizing binary masks generated through gradient descent, smoothing, and k-winner-take-all.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2604.21930","ref_index":17,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Temporal Taskification in Streaming Continual Learning: A Source of Evaluation 
Instability","primary_cat":"cs.LG","submitted_at":"2026-04-23T17:59:54+00:00","verdict":"CONDITIONAL","verdict_confidence":"LOW","novelty_score":6.0,"formal_verification":"none","one_line_summary":"Different valid temporal partitions of the same streaming dataset can produce materially different rankings and performance numbers for continual learning methods.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2604.16801","ref_index":42,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Continuous Limits of Coupled Flows in Representation Learning","primary_cat":"cs.LG","submitted_at":"2026-04-18T03:19:54+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":6.0,"formal_verification":"none","one_line_summary":"Discrete decentralized learning dynamics on manifolds converge uniformly to an overdamped Langevin SDE whose stationary states produce orthogonally disentangled, linearly separable features.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2604.13627","ref_index":10,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"(How) Learning Rates Regulate Catastrophic Overtraining","primary_cat":"cs.LG","submitted_at":"2026-04-15T08:53:42+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":5.0,"formal_verification":"none","one_line_summary":"Learning rate decay during SFT increases pretrained model sharpness, which exacerbates catastrophic forgetting and causes overtraining in LLMs.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2604.07386","ref_index":37,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Label Leakage Attacks in Machine Unlearning: A Parameter and Inversion-Based Approach","primary_cat":"cs.CR","submitted_at":"2026-04-08T03:57:48+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":6.0,"formal_verification":"none","one_line_summary":"Parameter-difference and model-inversion attacks can identify forgotten classes after machine unlearning on standard image datasets.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2604.02921","ref_index":13,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"Debiasing LLMs by Fine-tuning","primary_cat":"q-fin.GN","submitted_at":"2026-04-03T09:37:07+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":6.0,"formal_verification":"none","one_line_summary":"Supervised fine-tuning with LoRA on rational benchmark forecasts corrects extrapolation bias out-of-sample in LLM predictions for controlled experiments and cross-sectional stock returns.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null},{"citing_arxiv_id":"2308.00352","ref_index":255,"ref_count":1,"confidence":0.9,"is_internal_anchor":false,"paper_title":"MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework","primary_cat":"cs.AI","submitted_at":"2023-08-01T07:49:10+00:00","verdict":"UNVERDICTED","verdict_confidence":"LOW","novelty_score":6.0,"formal_verification":"none","one_line_summary":"MetaGPT embeds human SOPs into LLM prompts to create role-specialized agent teams that produce more coherent solutions on collaborative software engineering tasks than prior chat-based multi-agent 
systems.","context_count":0,"top_context_role":null,"top_context_polarity":null,"context_text":null}],"limit":50,"offset":0}