{"work":{"id":"b2ac5b60-daa9-435b-9369-12271e126edd","openalex_id":null,"doi":null,"arxiv_id":"1512.03012","raw_key":null,"title":"ShapeNet: An Information-Rich 3D Model Repository","authors":null,"authors_text":"Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li","year":2015,"venue":"cs.GR","abstract":"We present ShapeNet: a richly-annotated, large-scale repository of shapes represented by 3D CAD models of objects. ShapeNet contains 3D models from a multitude of semantic categories and organizes them under the WordNet taxonomy. It is a collection of datasets providing many semantic annotations for each 3D model such as consistent rigid alignments, parts and bilateral symmetry planes, physical sizes, keywords, as well as other planned annotations. Annotations are made available through a public web-based interface to enable data visualization of object attributes, promote data-driven geometric analysis, and provide a large-scale quantitative benchmark for research in computer graphics and vision. At the time of this technical report, ShapeNet has indexed more than 3,000,000 models, 220,000 models out of which are classified into 3,135 categories (WordNet synsets). In this report we describe the ShapeNet effort as a whole, provide details for all currently available datasets, and summarize future plans.","external_url":"https://arxiv.org/abs/1512.03012","cited_by_count":null,"metadata_source":"pith","metadata_fetched_at":"2026-05-14T21:17:58.529820+00:00","pith_arxiv_id":"1512.03012","created_at":"2026-05-09T05:50:26.453379+00:00","updated_at":"2026-05-14T21:17:58.529820+00:00","title_quality_ok":true,"display_title":"ShapeNet: An Information-Rich 3D Model Repository","render_title":"ShapeNet: An Information-Rich 3D Model Repository"},"hub":{"state":{"work_id":"b2ac5b60-daa9-435b-9369-12271e126edd","tier":"hub","tier_reason":"10+ Pith inbound or 1,000+ external citations","pith_inbound_count":50,"external_cited_by_count":null,"distinct_field_count":9,"first_pith_cited_at":"2019-03-06T14:50:02+00:00","last_pith_cited_at":"2026-05-13T16:33:10+00:00","author_build_status":"not_needed","summary_status":"needed","contexts_status":"needed","graph_status":"needed","ask_index_status":"not_needed","reader_status":"not_needed","recognition_status":"not_needed","updated_at":"2026-05-15T00:16:14.261626+00:00","tier_text":"hub"},"tier":"hub","role_counts":[{"context_role":"background","n":1}],"polarity_counts":[{"context_polarity":"background","n":1}],"runs":{"context_extract":{"job_type":"context_extract","status":"succeeded","result":{"enqueued_papers":25},"error":null,"updated_at":"2026-05-14T15:52:08.961999+00:00"},"graph_features":{"job_type":"graph_features","status":"succeeded","result":{"co_cited":[{"title":"DINOv2: Learning Robust Visual Features without Supervision","work_id":"26b304e5-b54a-4f26-be7e-83299eca52e4","shared_citers":9},{"title":"DreamFusion: Text-to-3D using 2D Diffusion","work_id":"7529df29-9980-4a8a-b55e-307e3c2f357b","shared_citers":5},{"title":"InstantMesh: Efficient 3D Mesh Generation from a Single Image with Sparse-view Large Reconstruction Models","work_id":"fc55eabb-0871-4dc4-8bab-b572e0d2aac4","shared_citers":5},{"title":"Triposr: Fast 3d object reconstruction from a single image","work_id":"a3f1b90f-95df-4739-8387-e23ce4279f76","shared_citers":5},{"title":"arXiv2506.15442(2025) 10","work_id":"ee52f4d7-462f-4491-9549-4160820ae563","shared_citers":4},{"title":"arXiv preprint arXiv:2007.08501 , year=","work_id":"28c48e8f-4d02-49fb-9564-fa686720784e","shared_citers":4},{"title":"Auto-Encoding Variational Bayes","work_id":"97d95295-30e1-42b4-bbf6-85f0fa4edb44","shared_citers":4},{"title":"Decoupled Weight Decay Regularization","work_id":"07ef7360-d385-4033-83f7-8384a6325204","shared_citers":4},{"title":"Depth Anything 3: Recovering the Visual Space from Any Views","work_id":"0a54b500-1e9d-46c2-85eb-8e16cbac8461","shared_citers":4},{"title":"Flow Matching for Generative Modeling","work_id":"6edb71c4-5d64-40af-a394-9757ea051a36","shared_citers":4},{"title":"LLaMA: Open and Efficient Foundation Language Models","work_id":"c018fc23-6f3f-4035-9d02-28a2173b2b9d","shared_citers":4},{"title":"Qwen3-VL Technical Report","work_id":"1fe243aa-e3c0-4da6-b391-4cbcfc88d5c0","shared_citers":4},{"title":"The Llama 3 Herd of Models","work_id":"1549a635-88af-4ac1-acfe-51ae7bb53345","shared_citers":4},{"title":"The Replica Dataset: A Digital Replica of Indoor Spaces","work_id":"8145b3bf-a202-46d8-a3bc-fad366dd5d4a","shared_citers":4},{"title":"$\\pi_0$: A Vision-Language-Action Flow Model for General Robot Control","work_id":"f790abdc-a796-482f-a40d-f8ee035ecfc2","shared_citers":3},{"title":"$\\pi_{0.5}$: a Vision-Language-Action Model with Open-World Generalization","work_id":"d1ad7304-d09a-49bc-809e-846439f6aff9","shared_citers":3},{"title":"$\\pi^3$: Permutation-Equivariant Visual Geometry Learning","work_id":"8ab9cfd6-a60d-45e6-a572-9b4db82b8527","shared_citers":3},{"title":"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale","work_id":"e96730e3-129b-4db6-b981-15ab7932e297","shared_citers":3},{"title":"arXiv preprint arXiv:2405.10314 (2024)","work_id":"16da851d-11ab-482c-8a09-2978d97a967c","shared_citers":3},{"title":"arXiv preprint arXiv:2508.15769 (2025)","work_id":"83d3fcf2-2eae-4ce7-b7d9-9911bcb76d2b","shared_citers":3},{"title":"Classifier-Free Diffusion Guidance","work_id":"acf2c588-c088-4a6c-938e-150ad7c666d7","shared_citers":3},{"title":"CoRR , volume =","work_id":"9d7f0b29-b9ca-457f-9518-4a1506b43369","shared_citers":3},{"title":"CoRR , volume =","work_id":"78943aa6-b9e9-4fc1-8d71-e6bcc0b9a0b4","shared_citers":3},{"title":"Denoising Diffusion Implicit Models","work_id":"8fa2128b-d18c-405c-ac92-0e669cf89ac0","shared_citers":3}],"time_series":[{"n":1,"year":2019},{"n":2,"year":2024},{"n":1,"year":2025},{"n":43,"year":2026}],"dependency_candidates":[]},"error":null,"updated_at":"2026-05-14T16:02:22.914081+00:00"},"identity_refresh":{"job_type":"identity_refresh","status":"succeeded","result":{"items":[{"title":"Qwen3 Technical Report","outcome":"unchanged","work_id":"25a4e30c-1232-48e7-9925-02fa12ba7c9e","resolver":"local_arxiv","confidence":0.98,"old_work_id":"25a4e30c-1232-48e7-9925-02fa12ba7c9e"}],"counts":{"fixed":0,"merged":0,"unchanged":1,"quarantined":0,"needs_external_resolution":0},"errors":[],"attempted":1},"error":null,"updated_at":"2026-05-14T15:52:11.344446+00:00"},"summary_claims":{"job_type":"summary_claims","status":"succeeded","result":{"title":"ShapeNet: An Information-Rich 3D Model Repository","claims":[{"claim_text":"We present ShapeNet: a richly-annotated, large-scale repository of shapes represented by 3D CAD models of objects. ShapeNet contains 3D models from a multitude of semantic categories and organizes them under the WordNet taxonomy. It is a collection of datasets providing many semantic annotations for each 3D model such as consistent rigid alignments, parts and bilateral symmetry planes, physical sizes, keywords, as well as other planned annotations. Annotations are made available through a public web-based interface to enable data visualization of object attributes, promote data-driven geometri","claim_type":"abstract","evidence_strength":"source_metadata"}],"why_cited":"Pith tracks ShapeNet: An Information-Rich 3D Model Repository because it crossed a citation-hub threshold.","role_counts":[]},"error":null,"updated_at":"2026-05-14T16:02:31.805832+00:00"}},"summary":{"title":"ShapeNet: An Information-Rich 3D Model Repository","claims":[{"claim_text":"We present ShapeNet: a richly-annotated, large-scale repository of shapes represented by 3D CAD models of objects. ShapeNet contains 3D models from a multitude of semantic categories and organizes them under the WordNet taxonomy. It is a collection of datasets providing many semantic annotations for each 3D model such as consistent rigid alignments, parts and bilateral symmetry planes, physical sizes, keywords, as well as other planned annotations. Annotations are made available through a public web-based interface to enable data visualization of object attributes, promote data-driven geometri","claim_type":"abstract","evidence_strength":"source_metadata"}],"why_cited":"Pith tracks ShapeNet: An Information-Rich 3D Model Repository because it crossed a citation-hub threshold.","role_counts":[]},"graph":{"co_cited":[{"title":"DINOv2: Learning Robust Visual Features without Supervision","work_id":"26b304e5-b54a-4f26-be7e-83299eca52e4","shared_citers":9},{"title":"DreamFusion: Text-to-3D using 2D Diffusion","work_id":"7529df29-9980-4a8a-b55e-307e3c2f357b","shared_citers":5},{"title":"InstantMesh: Efficient 3D Mesh Generation from a Single Image with Sparse-view Large Reconstruction Models","work_id":"fc55eabb-0871-4dc4-8bab-b572e0d2aac4","shared_citers":5},{"title":"Triposr: Fast 3d object reconstruction from a single image","work_id":"a3f1b90f-95df-4739-8387-e23ce4279f76","shared_citers":5},{"title":"arXiv2506.15442(2025) 10","work_id":"ee52f4d7-462f-4491-9549-4160820ae563","shared_citers":4},{"title":"arXiv preprint arXiv:2007.08501 , year=","work_id":"28c48e8f-4d02-49fb-9564-fa686720784e","shared_citers":4},{"title":"Auto-Encoding Variational Bayes","work_id":"97d95295-30e1-42b4-bbf6-85f0fa4edb44","shared_citers":4},{"title":"Decoupled Weight Decay Regularization","work_id":"07ef7360-d385-4033-83f7-8384a6325204","shared_citers":4},{"title":"Depth Anything 3: Recovering the Visual Space from Any Views","work_id":"0a54b500-1e9d-46c2-85eb-8e16cbac8461","shared_citers":4},{"title":"Flow Matching for Generative Modeling","work_id":"6edb71c4-5d64-40af-a394-9757ea051a36","shared_citers":4},{"title":"LLaMA: Open and Efficient Foundation Language Models","work_id":"c018fc23-6f3f-4035-9d02-28a2173b2b9d","shared_citers":4},{"title":"Qwen3-VL Technical Report","work_id":"1fe243aa-e3c0-4da6-b391-4cbcfc88d5c0","shared_citers":4},{"title":"The Llama 3 Herd of Models","work_id":"1549a635-88af-4ac1-acfe-51ae7bb53345","shared_citers":4},{"title":"The Replica Dataset: A Digital Replica of Indoor Spaces","work_id":"8145b3bf-a202-46d8-a3bc-fad366dd5d4a","shared_citers":4},{"title":"$\\pi_0$: A Vision-Language-Action Flow Model for General Robot Control","work_id":"f790abdc-a796-482f-a40d-f8ee035ecfc2","shared_citers":3},{"title":"$\\pi_{0.5}$: a Vision-Language-Action Model with Open-World Generalization","work_id":"d1ad7304-d09a-49bc-809e-846439f6aff9","shared_citers":3},{"title":"$\\pi^3$: Permutation-Equivariant Visual Geometry Learning","work_id":"8ab9cfd6-a60d-45e6-a572-9b4db82b8527","shared_citers":3},{"title":"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale","work_id":"e96730e3-129b-4db6-b981-15ab7932e297","shared_citers":3},{"title":"arXiv preprint arXiv:2405.10314 (2024)","work_id":"16da851d-11ab-482c-8a09-2978d97a967c","shared_citers":3},{"title":"arXiv preprint arXiv:2508.15769 (2025)","work_id":"83d3fcf2-2eae-4ce7-b7d9-9911bcb76d2b","shared_citers":3},{"title":"Classifier-Free Diffusion Guidance","work_id":"acf2c588-c088-4a6c-938e-150ad7c666d7","shared_citers":3},{"title":"CoRR , volume =","work_id":"9d7f0b29-b9ca-457f-9518-4a1506b43369","shared_citers":3},{"title":"CoRR , volume =","work_id":"78943aa6-b9e9-4fc1-8d71-e6bcc0b9a0b4","shared_citers":3},{"title":"Denoising Diffusion Implicit Models","work_id":"8fa2128b-d18c-405c-ac92-0e669cf89ac0","shared_citers":3}],"time_series":[{"n":1,"year":2019},{"n":2,"year":2024},{"n":1,"year":2025},{"n":43,"year":2026}],"dependency_candidates":[]},"authors":[]}}