{"work":{"id":"a153885b-2460-4177-9053-8d0011adfcb9","openalex_id":null,"doi":null,"arxiv_id":"2501.01005","raw_key":null,"title":"Flashin- fer: Efficient and customizable attention engine for llm inference serving.arXiv preprint arXiv:2501.01005, 2025","authors":null,"authors_text":"18 Z","year":2025,"venue":null,"abstract":null,"external_url":"https://arxiv.org/abs/2501.01005","cited_by_count":null,"metadata_source":"arxiv_reference","metadata_fetched_at":"2026-05-12T10:01:27.703465+00:00","pith_arxiv_id":null,"created_at":"2026-05-09T01:54:34.214114+00:00","updated_at":"2026-05-14T17:03:06.181135+00:00","title_quality_ok":true,"display_title":"Flashin- fer: Efficient and customizable attention engine for llm inference serving","render_title":"Flashin- fer: Efficient and customizable attention engine for llm inference serving"},"hub":{"state":{"work_id":"a153885b-2460-4177-9053-8d0011adfcb9","tier":"hub","tier_reason":"10+ Pith inbound or 1,000+ external citations","pith_inbound_count":13,"external_cited_by_count":null,"distinct_field_count":5,"first_pith_cited_at":"2025-05-05T18:01:17+00:00","last_pith_cited_at":"2026-05-11T08:45:17+00:00","author_build_status":"not_needed","summary_status":"needed","contexts_status":"needed","graph_status":"needed","ask_index_status":"not_needed","reader_status":"not_needed","recognition_status":"not_needed","updated_at":"2026-05-14T22:06:17.395383+00:00","tier_text":"hub"},"tier":"hub","role_counts":[],"polarity_counts":[],"runs":{},"summary":{},"graph":{},"authors":[]}}