{"work":{"id":"c19e9353-3a5c-4f79-9b83-3a458ebf7c01","openalex_id":null,"doi":null,"arxiv_id":"2310.02949","raw_key":null,"title":"Shadow alignment: The ease of subverting safely-aligned language models","authors":null,"authors_text":"Xianjun Yang, Xiao Wang, Qi Zhang, Linda R. Petzold, William Yang Wang, Xun Zhao, and Dahua Lin","year":2023,"venue":null,"abstract":null,"external_url":"https://arxiv.org/abs/2310.02949","cited_by_count":null,"metadata_source":"arxiv_reference","metadata_fetched_at":"2026-05-15T02:20:44.441920+00:00","pith_arxiv_id":null,"created_at":"2026-05-10T06:06:19.394146+00:00","updated_at":"2026-05-15T02:20:44.441920+00:00","title_quality_ok":true,"display_title":"Shadow alignment: The ease of subverting safely-aligned language models","render_title":"Shadow alignment: The ease of subverting safely-aligned language models"},"hub":{"state":{"work_id":"c19e9353-3a5c-4f79-9b83-3a458ebf7c01","tier":"hub","tier_reason":"10+ Pith inbound or 1,000+ external citations","pith_inbound_count":10,"external_cited_by_count":null,"distinct_field_count":4,"first_pith_cited_at":"2024-06-17T16:36:12+00:00","last_pith_cited_at":"2026-05-12T20:08:00+00:00","author_build_status":"not_needed","summary_status":"needed","contexts_status":"needed","graph_status":"needed","ask_index_status":"not_needed","reader_status":"not_needed","recognition_status":"not_needed","updated_at":"2026-05-15T05:37:34.252196+00:00","tier_text":"hub"},"tier":"hub","role_counts":[{"context_role":"background","n":1}],"polarity_counts":[{"context_polarity":"support","n":1}],"runs":{},"summary":{},"graph":{},"authors":[]}}