/-
  IndisputableMonolith.Cost.FunctionalEquation
  (IndisputableMonolith/Cost/FunctionalEquation.lean)
-/
1import Mathlib
2import Mathlib.Analysis.Calculus.Deriv.Basic
3import Mathlib.Analysis.Calculus.MeanValue
4import Mathlib.Analysis.Calculus.Taylor
5import IndisputableMonolith.Cost
6import IndisputableMonolith.Cost.AczelClass
7
8open IndisputableMonolith
9
10/-!
11# Functional Equation Helpers for T5
12
13This module provides lemmas for the T5 cost uniqueness proof.
14-/
15
16namespace IndisputableMonolith
17namespace Cost
18namespace FunctionalEquation
19
20open Real
21
/-- Log-coordinate reparametrization of a cost functional: `G F t = F (Real.exp t)`. -/
@[simp] noncomputable def G (F : ℝ → ℝ) (t : ℝ) : ℝ := Real.exp t |> F
24
/-- Convenience reparametrization: `H_F t = G_F t + 1`.
The `+ 1` shift turns the normalization `G F 0 = 0` into `H F 0 = 1`,
matching the standard d'Alembert normalization used below. -/
@[simp] noncomputable def H (F : ℝ → ℝ) (t : ℝ) : ℝ := G F t + 1
27
/-- The cosh-type additive functional identity satisfied by `G_F` in log
coordinates (the image of the composition law under `x = exp t`). -/
def CoshAddIdentity (F : ℝ → ℝ) : Prop :=
  ∀ s v : ℝ,
    G F (s + v) + G F (s - v) = 2 * (G F s * G F v) + 2 * (G F s + G F v)
32
/-- The cosh-add identity stated directly on an arbitrary function. -/
def DirectCoshAdd (Gf : ℝ → ℝ) : Prop :=
  ∀ a b : ℝ,
    Gf (a + b) + Gf (a - b) = 2 * (Gf a * Gf b) + 2 * (Gf a + Gf b)
37
/-- A `CoshAddIdentity` for `F` is, definitionally, the direct identity for `G F`. -/
lemma CoshAddIdentity_implies_DirectCoshAdd (F : ℝ → ℝ)
    (h : CoshAddIdentity F) :
    DirectCoshAdd (G F) := fun t u => h t u
41
/-- If `F` is reciprocal-symmetric on positives (`F x = F x⁻¹`), then its
log-coordinate form `G F` is even: negation in `t` is inversion in `exp t`. -/
lemma G_even_of_reciprocal_symmetry
    (F : ℝ → ℝ)
    (hSymm : ∀ {x : ℝ}, 0 < x → F x = F x⁻¹) :
    Function.Even (G F) := by
  intro t
  -- `exp t > 0`, so the reciprocal symmetry applies at `x = exp t`.
  have hpos : 0 < Real.exp t := Real.exp_pos t
  -- Negation in log coordinates corresponds to inversion in the original ones.
  have hrec : Real.exp (-t) = (Real.exp t)⁻¹ := by simp [Real.exp_neg]
  simp [G, hrec, hSymm hpos]
50
/-- Normalization `F 1 = 0` transfers to `G F 0 = 0`, since `exp 0 = 1`. -/
lemma G_zero_of_unit (F : ℝ → ℝ) (hUnit : F 1 = 0) : G F 0 = 0 := by
  simpa [G] using hUnit
53
/-- In log coordinates the canonical cost `Jcost` is `cosh − 1`:
`G Jcost t = cosh t − 1`. -/
theorem Jcost_G_eq_cosh_sub_one (t : ℝ) : G Cost.Jcost t = Real.cosh t - 1 := by
  simp only [G, Jcost]
  -- Jcost(exp t) = (exp t + exp(-t))/2 - 1 = cosh t - 1
  have h1 : (Real.exp t)⁻¹ = Real.exp (-t) := by simp [Real.exp_neg]
  rw [h1, Real.cosh_eq]
59
/-- The canonical cost `Jcost` satisfies the cosh-type additive identity:
after rewriting with the exponential addition laws, the statement is a
rational identity in `exp t` and `exp u`, closed by `field_simp`/`ring`. -/
theorem Jcost_cosh_add_identity : CoshAddIdentity Cost.Jcost := by
  intro t u
  simp only [G, Jcost]
  -- Use exp(t+u) = exp(t)*exp(u) and exp(t-u) = exp(t)/exp(u)
  have he1 : Real.exp (t + u) = Real.exp t * Real.exp u := Real.exp_add t u
  have he2 : Real.exp (t - u) = Real.exp t / Real.exp u := by
    rw [sub_eq_add_neg, Real.exp_add, Real.exp_neg]
    ring
  -- Nonvanishing of the exponentials, needed to clear denominators.
  have hpos_t : Real.exp t > 0 := Real.exp_pos t
  have hpos_u : Real.exp u > 0 := Real.exp_pos u
  have hne_t : Real.exp t ≠ 0 := hpos_t.ne'
  have hne_u : Real.exp u ≠ 0 := hpos_u.ne'
  rw [he1, he2]
  field_simp
  ring
75
/-- An even function differentiable at `0` has vanishing derivative there:
since `H = H ∘ (-·)`, the chain rule forces `H'(0) = -H'(0)`.
(The parameter `H` here shadows the file-level definition `H`.) -/
theorem even_deriv_at_zero (H : ℝ → ℝ)
    (h_even : Function.Even H) (h_diff : DifferentiableAt ℝ H 0) : deriv H 0 = 0 := by
  -- For even functions, the derivative at 0 is 0
  let negFun : ℝ → ℝ := fun x => -x
  -- `H` and `H ∘ (-·)` are the same function, so their derivatives agree.
  have h1 : deriv H 0 = deriv (H ∘ negFun) 0 := by
    congr 1
    ext x
    simp only [Function.comp_apply, negFun]
    exact (h_even x).symm
  -- Chain rule: the derivative of `H ∘ (-·)` at 0 is `-H'(0)`.
  have h2 : deriv (H ∘ negFun) 0 = -deriv H 0 := by
    have hd : DifferentiableAt ℝ negFun 0 := differentiable_neg.differentiableAt
    have h_diff_neg : DifferentiableAt ℝ H (negFun 0) := by simp [negFun]; exact h_diff
    have hchain := deriv_comp (x := (0 : ℝ)) h_diff_neg hd
    rw [hchain]
    simp only [negFun, neg_zero]
    have hdn : deriv negFun 0 = -1 := congrFun deriv_neg' 0
    rw [hdn]
    ring
  -- Combining: H'(0) = -H'(0), hence H'(0) = 0.
  rw [h1] at h2
  linarith
96
/-- A d'Alembert solution with `H 0 = 1` is even: specializing the equation at
`t = 0` gives `H u + H (-u) = 2 H u`, hence `H (-u) = H u`. -/
lemma dAlembert_even
    (H : ℝ → ℝ)
    (h_one : H 0 = 1)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u) :
    Function.Even H := by
  intro u
  have h := h_dAlembert 0 u
  simpa [h_one, zero_add, sub_eq_add_neg, two_mul] using h
105
/-- Double-angle formula for d'Alembert solutions: `H (2t) = 2 (H t)² − 1`
(specialize the functional equation at `u = t`). -/
lemma dAlembert_double
    (H : ℝ → ℝ)
    (h_one : H 0 = 1)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u) (t : ℝ) :
    H (2 * t) = 2 * (H t)^2 - 1 := by
  have h := h_dAlembert t t
  have h' : H (t + t) = 2 * (H t)^2 - 1 := by
    -- H(2t) + H(0) = 2 H(t)^2
    have h0 : H (t + t) + 1 = 2 * H t * H t := by
      simpa [h_one] using h
    have h1 : H (t + t) = 2 * H t * H t - 1 := by
      linarith
    simpa [pow_two, mul_assoc] using h1
  simpa [two_mul] using h'
120
/-- Product formula for d'Alembert solutions:
`H (t+u) · H (t−u) = (H t)² + (H u)² − 1`.
Apply the equation at the pair `(t+u, t−u)` and eliminate `H (2t)`, `H (2u)`
with the double-angle formula. -/
lemma dAlembert_product
    (H : ℝ → ℝ)
    (h_one : H 0 = 1)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u) :
    ∀ t u, H (t+u) * H (t-u) = (H t)^2 + (H u)^2 - 1 := by
  intro t u
  have h := h_dAlembert (t + u) (t - u)
  have h' : H (2 * t) + H (2 * u) = 2 * H (t + u) * H (t - u) := by
    -- (t+u)+(t-u)=2t and (t+u)-(t-u)=2u
    simpa [two_mul, sub_eq_add_neg, add_assoc, add_left_comm, add_comm] using h
  have h2t : H (2 * t) = 2 * (H t)^2 - 1 := dAlembert_double H h_one h_dAlembert t
  have h2u : H (2 * u) = 2 * (H u)^2 - 1 := dAlembert_double H h_one h_dAlembert u
  have h'' : 2 * H (t + u) * H (t - u) = (2 * (H t)^2 - 1) + (2 * (H u)^2 - 1) := by
    calc
      2 * H (t + u) * H (t - u) = H (2 * t) + H (2 * u) := by linarith [h']
      _ = (2 * (H t)^2 - 1) + (2 * (H u)^2 - 1) := by simp [h2t, h2u]
  linarith
138
/-- Squared-difference identity for d'Alembert solutions:
`(H (t+u) − H (t−u))² = 4 ((H t)² − 1)((H u)² − 1)`.
Follows from `(a − b)² = (a + b)² − 4ab` combined with the sum (functional
equation) and product formulas. -/
lemma dAlembert_diff_square
    (H : ℝ → ℝ)
    (h_one : H 0 = 1)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u) :
    ∀ t u,
      (H (t+u) - H (t-u))^2 = 4 * ((H t)^2 - 1) * ((H u)^2 - 1) := by
  intro t u
  have h_sum : H (t+u) + H (t-u) = 2 * H t * H u := h_dAlembert t u
  have h_prod : H (t+u) * H (t-u) = (H t)^2 + (H u)^2 - 1 :=
    dAlembert_product H h_one h_dAlembert t u
  calc
    (H (t+u) - H (t-u))^2
        = (H (t+u) + H (t-u))^2 - 4 * (H (t+u) * H (t-u)) := by ring
    _ = (2 * H t * H u)^2 - 4 * ((H t)^2 + (H u)^2 - 1) := by
        simp [h_sum, h_prod]
    _ = 4 * ((H t)^2 - 1) * ((H u)^2 - 1) := by ring
155
/-- `H` has log-curvature `κ` at the origin: `2 (H s − 1) / s² → κ` as `s → 0`. -/
def HasLogCurvature (H : ℝ → ℝ) (κ : ℝ) : Prop :=
  Filter.Tendsto (fun s => 2 * (H s - 1) / s^2) (nhds 0) (nhds κ)
158
/-- Algebraic rewrite used for the curvature limit:
`H t − 1 = (t²/2) · (2 (H t − 1) / t²)` for every `t`.
At `t = 0` both sides vanish (`H 0 = 1`, and division by zero is zero in Lean). -/
lemma sub_one_eq_mul_ratio (H : ℝ → ℝ) (h_one : H 0 = 1) (t : ℝ) :
    H t - 1 = (t^2 / 2) * (2 * (H t - 1) / t^2) := by
  by_cases ht : t = 0
  · subst ht
    simp [h_one]
  · field_simp [ht]
165
/-- A finite log-curvature at `0` forces continuity of `H` at `0` in the sense
`H t → 1` as `t → 0`: write `H t − 1 = (t²/2) · (2 (H t − 1) / t²)`, a product
of a factor tending to `0` with a factor tending to `κ`. -/
lemma tendsto_H_one_of_log_curvature
    (H : ℝ → ℝ) (h_one : H 0 = 1) {κ : ℝ} (h_calib : HasLogCurvature H κ) :
    Filter.Tendsto H (nhds 0) (nhds 1) := by
  -- Basic limits: t → 0, t² → 0, t²/2 → 0.
  have h_t : Filter.Tendsto (fun t : ℝ => t) (nhds 0) (nhds (0 : ℝ)) := by
    simpa using (Filter.tendsto_id : Filter.Tendsto (fun t : ℝ => t) (nhds (0 : ℝ)) (nhds (0 : ℝ)))
  have h_t2 : Filter.Tendsto (fun t : ℝ => t^2) (nhds 0) (nhds (0 : ℝ)) := by
    simpa [pow_two] using h_t.mul h_t
  have h_t2_div : Filter.Tendsto (fun t : ℝ => t^2 / 2) (nhds 0) (nhds (0 : ℝ)) := by
    have h_const : Filter.Tendsto (fun _ : ℝ => (1 / 2 : ℝ)) (nhds 0) (nhds (1 / 2 : ℝ)) :=
      tendsto_const_nhds
    simpa [div_eq_mul_inv] using h_t2.mul h_const
  -- Product of limits: H t − 1 → 0 · κ = 0.
  have h_sub : Filter.Tendsto (fun t => H t - 1) (nhds 0) (nhds (0 : ℝ)) := by
    have h_prod :
        Filter.Tendsto (fun t => (t^2 / 2) * (2 * (H t - 1) / t^2)) (nhds 0)
          (nhds ((0 : ℝ) * κ)) := h_t2_div.mul h_calib
    have h_eq :
        (fun t => H t - 1) = (fun t => (t^2 / 2) * (2 * (H t - 1) / t^2)) := by
      funext t
      exact sub_one_eq_mul_ratio H h_one t
    simpa [h_eq] using h_prod
  -- Add back the constant 1 to recover the limit of H itself.
  have h_const : Filter.Tendsto (fun _ : ℝ => (1 : ℝ)) (nhds 0) (nhds (1 : ℝ)) :=
    tendsto_const_nhds
  have h_add : Filter.Tendsto (fun t => (H t - 1) + 1) (nhds 0) (nhds (0 + (1 : ℝ))) :=
    h_sub.add h_const
  simpa using h_add
191
/-- A calibrated d'Alembert solution is continuous everywhere.
Continuity at `0` comes from the curvature limit; the functional equation then
transports it to an arbitrary point `t`: the symmetric sum
`H(t+u) + H(t−u)` tends to `2 H t`, the difference tends to `0` by the
squared-difference identity, and their combination recovers `H(t+u) → H t`. -/
theorem dAlembert_continuous_of_log_curvature
    (H : ℝ → ℝ)
    (h_one : H 0 = 1)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u)
    {κ : ℝ} (h_calib : HasLogCurvature H κ) :
    Continuous H := by
  refine continuous_iff_continuousAt.2 ?_
  intro t
  -- Continuity at the origin, from the calibration limit.
  have h_lim_H : Filter.Tendsto H (nhds 0) (nhds 1) :=
    tendsto_H_one_of_log_curvature H h_one h_calib
  -- Step 1: H(t+u) + H(t−u) → 2 H t as u → 0.
  have h_sum :
      Filter.Tendsto (fun u => H (t+u) + H (t-u)) (nhds 0) (nhds (2 * H t)) := by
    have h_prod : Filter.Tendsto (fun u => (2 * H t) * H u) (nhds 0)
        (nhds ((2 * H t) * (1 : ℝ))) := (tendsto_const_nhds.mul h_lim_H)
    have h_prod' : Filter.Tendsto (fun u => 2 * H t * H u) (nhds 0) (nhds (2 * H t)) := by
      simpa [mul_assoc] using h_prod
    have h_eq : (fun u => H (t+u) + H (t-u)) = fun u => 2 * H t * H u := by
      funext u
      exact h_dAlembert t u
    simpa [h_eq] using h_prod'
  -- Step 2: (H(t+u) − H(t−u))² → 0, via the squared-difference identity.
  have h_diff_sq :
      Filter.Tendsto (fun u => (H (t+u) - H (t-u))^2) (nhds 0) (nhds (0 : ℝ)) := by
    have h_u_sq : Filter.Tendsto (fun u => (H u)^2) (nhds 0) (nhds ((1 : ℝ)^2)) := by
      simpa [pow_two] using h_lim_H.mul h_lim_H
    have h_u_sq_sub : Filter.Tendsto (fun u => (H u)^2 - 1) (nhds 0) (nhds (0 : ℝ)) := by
      have h_const : Filter.Tendsto (fun _ : ℝ => (1 : ℝ)) (nhds 0) (nhds (1 : ℝ)) :=
        tendsto_const_nhds
      simpa using h_u_sq.sub h_const
    have h_const :
        Filter.Tendsto (fun _ : ℝ => 4 * ((H t)^2 - 1)) (nhds 0)
          (nhds (4 * ((H t)^2 - 1))) := tendsto_const_nhds
    have h_mul :
        Filter.Tendsto (fun u => (4 * ((H t)^2 - 1)) * ((H u)^2 - 1)) (nhds 0)
          (nhds (4 * ((H t)^2 - 1) * (0 : ℝ))) := h_const.mul h_u_sq_sub
    have h_eq :
        (fun u => (H (t+u) - H (t-u))^2) =
          (fun u => 4 * ((H t)^2 - 1) * ((H u)^2 - 1)) := by
      funext u
      exact dAlembert_diff_square H h_one h_dAlembert t u
    simpa [h_eq] using h_mul
  -- Step 3: take square roots to get |H(t+u) − H(t−u)| → 0.
  have h_abs :
      Filter.Tendsto (fun u => |H (t+u) - H (t-u)|) (nhds 0) (nhds (0 : ℝ)) := by
    have h_sqrt :
        Filter.Tendsto (fun u => Real.sqrt ((H (t+u) - H (t-u))^2)) (nhds 0)
          (nhds (Real.sqrt 0)) :=
      (Real.continuous_sqrt.tendsto 0).comp h_diff_sq
    simpa [Real.sqrt_sq_eq_abs] using h_sqrt
  -- ... hence the signed difference tends to 0 as well.
  have h_diff :
      Filter.Tendsto (fun u => H (t+u) - H (t-u)) (nhds 0) (nhds (0 : ℝ)) :=
    (tendsto_zero_iff_abs_tendsto_zero (f := fun u => H (t+u) - H (t-u))).2 h_abs
  -- Step 4: (sum) + (difference) = 2 H(t+u), which therefore tends to 2 H t.
  have h_sum_diff :
      Filter.Tendsto
        (fun u => (H (t+u) + H (t-u)) + (H (t+u) - H (t-u)))
        (nhds 0) (nhds ((2 * H t) + (0 : ℝ))) := h_sum.add h_diff
  have h_twice : Filter.Tendsto (fun u => 2 * H (t+u)) (nhds 0) (nhds (2 * H t)) := by
    have h_sum_diff' :
        Filter.Tendsto
          (fun u => H (t+u) + H (t+u))
          (nhds 0) (nhds (2 * H t)) := by
      have h_eq :
          (fun u => (H (t+u) + H (t-u)) + (H (t+u) - H (t-u))) =
            (fun u => H (t+u) + H (t+u)) := by
        funext u
        ring
      have h_sum_diff'' :
          Filter.Tendsto
            (fun u => (H (t+u) + H (t-u)) + (H (t+u) - H (t-u)))
            (nhds 0) (nhds (2 * H t)) := by
        simpa using h_sum_diff
      simpa [h_eq] using h_sum_diff''
    simpa [two_mul] using h_sum_diff'
  -- Step 5: divide by 2 and translate the limit from u → 0 to x → t.
  have h_half :
      Filter.Tendsto (fun u => (2 * H (t+u)) / 2) (nhds 0) (nhds ((2 * H t) / 2)) := by
    have h_const : Filter.Tendsto (fun _ : ℝ => (1 / 2 : ℝ)) (nhds 0) (nhds (1 / 2 : ℝ)) :=
      tendsto_const_nhds
    simpa [div_eq_mul_inv] using h_twice.mul h_const
  have h_at0 : Filter.Tendsto (fun u => H (t+u)) (nhds 0) (nhds (H t)) := by
    simpa using h_half
  have h_map :
      Filter.Tendsto H (Filter.map (fun u => t + u) (nhds 0)) (nhds (H t)) :=
    (Filter.tendsto_map'_iff).2 h_at0
  have h_tendsto : Filter.Tendsto H (nhds t) (nhds (H t)) := by
    simpa [map_add_left_nhds_zero] using h_map
  exact h_tendsto
276
277/-! ## ODE Uniqueness Infrastructure -/
278
/-- Helper: the derivative of `s ↦ exp (-s)` is `-exp (-s)`
(chain rule: inner slope `-1` at `t`, outer slope `exp (-t)`). -/
lemma deriv_exp_neg (t : ℝ) : deriv (fun s => Real.exp (-s)) t = -Real.exp (-t) := by
  have h := Real.hasDerivAt_exp (-t)
  have hc : HasDerivAt (fun s => -s) (-1) t := by
    have := hasDerivAt_neg (x := t)
    simp at this ⊢
    exact this
  -- Compose the two `HasDerivAt` facts and read off the derivative.
  have hcomp := h.comp t hc
  simp at hcomp
  exact hcomp.deriv
289
/-- Diagonalization of the ODE `f'' = f` into first-order components:
`(f' − f)' = −(f' − f)` and `(f' + f)' = (f' + f)`. -/
lemma ode_diagonalization (f : ℝ → ℝ)
    (h_diff2 : ContDiff ℝ 2 f)
    (h_ode : ∀ t, deriv (deriv f) t = f t) :
    (∀ t, deriv (fun s => deriv f s - f s) t = -(deriv f t - f t)) ∧
    (∀ t, deriv (fun s => deriv f s + f s) t = deriv f t + f t) := by
  -- Extract differentiability of `f` and of `deriv f` from `C²`.
  have h_diff1 : Differentiable ℝ f := h_diff2.differentiable (by decide : (2 : WithTop ℕ∞) ≠ 0)
  have h_deriv_contdiff : ContDiff ℝ 1 (deriv f) := by
    rw [show (2 : WithTop ℕ∞) = 1 + 1 from rfl] at h_diff2
    rw [contDiff_succ_iff_deriv] at h_diff2
    exact h_diff2.2.2
  have h_diff_deriv : Differentiable ℝ (deriv f) := h_deriv_contdiff.differentiable (by decide : (1 : WithTop ℕ∞) ≠ 0)
  constructor
  · intro t
    -- Linearity of `deriv`, then substitute f'' = f.
    have h1 : deriv (fun s => deriv f s - f s) t = deriv (deriv f) t - deriv f t := by
      apply deriv_sub h_diff_deriv.differentiableAt h_diff1.differentiableAt
    rw [h1, h_ode t]
    ring
  · intro t
    have h2 : deriv (fun s => deriv f s + f s) t = deriv (deriv f) t + deriv f t := by
      apply deriv_add h_diff_deriv.differentiableAt h_diff1.differentiableAt
    rw [h2, h_ode t]
    ring
313
/-- If `g' = -g` and `g 0 = 0`, then `g ≡ 0`.
Integrating-factor argument: `(g · exp)' = 0`, so `g · exp` is constant, and
the constant equals `g 0 = 0`; divide by the nonvanishing `exp`. -/
lemma deriv_neg_self_zero (g : ℝ → ℝ)
    (h_diff : Differentiable ℝ g)
    (h_deriv : ∀ t, deriv g t = -g t)
    (h_g0 : g 0 = 0) :
    ∀ t, g t = 0 := by
  -- The integrating factor `exp s` kills the derivative of the product.
  have h_const : ∀ t, deriv (fun s => g s * Real.exp s) t = 0 := by
    intro t
    have h1 : deriv (fun s => g s * Real.exp s) t =
        deriv g t * Real.exp t + g t * deriv Real.exp t := by
      apply deriv_mul h_diff.differentiableAt Real.differentiable_exp.differentiableAt
    rw [h1, Real.deriv_exp, h_deriv t]
    ring
  have h_diff_prod : Differentiable ℝ (fun s => g s * Real.exp s) := by
    apply Differentiable.mul h_diff Real.differentiable_exp
  -- Zero derivative everywhere ⇒ the product is constant.
  have h_is_const := is_const_of_deriv_eq_zero h_diff_prod h_const
  intro t
  specialize h_is_const t 0
  simp only [Real.exp_zero, mul_one] at h_is_const
  have h_exp_pos : Real.exp t > 0 := Real.exp_pos t
  have h_exp_ne : Real.exp t ≠ 0 := h_exp_pos.ne'
  have h_eq : g t * Real.exp t = g 0 := h_is_const
  -- Divide the constancy relation by the nonzero factor `exp t`.
  calc g t = g t * Real.exp t / Real.exp t := by field_simp
    _ = g 0 / Real.exp t := by rw [h_eq]
    _ = 0 / Real.exp t := by rw [h_g0]
    _ = 0 := by simp
340
/-- If `h' = h` and `h 0 = 0`, then `h ≡ 0`.
Mirror of `deriv_neg_self_zero`, with integrating factor `exp (-s)`. -/
lemma deriv_pos_self_zero (h : ℝ → ℝ)
    (h_diff : Differentiable ℝ h)
    (h_deriv : ∀ t, deriv h t = h t)
    (h_h0 : h 0 = 0) :
    ∀ t, h t = 0 := by
  -- `(h · exp(-·))' = 0` by the product rule and `h' = h`.
  have h_const : ∀ t, deriv (fun s => h s * Real.exp (-s)) t = 0 := by
    intro t
    have h1 : deriv (fun s => h s * Real.exp (-s)) t =
        deriv h t * Real.exp (-t) + h t * deriv (fun s => Real.exp (-s)) t := by
      apply deriv_mul h_diff.differentiableAt
      exact (Real.differentiable_exp.comp differentiable_neg).differentiableAt
    rw [h1, deriv_exp_neg, h_deriv t]
    ring
  have h_diff_prod : Differentiable ℝ (fun s => h s * Real.exp (-s)) := by
    apply Differentiable.mul h_diff
    exact Real.differentiable_exp.comp differentiable_neg
  -- Zero derivative everywhere ⇒ the product is constant, equal to h 0 = 0.
  have h_is_const := is_const_of_deriv_eq_zero h_diff_prod h_const
  intro t
  specialize h_is_const t 0
  simp only [neg_zero, Real.exp_zero, mul_one] at h_is_const
  have h_exp_pos : Real.exp (-t) > 0 := Real.exp_pos (-t)
  have h_exp_ne : Real.exp (-t) ≠ 0 := h_exp_pos.ne'
  have h_eq : h t * Real.exp (-t) = h 0 := h_is_const
  calc h t = h t * Real.exp (-t) / Real.exp (-t) := by field_simp
    _ = h 0 / Real.exp (-t) := by rw [h_eq]
    _ = 0 / Real.exp (-t) := by rw [h_h0]
    _ = 0 := by simp
369
/-- **Theorem (ODE Zero Uniqueness)**: the unique solution to `f'' = f` with
`f(0) = f'(0) = 0` is `f ≡ 0`.
Proof: diagonalize into `g = f' − f` and `hf = f' + f`; each satisfies a
first-order linear ODE with zero initial data, so both vanish, and
`2 f = hf − g`. -/
theorem ode_zero_uniqueness (f : ℝ → ℝ)
    (h_diff2 : ContDiff ℝ 2 f)
    (h_ode : ∀ t, deriv (deriv f) t = f t)
    (h_f0 : f 0 = 0)
    (h_f'0 : deriv f 0 = 0) :
    ∀ t, f t = 0 := by
  have ⟨h_minus, h_plus⟩ := ode_diagonalization f h_diff2 h_ode
  -- Differentiability of `f` and of `deriv f` from the C² hypothesis.
  have h_diff1 : Differentiable ℝ f := h_diff2.differentiable (by decide : (2 : WithTop ℕ∞) ≠ 0)
  have h_deriv_contdiff : ContDiff ℝ 1 (deriv f) := by
    rw [show (2 : WithTop ℕ∞) = 1 + 1 from rfl] at h_diff2
    rw [contDiff_succ_iff_deriv] at h_diff2
    exact h_diff2.2.2
  have h_diff_deriv : Differentiable ℝ (deriv f) := h_deriv_contdiff.differentiable (by decide : (1 : WithTop ℕ∞) ≠ 0)
  -- The two diagonal components of the second-order ODE.
  let g := fun s => deriv f s - f s
  let hf := fun s => deriv f s + f s
  have hg_diff : Differentiable ℝ g := h_diff_deriv.sub h_diff1
  have hh_diff : Differentiable ℝ hf := h_diff_deriv.add h_diff1
  have hg0 : g 0 = 0 := by simp [g, h_f0, h_f'0]
  have hh0 : hf 0 = 0 := by simp [hf, h_f0, h_f'0]
  have hg_deriv : ∀ t, deriv g t = -g t := h_minus
  have hh_deriv : ∀ t, deriv hf t = hf t := h_plus
  -- Each component solves a first-order linear ODE with zero initial value.
  have hg_zero := deriv_neg_self_zero g hg_diff hg_deriv hg0
  have hh_zero := deriv_pos_self_zero hf hh_diff hh_deriv hh0
  intro t
  have hgt := hg_zero t
  have hht := hh_zero t
  -- From g t = 0 and hf t = 0, conclude f t = 0 by linear arithmetic.
  simp only [g, hf] at hgt hht
  linarith
399
/-- `cosh'' = cosh`, via `cosh' = sinh` and `sinh' = cosh`. -/
theorem cosh_second_deriv_eq : ∀ t, deriv (deriv (fun x => Real.cosh x)) t = Real.cosh t := by
  intro t
  have h1 : deriv (fun x => Real.cosh x) = Real.sinh := Real.deriv_cosh
  rw [h1]
  have h2 : deriv Real.sinh = Real.cosh := Real.deriv_sinh
  exact congrFun h2 t
406
/-- Initial data of `cosh`: `cosh 0 = 1` and `cosh' 0 = sinh 0 = 0`. -/
theorem cosh_initials : Real.cosh 0 = 1 ∧ deriv (fun x => Real.cosh x) 0 = 0 := by
  constructor
  · simp [Real.cosh_zero]
  · have h := Real.deriv_cosh
    simp only [h, Real.sinh_zero]
412
/-- **Theorem (ODE Cosh Uniqueness)**: the unique C² solution to `H'' = H` with
`H(0) = 1`, `H'(0) = 0` is `cosh`.
Proof: `g = H − cosh` satisfies `g'' = g` with zero initial data, so
`ode_zero_uniqueness` forces `g ≡ 0`. -/
theorem ode_cosh_uniqueness_contdiff (H : ℝ → ℝ)
    (h_diff : ContDiff ℝ 2 H)
    (h_ode : ∀ t, deriv (deriv H) t = H t)
    (h_H0 : H 0 = 1)
    (h_H'0 : deriv H 0 = 0) :
    ∀ t, H t = Real.cosh t := by
  -- The difference from the candidate solution.
  let g := fun t => H t - Real.cosh t
  have hg_diff : ContDiff ℝ 2 g := h_diff.sub Real.contDiff_cosh
  -- g also solves g'' = g, by linearity of `deriv` and cosh'' = cosh.
  have hg_ode : ∀ t, deriv (deriv g) t = g t := by
    intro t
    have h1 : deriv g = fun s => deriv H s - deriv Real.cosh s := by
      ext s; apply deriv_sub
      · exact (h_diff.differentiable (by decide : (2 : WithTop ℕ∞) ≠ 0)).differentiableAt
      · exact Real.differentiable_cosh.differentiableAt
    have h2 : deriv (deriv g) t = deriv (deriv H) t - deriv (deriv Real.cosh) t := by
      have hH_diff1 : ContDiff ℝ 1 (deriv H) := by
        rw [show (2 : WithTop ℕ∞) = 1 + 1 from rfl] at h_diff
        rw [contDiff_succ_iff_deriv] at h_diff
        exact h_diff.2.2
      have hcosh_diff1 : ContDiff ℝ 1 (deriv Real.cosh) := by
        rw [Real.deriv_cosh]; exact Real.contDiff_sinh
      rw [h1]; apply deriv_sub
      · exact hH_diff1.differentiable (by decide : (1 : WithTop ℕ∞) ≠ 0) |>.differentiableAt
      · exact hcosh_diff1.differentiable (by decide : (1 : WithTop ℕ∞) ≠ 0) |>.differentiableAt
    rw [h2, h_ode t, cosh_second_deriv_eq t]
  -- Zero initial data for g.
  have hg0 : g 0 = 0 := by simp [g, h_H0, Real.cosh_zero]
  have hg'0 : deriv g 0 = 0 := by
    have h1 : deriv g 0 = deriv H 0 - deriv Real.cosh 0 := by
      apply deriv_sub
      · exact (h_diff.differentiable (by decide : (2 : WithTop ℕ∞) ≠ 0)).differentiableAt
      · exact Real.differentiable_cosh.differentiableAt
    rw [h1, h_H'0, Real.deriv_cosh, Real.sinh_zero]; ring
  -- Apply zero-uniqueness and unfold g.
  have hg_zero := ode_zero_uniqueness g hg_diff hg_ode hg0 hg'0
  intro t
  have := hg_zero t
  simp only [g] at this; linarith
450
/-- **Regularity bootstrap for the linear ODE `f'' = f`.**

    If the pointwise second-derivative equation holds and the solution is
    continuous and differentiable, then it is automatically `C²`.  This is a
    standard fact — linear ODEs with smooth coefficients have smooth
    solutions — whose fully formal proof would go through Picard–Lindelöf
    theory; here it is packaged as a hypothesis to be discharged by existing
    Mathlib theory. -/
def ode_linear_regularity_bootstrap_hypothesis (H : ℝ → ℝ) : Prop :=
  (∀ t, deriv (deriv H) t = H t) →
    Continuous H →
      Differentiable ℝ H →
        ContDiff ℝ 2 H
462
/-- **ODE regularity: continuous solutions.**

    For `f'' = f`: if the equation holds pointwise, the solution is continuous
    (the relevant derivatives are assumed to exist).  Packaged as a hypothesis. -/
def ode_regularity_continuous_hypothesis (H : ℝ → ℝ) : Prop :=
  (∀ t, deriv (deriv H) t = H t) →
    Continuous H
469
/-- **ODE regularity: differentiable solutions.**

    For `f'' = f` with continuous `f`, the solution is differentiable — the
    equation only makes sense once `f'` exists.  Packaged as a hypothesis. -/
def ode_regularity_differentiable_hypothesis (H : ℝ → ℝ) : Prop :=
  (∀ t, deriv (deriv H) t = H t) →
    Continuous H →
      Differentiable ℝ H
476
477/-! ### Proving the regularity hypotheses
478
479For the linear ODE f'' = f, we can verify the regularity hypotheses hold
480for the known solution cosh. For arbitrary solutions, we rely on general
481ODE theory (Picard-Lindelöf). -/
482
/-- `cosh` satisfies the regularity bootstrap vacuously: it is already `C^∞`. -/
theorem cosh_satisfies_bootstrap : ode_linear_regularity_bootstrap_hypothesis Real.cosh :=
  fun _ _ _ => Real.contDiff_cosh
487
/-- `cosh` satisfies the continuity hypothesis: it is continuous outright. -/
theorem cosh_satisfies_continuous : ode_regularity_continuous_hypothesis Real.cosh :=
  fun _ => Real.continuous_cosh
492
/-- `cosh` satisfies the differentiability hypothesis: it is differentiable outright. -/
theorem cosh_satisfies_differentiable : ode_regularity_differentiable_hypothesis Real.cosh :=
  fun _ _ => Real.differentiable_cosh
497
/-- Cosh uniqueness for `H'' = H` with `H(0) = 1`, `H'(0) = 0`, assuming the
three regularity hypotheses: chain them to reach `C²`, then apply the
`ContDiff` version of the theorem. -/
theorem ode_cosh_uniqueness (H : ℝ → ℝ)
    (h_ODE : ∀ t, deriv (deriv H) t = H t)
    (h_H0 : H 0 = 1)
    (h_H'0 : deriv H 0 = 0)
    (h_cont_hyp : ode_regularity_continuous_hypothesis H)
    (h_diff_hyp : ode_regularity_differentiable_hypothesis H)
    (h_bootstrap_hyp : ode_linear_regularity_bootstrap_hypothesis H) :
    ∀ t, H t = Real.cosh t := by
  -- Regularity ladder: continuous → differentiable → C².
  have hc : Continuous H := h_cont_hyp h_ODE
  have hd : Differentiable ℝ H := h_diff_hyp h_ODE hc
  exact ode_cosh_uniqueness_contdiff H (h_bootstrap_hyp h_ODE hc hd) h_ODE h_H0 h_H'0
510
/-- **Aczél's Theorem (continuous d'Alembert solutions are smooth).**

    Classical result in functional-equations theory: continuous solutions of
    `f(x+y) + f(x-y) = 2 f(x) f(y)` with `f(0) = 1` are analytic and equal to
    `cosh (λx)` for some `λ ∈ ℝ`.

    Reference: Aczél, "Lectures on Functional Equations" (1966), Chapter 3.

    A full formalization would need automatic-continuity results for
    measurable solutions, Taylor expansion around `0` for analyticity, and
    Cauchy functional-equation theory; for now the conclusion is stated as a
    hypothesis that follows from Aczél's theorem. -/
def dAlembert_continuous_implies_smooth_hypothesis (H : ℝ → ℝ) : Prop :=
  H 0 = 1 →
    Continuous H →
      (∀ t u, H (t+u) + H (t-u) = 2 * H t * H u) →
        ContDiff ℝ ⊤ H
527
/-- **d'Alembert to ODE derivation.**

    If `H` satisfies the d'Alembert equation and is smooth, then `H'' = H`:
    differentiate `H(t+u) + H(t-u) = 2 H(t) H(u)` twice with respect to `u`
    and set `u = 0` to obtain `H''(t) = H''(0) · H(t)`; the calibration
    `H''(0) = 1` then yields `H''(t) = H(t)`.  Packaged as a hypothesis. -/
def dAlembert_to_ODE_hypothesis (H : ℝ → ℝ) : Prop :=
  H 0 = 1 →
    Continuous H →
      (∀ t u, H (t+u) + H (t-u) = 2 * H t * H u) →
        deriv (deriv H) 0 = 1 →
          ∀ t, deriv (deriv H) t = H t
538
/-- `cosh` satisfies the d'Alembert smoothness hypothesis: it is `C^∞` outright. -/
theorem cosh_dAlembert_smooth : dAlembert_continuous_implies_smooth_hypothesis Real.cosh :=
  fun _ _ _ => Real.contDiff_cosh
543
/-- `cosh` satisfies the d'Alembert-to-ODE hypothesis, via `cosh'' = cosh`. -/
theorem cosh_dAlembert_to_ODE : dAlembert_to_ODE_hypothesis Real.cosh :=
  fun _ _ _ _ => cosh_second_deriv_eq
548
/-- A continuous, calibrated d'Alembert solution equals `cosh`:
derive the ODE `H'' = H` from the functional equation, pin down `H'(0) = 0`
from evenness, and invoke ODE cosh-uniqueness. -/
theorem dAlembert_cosh_solution
    (H : ℝ → ℝ)
    (h_one : H 0 = 1)
    (h_cont : Continuous H)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u)
    (h_deriv2_zero : deriv (deriv H) 0 = 1)
    (h_smooth_hyp : dAlembert_continuous_implies_smooth_hypothesis H)
    (h_ode_hyp : dAlembert_to_ODE_hypothesis H)
    (h_cont_hyp : ode_regularity_continuous_hypothesis H)
    (h_diff_hyp : ode_regularity_differentiable_hypothesis H)
    (h_bootstrap_hyp : ode_linear_regularity_bootstrap_hypothesis H) :
    ∀ t, H t = Real.cosh t := by
  -- Turn the functional equation into the calibrated ODE H'' = H.
  have h_ode : ∀ t, deriv (deriv H) t = H t := h_ode_hyp h_one h_cont h_dAlembert h_deriv2_zero
  have h_even : Function.Even H := dAlembert_even H h_one h_dAlembert
  -- Evenness plus differentiability at 0 gives H'(0) = 0.
  have h_deriv_zero : deriv H 0 = 0 := by
    have h_smooth := h_smooth_hyp h_one h_cont h_dAlembert
    have h_diff : DifferentiableAt ℝ H 0 := h_smooth.differentiable (by decide : (⊤ : WithTop ℕ∞) ≠ 0) |>.differentiableAt
    exact even_deriv_at_zero H h_even h_diff
  exact ode_cosh_uniqueness H h_ode h_one h_deriv_zero h_cont_hyp h_diff_hyp h_bootstrap_hyp
568
/-- Variant of `dAlembert_cosh_solution` where continuity is not assumed but
derived from the log-curvature calibration. -/
theorem dAlembert_cosh_solution_of_log_curvature
    (H : ℝ → ℝ)
    (h_one : H 0 = 1)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u)
    {κ : ℝ} (h_calib : HasLogCurvature H κ)
    (h_deriv2_zero : deriv (deriv H) 0 = 1)
    (h_smooth_hyp : dAlembert_continuous_implies_smooth_hypothesis H)
    (h_ode_hyp : dAlembert_to_ODE_hypothesis H)
    (h_cont_hyp : ode_regularity_continuous_hypothesis H)
    (h_diff_hyp : ode_regularity_differentiable_hypothesis H)
    (h_bootstrap_hyp : ode_linear_regularity_bootstrap_hypothesis H) :
    ∀ t, H t = Real.cosh t :=
  -- Continuity follows from the calibration; defer to the continuous case.
  dAlembert_cosh_solution H h_one
    (dAlembert_continuous_of_log_curvature H h_one h_dAlembert h_calib)
    h_dAlembert h_deriv2_zero h_smooth_hyp h_ode_hyp h_cont_hyp h_diff_hyp h_bootstrap_hyp
584
585/-! ## Paper Correspondence: Washburn Definitions
586
587The following definitions and lemmas correspond directly to the presentation in:
588 J. Washburn & M. Zlatanović, "Uniqueness of the Canonical Reciprocal Cost"
589-/
590
/-- **Definition 2.1 (Reciprocal Cost)**:
`F : ℝ₊ → ℝ` is a reciprocal cost when it is invariant under inversion on the
positive reals, i.e. `F y = F y⁻¹` for all `y > 0`. -/
def IsReciprocalCost (F : ℝ → ℝ) : Prop :=
  ∀ y : ℝ, 0 < y → F y = F y⁻¹
595
/-- **Normalized**: the cost vanishes at `x = 1`, the fixed point of inversion. -/
def IsNormalized (F : ℝ → ℝ) : Prop := F 1 = 0
598
/-- **Calibration (Condition 1.2)**, derivative form:
`G''(0) = 1` where `G t = F (exp t)` — equivalently, `lim_{t→0} 2 F(e^t)/t² = 1`. -/
def IsCalibrated (F : ℝ → ℝ) : Prop :=
  deriv (deriv (G F)) 0 = 1
603
/-- **Calibration (limit form)**:
`lim_{t→0} 2 F(e^t)/t² = 1`, expressed as log-curvature `1` of `H F = G F + 1`. -/
def IsCalibratedLimit (F : ℝ → ℝ) : Prop :=
  HasLogCurvature (H F) 1
608
/-- Real-scalar form of Mathlib's `taylorWithinEval_succ` on `Set.univ`,
with the `smul` rewritten as multiplication. -/
lemma taylorWithinEval_succ_real (H : ℝ → ℝ) (n : ℕ) (x₀ x : ℝ) :
    taylorWithinEval H (n + 1) Set.univ x₀ x =
      taylorWithinEval H n Set.univ x₀ x +
        (((n + 1 : ℝ) * (Nat.factorial n))⁻¹ * (x - x₀) ^ (n + 1)) *
          iteratedDerivWithin (n + 1) H Set.univ x₀ := by
  simpa [smul_eq_mul] using taylorWithinEval_succ H n Set.univ x₀ x
615
/-- First-order Taylor polynomial of `H` at `0`: `H 0 + H'(0) · x`. -/
lemma taylorWithinEval_one_univ (H : ℝ → ℝ) (x : ℝ) :
    taylorWithinEval H 1 Set.univ 0 x = H 0 + deriv H 0 * x := by
  have h := taylorWithinEval_succ_real H 0 0 x
  -- simplify the Taylor term at order 1
  have h' :
      taylorWithinEval H 1 Set.univ 0 x = H 0 + x * deriv H 0 := by
    simp [taylor_within_zero_eval, iteratedDerivWithin_univ, iteratedDerivWithin_one,
      iteratedDeriv_one, derivWithin_univ, sub_eq_add_neg] at h
    simpa [mul_comm] using h
  -- commute the product to match the stated form
  simpa [mul_comm] using h'
626
/-- Second-order Taylor polynomial of `H` at `0`:
`H 0 + H'(0) · x + (H''(0) / 2) · x²`. -/
lemma taylorWithinEval_two_univ (H : ℝ → ℝ) (x : ℝ) :
    taylorWithinEval H 2 Set.univ 0 x =
      H 0 + deriv H 0 * x + (deriv (deriv H) 0) / 2 * x^2 := by
  have h := taylorWithinEval_succ_real H 1 0 x
  have h0 :
      taylorWithinEval H 2 Set.univ 0 x =
        taylorWithinEval H 1 Set.univ 0 x +
          (((2 : ℝ) * (Nat.factorial 1))⁻¹ * (x - 0) ^ 2) *
            iteratedDerivWithin 2 H Set.univ 0 := by
    simpa [one_add_one_eq_two] using h
  -- expand the order-2 Taylor polynomial using the order-1 formula
  have h1 : taylorWithinEval H 1 Set.univ 0 x = H 0 + deriv H 0 * x :=
    taylorWithinEval_one_univ H x
  -- simplify the order-2 increment
  have h2 : iteratedDerivWithin 2 H Set.univ 0 = deriv (deriv H) 0 := by
    simp [iteratedDerivWithin_univ, iteratedDeriv_succ, iteratedDeriv_one]
  -- rewrite and normalize coefficients
  calc
    taylorWithinEval H 2 Set.univ 0 x
        = taylorWithinEval H 1 Set.univ 0 x +
            (((2 : ℝ) * (Nat.factorial 1))⁻¹ * (x - 0) ^ 2) *
              iteratedDerivWithin 2 H Set.univ 0 := by
          exact h0
    _ = (H 0 + deriv H 0 * x) +
          (((2 : ℝ) * (Nat.factorial 1))⁻¹ * x^2) * (deriv (deriv H) 0) := by
          simp [h1, h2, sub_eq_add_neg, pow_two, mul_comm, mul_left_comm, mul_assoc]
    _ = H 0 + deriv H 0 * x + (deriv (deriv H) 0) / 2 * x^2 := by
          simp [Nat.factorial_one, mul_comm, mul_left_comm, mul_assoc, div_eq_mul_inv]
655
/-- Derivative-form calibration implies limit-form calibration, given that
`H F` has log-curvature equal to its second derivative at `0`.
(Unused `have` bindings for `F 1 = 0` and `H F 0 = 1` were removed: they were
dead code flagged by the unused-variables linter; the hypothesis `hNorm` is
retained for interface stability.) -/
lemma isCalibratedLimit_of_isCalibrated
    (F : ℝ → ℝ) (hNorm : IsNormalized F)
    (_h_diff : ContDiff ℝ 2 (H F)) (h_deriv0 : deriv (H F) 0 = 0)
    (h_log : HasLogCurvature (H F) (deriv (deriv (H F)) 0))
    (h_calib : IsCalibrated F) :
    IsCalibratedLimit F := by
  have h_d2 : deriv (deriv (H F)) 0 = 1 := by
    -- H = G + 1, so first (and hence second) derivatives of H F and G F agree;
    -- calibration G''(0) = 1 therefore transfers to H''(0) = 1.
    have hderiv : deriv (H F) = deriv (G F) := by
      funext t
      change deriv (fun y => G F y + 1) t = deriv (G F) t
      simpa using (deriv_add_const (f := G F) (x := t) (c := (1 : ℝ)))
    have hderiv2 : deriv (deriv (H F)) = deriv (deriv (G F)) := congrArg deriv hderiv
    have hderiv2_at0 : deriv (deriv (H F)) 0 = deriv (deriv (G F)) 0 :=
      congrArg (fun g => g 0) hderiv2
    simpa [IsCalibrated] using hderiv2_at0.trans h_calib
  -- Rewrite the assumed log-curvature with the computed second derivative.
  simpa [IsCalibratedLimit, h_d2] using h_log
675
/-- Limit-form calibration implies derivative-form calibration: both
hypotheses give a log-curvature limit for `H F`, and limits in a Hausdorff
space are unique; then transfer the second derivative from `H F` to `G F`.
(Unused `have` bindings for `F 1 = 0` and `H F 0 = 1` were removed: they were
dead code flagged by the unused-variables linter; the hypotheses keep their
names for interface stability.) -/
lemma isCalibrated_of_isCalibratedLimit
    (F : ℝ → ℝ) (hNorm : IsNormalized F)
    (h_diff : ContDiff ℝ 2 (H F)) (h_deriv0 : deriv (H F) 0 = 0)
    (h_log : HasLogCurvature (H F) (deriv (deriv (H F)) 0))
    (h_limit : IsCalibratedLimit F) :
    IsCalibrated F := by
  -- Uniqueness of limits pins the second derivative of H F at 0 to 1.
  have h_eq : deriv (deriv (H F)) 0 = 1 :=
    tendsto_nhds_unique h_log h_limit
  -- transfer from H back to G (they differ by the constant 1)
  have hderiv : deriv (H F) = deriv (G F) := by
    funext t
    change deriv (fun y => G F y + 1) t = deriv (G F) t
    simpa using (deriv_add_const (f := G F) (x := t) (c := (1 : ℝ)))
  have hderiv2 : deriv (deriv (H F)) = deriv (deriv (G F)) := congrArg deriv hderiv
  have hderiv2_at0 : deriv (deriv (H F)) 0 = deriv (deriv (G F)) 0 :=
    congrArg (fun g => g 0) hderiv2
  simpa [IsCalibrated] using hderiv2_at0.symm.trans h_eq
695
/-- **Composition Law (Equation 1.1)** — the Recognition Composition Law (RCL):
`F(ab) + F(a/b) = 2 F(a) F(b) + 2 F(a) + 2 F(b)` for all `a, b > 0`. -/
def SatisfiesCompositionLaw (F : ℝ → ℝ) : Prop :=
  ∀ a b : ℝ, 0 < a → 0 < b →
    F (a * b) + F (a / b) = 2 * F a * F b + 2 * F a + 2 * F b
703
/-- **Lemma 2.1**: a reciprocal cost has an even log-coordinate form `G F`. -/
theorem reciprocal_implies_G_even (F : ℝ → ℝ) (hRecip : IsReciprocalCost F) :
    Function.Even (G F) :=
  G_even_of_reciprocal_symmetry F (fun {x} => hRecip x)
708
/-- **Lemma**: normalization `F 1 = 0` gives `G F 0 = 0`. -/
theorem normalized_implies_G_zero (F : ℝ → ℝ) (hNorm : IsNormalized F) :
    G F 0 = 0 := by
  exact G_zero_of_unit F hNorm
713
/-- **Key Identity**: the composition law on `F` is equivalent to the
`CoshAddIdentity` on `G`.

Specifically: `F(xy) + F(x/y) = 2F(x)F(y) + 2F(x) + 2F(y)`
becomes `G(s+t) + G(s-t) = 2G(s)G(t) + 2G(s) + 2G(t)`
via the substitution `x = e^s`, `y = e^t` (and `log` in the converse). -/
theorem composition_law_equiv_coshAdd (F : ℝ → ℝ) :
    SatisfiesCompositionLaw F ↔ CoshAddIdentity F := by
  constructor
  · intro hComp t u
    -- Forward: instantiate the composition law at x = exp t, y = exp u.
    have hexp_t_pos : 0 < Real.exp t := Real.exp_pos t
    have hexp_u_pos : 0 < Real.exp u := Real.exp_pos u
    have h := hComp (Real.exp t) (Real.exp u) hexp_t_pos hexp_u_pos
    -- exp(t) * exp(u) = exp(t + u)
    have h1 : Real.exp t * Real.exp u = Real.exp (t + u) := (Real.exp_add t u).symm
    -- exp(t) / exp(u) = exp(t - u)
    have h2 : Real.exp t / Real.exp u = Real.exp (t - u) := by
      rw [div_eq_mul_inv, ← Real.exp_neg u, ← Real.exp_add, sub_eq_add_neg]
    simp only [G, h1, h2] at h ⊢
    linarith
  · intro hCosh x y hx hy
    -- Converse: pull x, y back to log coordinates.
    let t := Real.log x
    let u := Real.log y
    have hx_eq : x = Real.exp t := (Real.exp_log hx).symm
    have hy_eq : y = Real.exp u := (Real.exp_log hy).symm
    have h := hCosh t u
    simp only [G] at h
    rw [hx_eq, hy_eq]
    rw [← Real.exp_add, ← Real.exp_sub]
    -- h : F (exp (t + u)) + F (exp (t - u)) = 2 * (F (exp t) * F (exp u)) + 2 * (F (exp t) + F (exp u))
    -- Goal: F (exp (t + u)) + F (exp (t - u)) = 2 * F (exp t) * F (exp u) + 2 * F (exp t) + 2 * F (exp u)
    calc F (Real.exp (t + u)) + F (Real.exp (t - u))
        = 2 * (F (Real.exp t) * F (Real.exp u)) + 2 * (F (Real.exp t) + F (Real.exp u)) := h
      _ = 2 * F (Real.exp t) * F (Real.exp u) + 2 * F (Real.exp t) + 2 * F (Real.exp u) := by ring
747
/-- **Theorem 1.1 (Main Result, Reformulated)**:

Let F : ℝ₊ → ℝ satisfy:
1. Reciprocity: F(x) = F(1/x)
2. Normalization: F(1) = 0
3. Composition Law: F(xy) + F(x/y) = 2F(x)F(y) + 2F(x) + 2F(y)
4. Calibration: lim_{t→0} 2F(e^t)/t² = 1
5. Continuity and regularity hypotheses

Then F = J on ℝ₊, where J(x) = (x + 1/x)/2 - 1.

The five `*_hypothesis` parameters are the regularity inputs consumed by
`dAlembert_cosh_solution`; see `washburn_uniqueness_aczel` for the version
that discharges them from the Aczél axiom instead of taking them as arguments.

This theorem corresponds to Theorem 1.1 in:
  J. Washburn & M. Zlatanović, "Uniqueness of the Canonical Reciprocal Cost" -/
theorem washburn_uniqueness (F : ℝ → ℝ)
    (hRecip : IsReciprocalCost F)
    (hNorm : IsNormalized F)
    (hComp : SatisfiesCompositionLaw F)
    (hCalib : IsCalibrated F)
    (hCont : ContinuousOn F (Set.Ioi 0))
    -- Regularity hypotheses (from Aczél theory)
    (h_smooth : dAlembert_continuous_implies_smooth_hypothesis (H F))
    (h_ode : dAlembert_to_ODE_hypothesis (H F))
    (h_cont : ode_regularity_continuous_hypothesis (H F))
    (h_diff : ode_regularity_differentiable_hypothesis (H F))
    (h_boot : ode_linear_regularity_bootstrap_hypothesis (H F)) :
    ∀ x : ℝ, 0 < x → F x = Cost.Jcost x := by
  -- The proof follows the structure of T5_uniqueness_complete:
  -- 1. Convert composition law to CoshAddIdentity on G
  -- 2. Shift to H = G + 1 to get standard d'Alembert equation
  -- 3. Apply Aczél's theorem: continuous d'Alembert solutions are cosh
  -- 4. Calibration H''(0) = 1 selects cosh (not cos or constant)
  -- 5. Unshift: G = cosh - 1, hence F = J
  intro x hx
  -- Convert hypotheses to the required format
  have hSymm : ∀ {y}, 0 < y → F y = F y⁻¹ := fun {y} hy => hRecip y hy
  have hCoshAdd : CoshAddIdentity F := composition_law_equiv_coshAdd F |>.mp hComp

  -- Step 1: Set up G and H
  let Gf : ℝ → ℝ := G F
  let Hf : ℝ → ℝ := H F

  -- Step 2: Derive key properties of G and H
  -- (`h_G_even` and `h_G0` record the symmetry/normalization facts; the
  -- remainder of the proof only needs `h_H0` explicitly.)
  have h_G_even : Function.Even Gf := G_even_of_reciprocal_symmetry F hSymm
  have h_G0 : Gf 0 = 0 := G_zero_of_unit F hNorm
  have h_H0 : Hf 0 = 1 := by
    show H F 0 = 1
    simp only [H, G, Real.exp_zero]
    -- Goal is F 1 + 1 = 1, and hNorm says F 1 = 0
    rw [hNorm]
    ring

  -- Step 3: G is continuous (F continuous on (0,∞), exp continuous)
  have h_G_cont : Continuous Gf := by
    have h := ContinuousOn.comp_continuous hCont continuous_exp
    -- exp maps ℝ into (0,∞), so the composition is globally continuous.
    have h' : Continuous (fun t => F (Real.exp t)) :=
      h (by intro t; exact Set.mem_Ioi.mpr (Real.exp_pos t))
    simp [Gf, G] at h'
    exact h'
  have h_H_cont : Continuous Hf := by
    simpa [Hf, H] using h_G_cont.add continuous_const

  -- Step 4: Convert CoshAddIdentity to d'Alembert equation for H
  have h_direct : DirectCoshAdd Gf := CoshAddIdentity_implies_DirectCoshAdd F hCoshAdd
  have h_dAlembert : ∀ t u, Hf (t + u) + Hf (t - u) = 2 * Hf t * Hf u := by
    intro t u
    have hG := h_direct t u
    -- Shifting G by +1 turns the cosh-add identity into the d'Alembert form.
    have h_goal : (Gf (t + u) + 1) + (Gf (t - u) + 1) = 2 * (Gf t + 1) * (Gf u + 1) := by
      calc (Gf (t + u) + 1) + (Gf (t - u) + 1)
          = (Gf (t + u) + Gf (t - u)) + 2 := by ring
        _ = (2 * (Gf t * Gf u) + 2 * (Gf t + Gf u)) + 2 := by simp [hG]
        _ = 2 * (Gf t + 1) * (Gf u + 1) := by ring
    simp [Hf, H, Gf] at h_goal
    exact h_goal

  -- Step 5: Second derivative condition (calibration transfers to H since
  -- H and G differ by a constant, so all derivatives agree)
  have h_H_d2 : deriv (deriv Hf) 0 = 1 := by
    have hG_d2 : deriv (deriv Gf) 0 = 1 := by simpa [Gf, G] using hCalib
    have hderiv : deriv Hf = deriv Gf := by
      funext t
      change deriv (fun y => Gf y + 1) t = deriv Gf t
      simpa using (deriv_add_const (f := Gf) (x := t) (c := (1 : ℝ)))
    have hderiv2 : deriv (deriv Hf) = deriv (deriv Gf) := congrArg deriv hderiv
    exact (congrArg (fun g => g 0) hderiv2).trans hG_d2

  -- Step 6: Apply d'Alembert uniqueness theorem
  have h_H_cosh : ∀ t, Hf t = Real.cosh t :=
    dAlembert_cosh_solution Hf h_H0 h_H_cont h_dAlembert h_H_d2
      h_smooth h_ode h_cont h_diff h_boot

  -- Step 7: Unshift to get G = cosh - 1
  have h_G_cosh : ∀ t, Gf t = Real.cosh t - 1 := by
    intro t
    have hH := h_H_cosh t
    have hH' : Gf t + 1 = Real.cosh t := by simpa [Hf, H, Gf] using hH
    linarith

  -- Step 8: Convert back via log parametrization: F x = G (log x) = J x
  have ht : Real.exp (Real.log x) = x := Real.exp_log hx
  have hJG : G Cost.Jcost (Real.log x) = Real.cosh (Real.log x) - 1 :=
    Jcost_G_eq_cosh_sub_one (Real.log x)
  calc F x
      = F (Real.exp (Real.log x)) := by rw [ht]
    _ = Gf (Real.log x) := rfl
    _ = Real.cosh (Real.log x) - 1 := h_G_cosh (Real.log x)
    _ = G Cost.Jcost (Real.log x) := by simpa using hJG.symm
    _ = Cost.Jcost (Real.exp (Real.log x)) := by simp [G]
    _ = Cost.Jcost x := by simpa [ht]
855
namespace Constructive

/-- Hypothesis: Symmetric second difference limit.

Given H(0) = 1, continuity, the d'Alembert equation, and differentiability of
`deriv H` at 0 with value 1, the symmetric second difference quotient
(H(t+u) + H(t-u) - 2H(t)) / u² converges to H(t) as u → 0.

NOTE(review): the bound variables `t u` in the d'Alembert premise shadow the
outer parameter `t`; the conclusion's `t` is the outer one. Also, the limit is
taken along `nhds 0`, which includes u = 0 itself (where the quotient is 0/0 = 0)
— confirm whether `nhdsWithin 0 {0}ᶜ` was intended. -/
def symmetric_second_diff_limit_hypothesis (H : ℝ → ℝ) (t : ℝ) : Prop :=
  H 0 = 1 → Continuous H → (∀ t u, H (t+u) + H (t-u) = 2 * H t * H u) →
    HasDerivAt (deriv H) 1 0 → Filter.Tendsto (fun u => (H (t+u) + H (t-u) - 2 * H t) / u^2) (nhds 0) (nhds (H t))

end Constructive
864
865/-! ## Aczél's Theorem and the ODE Derivation
866
867These results close the five regularity hypothesis gaps in `washburn_uniqueness`.
868After adding the single Aczél axiom, all five `_hypothesis` defs become provable, and
869a clean no-hypothesis version of the uniqueness theorem follows.
870-/
871
/-- The `dAlembert_continuous_implies_smooth_hypothesis` holds for every H,
    as a direct consequence of the Aczél axiom.

    This is one of the five regularity gaps of `washburn_uniqueness`; the
    `[AczelSmoothnessPackage]` instance supplies `aczel_dAlembert_smooth`. -/
theorem dAlembert_smooth_of_aczel [AczelSmoothnessPackage] (H : ℝ → ℝ) :
    dAlembert_continuous_implies_smooth_hypothesis H :=
  fun h_one h_cont h_dAlembert => aczel_dAlembert_smooth H h_one h_cont h_dAlembert
877
/-- **Theorem (ODE Derivation, universal coefficient)**: If H is C∞ and
satisfies d'Alembert, then `H''(t) = H''(0) * H(t)` everywhere.

This is the unnormalized form of `dAlembert_to_ODE_theorem`.

Proof sketch: fix t and differentiate the identity
f(u) = H(t+u) + H(t-u) = 2·H(t)·H(u) = g(u) twice in u at u = 0.
The left side gives 2·H''(t); the right side gives 2·H(t)·H''(0). -/
theorem dAlembert_to_ODE_general_theorem (H : ℝ → ℝ)
    (h_smooth : ContDiff ℝ ⊤ H)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u) :
    ∀ t, deriv (deriv H) t = deriv (deriv H) 0 * H t := by
  -- Extract C² from C∞, then differentiability of H and of deriv H.
  have hCDiff2 : ContDiff ℝ 2 H := h_smooth.of_le le_top
  have hDiff : Differentiable ℝ H :=
    hCDiff2.differentiable (by decide : (2 : WithTop ℕ∞) ≠ 0)
  have hCDiff1_H' : ContDiff ℝ 1 (deriv H) := by
    rw [show (2 : WithTop ℕ∞) = 1 + 1 from rfl] at hCDiff2
    rw [contDiff_succ_iff_deriv] at hCDiff2
    exact hCDiff2.2.2
  have hDiffDeriv : Differentiable ℝ (deriv H) :=
    hCDiff1_H'.differentiable (by decide : (1 : WithTop ℕ∞) ≠ 0)
  -- Shift helpers: u ↦ s + u has derivative 1, u ↦ s - u has derivative -1.
  have hsh_add : ∀ (s v : ℝ), HasDerivAt (fun u => s + u) (1 : ℝ) v := fun s v => by
    have h := (hasDerivAt_id v).add_const s; simp only [id] at h
    rwa [show (fun u : ℝ => u + s) = fun u => s + u from funext fun u => add_comm u s] at h
  have hsh_sub : ∀ (s v : ℝ), HasDerivAt (fun u => s - u) (-1 : ℝ) v := fun s v => by
    have h1 : HasDerivAt (fun u : ℝ => -u) (-1 : ℝ) v := by
      have := (hasDerivAt_id v).neg; simp only [id] at this; exact this
    have h2 := h1.const_add s
    rwa [show (fun u : ℝ => s + -u) = fun u => s - u from funext fun u => by ring] at h2
  intro t
  -- The two sides of d'Alembert agree as functions of u, so their second
  -- derivatives at 0 agree.
  have h_feq : (fun u => H (t + u) + H (t - u)) = (fun u => 2 * H t * H u) :=
    funext (h_dAlembert t)
  have key : deriv (deriv (fun u => H (t + u) + H (t - u))) 0 =
      deriv (deriv (fun u => 2 * H t * H u)) 0 :=
    congr_arg (fun f => deriv (deriv f) 0) h_feq
  -- LHS second derivative at 0 is 2 * H''(t).
  have lhs_eq : deriv (deriv (fun u => H (t + u) + H (t - u))) 0 = 2 * deriv (deriv H) t := by
    have h_plus : ∀ v, HasDerivAt (fun u => H (t + u)) (deriv H (t + v)) v := fun v => by
      have hH := (hDiff (t + v)).hasDerivAt
      have hcomp := hH.comp v (hsh_add t v)
      simp only [mul_one, Function.comp_apply] at hcomp; exact hcomp
    have h_minus : ∀ v, HasDerivAt (fun u => H (t - u)) (-deriv H (t - v)) v := fun v => by
      have hH := (hDiff (t - v)).hasDerivAt
      have hcomp := hH.comp v (hsh_sub t v)
      simp only [mul_neg, mul_one, Function.comp_apply] at hcomp; exact hcomp
    -- First derivative of the sum: v ↦ H'(t+v) - H'(t-v).
    have hfirst_fun : deriv (fun u => H (t + u) + H (t - u)) =
        fun v => deriv H (t + v) - deriv H (t - v) := funext fun v => by
      have heq : (fun u => H (t + u)) + (fun u => H (t - u)) =
          fun u => H (t + u) + H (t - u) := by ext u; rfl
      have h12 : deriv (fun u => H (t + u) + H (t - u)) v = deriv H (t + v) + -deriv H (t - v) := by
        rw [← heq]; exact ((h_plus v).add (h_minus v)).deriv
      linarith [show deriv H (t + v) + -deriv H (t - v) =
        deriv H (t + v) - deriv H (t - v) from by ring]
    -- Second derivative at 0: H''(t) - (-H''(t)) = 2 H''(t).
    have hd2_plus : HasDerivAt (fun v => deriv H (t + v)) (deriv (deriv H) t) 0 := by
      have hDH : HasDerivAt (deriv H) (deriv (deriv H) (t + 0)) (t + 0) :=
        (hDiffDeriv (t + 0)).hasDerivAt
      have hcomp := hDH.comp 0 (hsh_add t 0)
      simp only [mul_one, add_zero, Function.comp_apply] at hcomp; exact hcomp
    have hd2_minus : HasDerivAt (fun v => deriv H (t - v)) (-deriv (deriv H) t) 0 := by
      have hDH : HasDerivAt (deriv H) (deriv (deriv H) (t - 0)) (t - 0) :=
        (hDiffDeriv (t - 0)).hasDerivAt
      have hcomp := hDH.comp 0 (hsh_sub t 0)
      simp only [mul_neg, mul_one, sub_zero, Function.comp_apply] at hcomp; exact hcomp
    rw [congr_fun (congr_arg deriv hfirst_fun) 0]
    have heq2 : (fun v => deriv H (t + v)) - (fun v => deriv H (t - v)) =
        fun v => deriv H (t + v) - deriv H (t - v) := by ext v; rfl
    have h : deriv (fun v => deriv H (t + v) - deriv H (t - v)) 0 =
        deriv (deriv H) t - -deriv (deriv H) t := by
      rw [← heq2]; exact (hd2_plus.sub hd2_minus).deriv
    linarith [show deriv (deriv H) t - -deriv (deriv H) t = 2 * deriv (deriv H) t from by ring]
  -- RHS second derivative at 0 is 2 * H(t) * H''(0).
  have rhs_eq : deriv (deriv (fun u => 2 * H t * H u)) 0 =
      2 * H t * deriv (deriv H) 0 := by
    have hfirst_fun : deriv (fun u => 2 * H t * H u) = fun v => 2 * H t * deriv H v :=
      funext fun v => ((hDiff v).hasDerivAt.const_mul (2 * H t)).deriv
    have hsecond := (hDiffDeriv 0).hasDerivAt.const_mul (2 * H t)
    rw [congr_fun (congr_arg deriv hfirst_fun) 0, hsecond.deriv]
  -- 2 H''(t) = 2 H(t) H''(0)  ⇒  H''(t) = H''(0) H(t).
  rw [lhs_eq, rhs_eq] at key
  linarith
951
/-- **Theorem (ODE Derivation)**: If H is C∞ and satisfies d'Alembert with H''(0) = 1,
    then H'' = H everywhere.

    This is exactly `dAlembert_to_ODE_general_theorem` — which gives
    H''(t) = H''(0) · H(t) — specialized with the calibration H''(0) = 1, so the
    previously duplicated ~70-line derivation is replaced by a direct rewrite:
    H''(t) = H''(0) · H(t) = 1 · H(t) = H(t). -/
theorem dAlembert_to_ODE_theorem (H : ℝ → ℝ)
    (h_smooth : ContDiff ℝ ⊤ H)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u)
    (h_d2_zero : deriv (deriv H) 0 = 1) :
    ∀ t, deriv (deriv H) t = H t := by
  intro t
  -- H''(t) = H''(0) * H(t) = 1 * H(t) = H(t)
  rw [dAlembert_to_ODE_general_theorem H h_smooth h_dAlembert t, h_d2_zero, one_mul]
1033
/-- ODE regularity (3): any H with ContDiff ℝ ⊤ satisfies `ode_regularity_continuous_hypothesis`.

Continuity is immediate from smoothness; the hypothesis's premise is discarded. -/
theorem ode_regularity_continuous_of_smooth {H : ℝ → ℝ} (h : ContDiff ℝ ⊤ H) :
    ode_regularity_continuous_hypothesis H :=
  fun _ => h.continuous
1038
/-- ODE regularity (4): any H with ContDiff ℝ ⊤ satisfies `ode_regularity_differentiable_hypothesis`.

Downgrades C∞ to C¹ via `of_le le_top`, then extracts differentiability;
both premises of the hypothesis are discarded. -/
theorem ode_regularity_differentiable_of_smooth {H : ℝ → ℝ} (h : ContDiff ℝ ⊤ H) :
    ode_regularity_differentiable_hypothesis H :=
  fun _ _ => (h.of_le le_top : ContDiff ℝ 1 H).differentiable (by decide : (1 : WithTop ℕ∞) ≠ 0)
1043
/-- ODE regularity (5): any H with ContDiff ℝ ⊤ satisfies `ode_linear_regularity_bootstrap_hypothesis`.

No bootstrapping is needed when smoothness is already given: the required
finite regularity follows from `of_le le_top`, and all three premises are discarded. -/
theorem ode_regularity_bootstrap_of_smooth {H : ℝ → ℝ} (h : ContDiff ℝ ⊤ H) :
    ode_linear_regularity_bootstrap_hypothesis H :=
  fun _ _ _ => h.of_le le_top
1048
/-- **Theorem (d'Alembert → cosh, Aczél form)**: Using only the Aczél axiom, a continuous
    solution to d'Alembert with H(0) = 1 and H''(0) = 1 must equal cosh.

    This is the clean version of `dAlembert_cosh_solution`, requiring no regularity params:
    smoothness comes from `aczel_dAlembert_smooth`, evenness and H'(0) = 0 from the
    functional equation, and the ODE H'' = H from `dAlembert_to_ODE_theorem`; then the
    IVP (H(0) = 1, H'(0) = 0) pins down cosh via `ode_cosh_uniqueness_contdiff`. -/
theorem dAlembert_cosh_solution_aczel
    [AczelSmoothnessPackage]
    (H : ℝ → ℝ)
    (h_one : H 0 = 1)
    (h_cont : Continuous H)
    (h_dAlembert : ∀ t u, H (t+u) + H (t-u) = 2 * H t * H u)
    (h_d2_zero : deriv (deriv H) 0 = 1) :
    ∀ t, H t = Real.cosh t := by
  -- Aczél axiom: continuous d'Alembert solutions with H(0) = 1 are smooth.
  have h_smooth : ContDiff ℝ ⊤ H := aczel_dAlembert_smooth H h_one h_cont h_dAlembert
  have hDiff : Differentiable ℝ H :=
    (h_smooth.of_le le_top : ContDiff ℝ 1 H).differentiable (by decide : (1 : WithTop ℕ∞) ≠ 0)
  -- Evenness of H forces H'(0) = 0, giving the second initial condition.
  have h_even : Function.Even H := dAlembert_even H h_one h_dAlembert
  have h_H'0 : deriv H 0 = 0 := even_deriv_at_zero H h_even hDiff.differentiableAt
  have h_ode : ∀ t, deriv (deriv H) t = H t :=
    dAlembert_to_ODE_theorem H h_smooth h_dAlembert h_d2_zero
  have h_C2 : ContDiff ℝ 2 H := h_smooth.of_le le_top
  exact ode_cosh_uniqueness_contdiff H h_C2 h_ode h_one h_H'0
1070
/-- **Theorem (Washburn, clean form)**: The J-cost function is the unique
    reciprocal cost satisfying the RCL, normalization, calibration, and continuity.

    This version uses the global Aczél axiom internally and requires NO regularity
    hypothesis parameters from the caller. The proof mirrors `washburn_uniqueness`,
    substituting `dAlembert_cosh_solution_aczel` at the final step. -/
theorem washburn_uniqueness_aczel (F : ℝ → ℝ)
    [AczelSmoothnessPackage]
    (hRecip : IsReciprocalCost F)
    (hNorm : IsNormalized F)
    (hComp : SatisfiesCompositionLaw F)
    (hCalib : IsCalibrated F)
    (hCont : ContinuousOn F (Set.Ioi 0)) :
    ∀ x : ℝ, 0 < x → F x = Cost.Jcost x := by
  intro x hx
  -- Reciprocity in implicit-argument form; composition law in cosh-add form.
  have hSymm : ∀ {y}, 0 < y → F y = F y⁻¹ := fun {y} hy => hRecip y hy
  have hCoshAdd : CoshAddIdentity F := composition_law_equiv_coshAdd F |>.mp hComp
  -- Log-coordinate reparametrizations: Gf t = F (exp t), Hf = Gf + 1.
  let Gf : ℝ → ℝ := G F
  let Hf : ℝ → ℝ := H F
  -- Normalization gives G(0) = 0, hence H(0) = 1 (`h_G0` recorded for reference).
  have h_G0 : Gf 0 = 0 := G_zero_of_unit F hNorm
  have h_H0 : Hf 0 = 1 := by
    show H F 0 = 1
    simp only [H, G, Real.exp_zero]
    rw [hNorm]; ring
  -- Continuity of G from continuity of F on (0,∞) composed with exp.
  have h_G_cont : Continuous Gf := by
    have h := ContinuousOn.comp_continuous hCont continuous_exp
    have h' : Continuous (fun t => F (Real.exp t)) :=
      h (by intro t; exact Set.mem_Ioi.mpr (Real.exp_pos t))
    simp [Gf, G] at h'
    exact h'
  have h_H_cont : Continuous Hf := by
    simpa [Hf, H] using h_G_cont.add continuous_const
  -- The +1 shift converts the cosh-add identity into the d'Alembert equation.
  have h_direct : DirectCoshAdd Gf := CoshAddIdentity_implies_DirectCoshAdd F hCoshAdd
  have h_dAlembert : ∀ t u, Hf (t + u) + Hf (t - u) = 2 * Hf t * Hf u := by
    intro t u
    have hG := h_direct t u
    have h_goal : (Gf (t + u) + 1) + (Gf (t - u) + 1) = 2 * (Gf t + 1) * (Gf u + 1) := by
      calc (Gf (t + u) + 1) + (Gf (t - u) + 1)
          = (Gf (t + u) + Gf (t - u)) + 2 := by ring
        _ = (2 * (Gf t * Gf u) + 2 * (Gf t + Gf u)) + 2 := by simp [hG]
        _ = 2 * (Gf t + 1) * (Gf u + 1) := by ring
    simp [Hf, H, Gf] at h_goal
    exact h_goal
  -- Calibration transfers from G to H (they differ by a constant).
  have h_H_d2 : deriv (deriv Hf) 0 = 1 := by
    have hG_d2 : deriv (deriv Gf) 0 = 1 := by simpa [Gf, G] using hCalib
    have hderiv : deriv Hf = deriv Gf := by
      funext t; change deriv (fun y => Gf y + 1) t = deriv Gf t
      exact (deriv_add_const (f := Gf) (x := t) (c := (1 : ℝ)))
    have hderiv2 : deriv (deriv Hf) = deriv (deriv Gf) := congrArg deriv hderiv
    exact (congrArg (fun g => g 0) hderiv2).trans hG_d2
  -- Aczél-form d'Alembert uniqueness: H = cosh, hence G = cosh - 1.
  have h_H_cosh : ∀ t, Hf t = Real.cosh t :=
    dAlembert_cosh_solution_aczel Hf h_H0 h_H_cont h_dAlembert h_H_d2
  have h_G_cosh : ∀ t, Gf t = Real.cosh t - 1 := fun t => by
    have : Gf t + 1 = Real.cosh t := h_H_cosh t
    linarith
  -- Undo the log parametrization: F x = G (log x) = J x.
  have ht : Real.exp (Real.log x) = x := Real.exp_log hx
  have hJG : G Cost.Jcost (Real.log x) = Real.cosh (Real.log x) - 1 :=
    Jcost_G_eq_cosh_sub_one (Real.log x)
  calc F x
      = F (Real.exp (Real.log x)) := by rw [ht]
    _ = Gf (Real.log x) := rfl
    _ = Real.cosh (Real.log x) - 1 := h_G_cosh (Real.log x)
    _ = G Cost.Jcost (Real.log x) := by simp only [hJG]
    _ = Cost.Jcost (Real.exp (Real.log x)) := by simp [G]
    _ = Cost.Jcost x := by simp [ht]
1135
1136end FunctionalEquation
1137end Cost
1138end IndisputableMonolith
1139