# Theory Nats

(*<*)
(*
* Domains of natural numbers.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory Nats
imports HOLCF
begin
(*>*)

section‹Boxed Types, HOL's natural numbers hoisted.›

subsection‹Unboxed naturals.›

text‹We can represent the unboxed naturals as a discrete domain (every
number is equal to itself and unequal to everything else, and there is no
bottom). Think of these functions as the machine operations.

We could actually lift all HOL types and classes in this way; indeed
the HOLCF theory "Lift" does something similar, but is not as
fine-grained as this development.

Note: we default to just CPOs (not pointed CPOs) in this theory. We
adopt the notation for unboxed operators of \citet{SPJ-JL:1991}.›

default_sort predomain

type_synonym UNat = "nat discr"

instantiation discr :: (zero) zero
begin
definition zero_discr_def: "0 ≡ Discr 0"
instance ..
end

lemma zero_discr[simp]: "undiscr 0 = 0" unfolding zero_discr_def by simp

instantiation discr :: (one) one
begin
definition one_discr_def: "1 ≡ Discr 1"
instance ..
end

lemma one_discr[simp]: "undiscr 1 = 1" unfolding one_discr_def by simp

instantiation discr :: (ord) ord
begin
definition less_def[simp]: "m < n ≡ (undiscr m) < (undiscr n)"
definition le_def[simp]:   "m ≤ n ≡ (undiscr m) ≤ (undiscr n)"
instance ..
end

definition
uPlus :: "UNat → UNat → UNat" where
"uPlus ≡ Λ x y. Discr (undiscr x + undiscr y)"

abbreviation
uPlus_syn :: "UNat ⇒ UNat ⇒ UNat" (infixl "+⇩#" 65) where
"x +⇩# y ≡ uPlus⋅x⋅y"

instantiation discr :: (plus) plus
begin
definition plus_discr_def[simp]: "x + y ≡ Discr (undiscr x + undiscr y)"
instance ..
end

definition
uMinus :: "UNat → UNat → UNat" where
"uMinus ≡ Λ x y. Discr (undiscr x - undiscr y)"

abbreviation
uMinus_syn :: "UNat ⇒ UNat ⇒ UNat" (infixl "-⇩#" 65) where
"x -⇩# y ≡ uMinus⋅x⋅y"

instantiation discr :: (minus) minus
begin
definition minus_discr_def[simp]: "x - y ≡ Discr (undiscr x - undiscr y)"
instance ..
end

definition
uMult :: "UNat → UNat → UNat" where
"uMult ≡ Λ x y. Discr (undiscr x * undiscr y)"

abbreviation
uMult_syn :: "UNat ⇒ UNat ⇒ UNat" (infixl "*⇩#" 65) where
"x *⇩# y ≡ uMult⋅x⋅y"

instantiation discr :: (times) times
begin
definition times_discr_def[simp]: "x * y ≡ Discr (undiscr x * undiscr y)"
instance ..
end

lemma uMult_unit_left: "1 *⇩# (x::UNat) = x"
unfolding uMult_def one_discr_def by simp
lemma uMult_unit_right: "(x::UNat) *⇩# 1 = x"
unfolding uMult_def one_discr_def by simp

lemma uMult_assoc: "(x *⇩# y) *⇩# z = x *⇩# (y *⇩# z)"
unfolding uMult_def by simp

lemma uMult_commute: "x *⇩# y = y *⇩# x"
unfolding uMult_def by simp

lemma uMult_left_commute: "a *⇩# (b *⇩# c) = b *⇩# (a *⇩# c)"
unfolding uMult_def by simp

lemmas uMult_arithmetic =
uMult_unit_left uMult_unit_right uMult_assoc uMult_commute uMult_left_commute

text‹

As Brian Huffman helpfully observed, the @{typ "'a⇩⊥"} type
constructor supports the monadic operations; it's isomorphic to
Haskell's @{term "Maybe a"}, or ML's @{typ "'a option"}.

Note that @{term "return"} is @{term "up"}.

›

(* Monadic bind for the lifting monad "'a⇩⊥": ⊥ aborts the computation,
   while "up⋅x" passes x on to the continuation g (via fup). *)
definition
bbind :: "('a::cpo)⇩⊥ → ('a → ('b::pcpo)) → 'b" where
"bbind ≡ Λ b g. fup⋅g⋅b"

(* Haskell-style infix syntax for bbind. *)
abbreviation
bbind_syn :: "('a::cpo)⇩⊥ ⇒ ('a → ('b::pcpo)) ⇒ 'b" (infixl ">>=" 65) where
"b >>= g ≡ bbind⋅b⋅g"

(* Strictness of bind in both arguments. The first lemma's proof was
   missing; it follows by extensionality since fup⋅g⋅⊥ = ⊥ for every g. *)
lemma bbind_strict1[simp]: "bbind⋅⊥ = ⊥"
by (rule cfun_eqI) (simp add: bbind_def)
lemma bbind_strict2[simp]: "x >>= ⊥ = ⊥"
by (cases x, simp_all add: bbind_def)

lemma bbind_leftID[simp]: "up⋅a >>= f = f⋅a" by (simp add: bbind_def)
lemma bbind_rightID[simp]: "m >>= up = m" by (cases m, simp_all)

lemma bbind_assoc[simp]: "f >>= g >>= h = f >>= (Λ x. g⋅x >>= h)"
by (cases f, simp_all)

lemma bbind_case_distr_strict: "f⋅⊥ = ⊥ ⟹ f⋅(g >>= h) = g >>= (Λ x. f⋅(h⋅x))"
unfolding bbind_def by (cases g, simp_all)

text‹

and has many useful algebraic properties. Unfortunately it is more
work than is useful to write the lemmas in a way that makes the
higher-order unifier in the simplifier happy. Seems easier just to
unfold the definition and go from there.

›

definition
bKleisli :: "('a::cpo → ('b::cpo)⇩⊥) → ('b → ('c::cpo)⇩⊥) → ('a → 'c⇩⊥)" where
"bKleisli ≡ Λ f g x. f⋅x >>= g"

abbreviation
bKleisli_syn :: "('a::cpo → ('b::cpo)⇩⊥) ⇒ ('b → ('c::cpo)⇩⊥) ⇒ ('a → 'c⇩⊥)" (infixl ">=>" 65) where
"b >=> g ≡ bKleisli⋅b⋅g"

(* Strictness of Kleisli composition in both arguments. The first lemma's
   proof was missing; two extensionality steps reduce it to bbind_strict1. *)
lemma bKleisli_strict1[simp]: "bKleisli⋅⊥ = ⊥"
by (rule cfun_eqI)+ (simp add: bKleisli_def)
lemma bKleisli_strict2[simp]: "b >=> ⊥ = ⊥"
by (rule cfun_eqI, simp add: bKleisli_def)

lemma bKleisli_bbind: "(f >>= g) >=> h = f >>= (Λ x. g⋅x >=> h)"
by (cases f, simp_all)

text ‹

The "Box" type denotes computations that, when demanded, yield
either @{term "⊥"} or an unboxed value. Note that the @{term "Box"}
constructor is strict, and so merely tags the lifted computation @{typ
"'a⇩⊥"}. @{typ "'a"} can be pointed or unpointed. This approach enables
us to distinguish the boxed values from the lifted-function-space that
models recursive functions with unboxed codomains.

›

domain 'a Box = Box (unbox :: "'a⇩⊥")

(* Inject a value into the Box type: lift it with "up" and tag it with the
   strict constructor Box.  box⋅x is never ⊥, since up⋅x ≠ ⊥. *)
definition
box :: "('a::predomain) → 'a Box" where
"box ≡ Box oo up"

(* Introduction and evaluation rules relating box, Box and unbox. *)
lemma boxI: "Box⋅(up⋅x) = box⋅x" unfolding box_def by simp
lemma unbox_box[simp]: "unbox⋅(box⋅x) = up⋅x" unfolding box_def by simp
lemma unbox_Box[simp]: "x ≠ ⊥ ⟹ unbox⋅(Box⋅x) = x" by simp

text‹

If we succeed in @{term "box"}ing something, then clearly that
something was not @{term "⊥"}.

›

lemma box_casedist[case_names bottom Box, cases type: Box]:
assumes xbot: "x = ⊥ ⟹ P"
and xbox: "⋀u. x = box⋅u ⟹ P"
shows "P"
proof(cases x)
case bottom with xbot show ?thesis by simp
next
case (Box u) with xbox show ?thesis
by (cases u, simp_all add: box_def up_def cont_Iup bottomI[OF minimal_up])
qed

lemma bbind_leftID'[simp]: "unbox⋅a >>= box = a" by (cases a, simp_all add: bbind_def)

(*<*)
text ‹

The optimisations of \citet{SPJ-JL:1991}.

p11: Repeated unboxing of the same value can be done once (roughly:
store the value in a register). Their story is more general.

›

lemma box_repeated:
"x >>= (Λ x'. f >>= (Λ y'. x >>= body⋅x'⋅y'))
= x >>= (Λ x'. f >>= (Λ y'. body⋅x'⋅y'⋅x'))"
by (cases x, simp_all)
(*>*)

text‹Lift binary predicates over @{typ "'a discr"} into @{typ "'a discr Box"}.›

text ‹@{term "bliftM"} and @{term "bliftM2"} encapsulate the boxing
and unboxing.›

definition
bliftM :: "('a::predomain → 'b::predomain) ⇒ 'a Box → 'b Box" where
"bliftM f ≡ Λ x. unbox⋅x >>= (Λ x'. box⋅(f⋅x'))"

lemma bliftM_strict1[simp]: "bliftM f⋅⊥ = ⊥" by (simp add: bliftM_def)
lemma bliftM_op[simp]: "bliftM f⋅(box⋅x) = box⋅(f⋅x)" by (simp add: bliftM_def)

definition
bliftM2 :: "('a::predomain → 'b::predomain → 'c::predomain) ⇒ 'a Box → 'b Box → 'c Box" where
"bliftM2 f ≡ Λ x y. unbox⋅x >>= (Λ x'. unbox⋅y >>= (Λ y'. box⋅(f⋅x'⋅y')))"

lemma bliftM2_strict1[simp]: "bliftM2 f⋅⊥ = ⊥" by (rule cfun_eqI)+ (simp add: bliftM2_def)
lemma bliftM2_strict2[simp]: "bliftM2 f⋅x⋅⊥ = ⊥" by (cases x, simp_all add: bliftM2_def)
lemma bliftM2_op[simp]: "bliftM2 f⋅(box⋅x)⋅(box⋅y) = box⋅(f⋅x⋅y)" by (simp add: bliftM2_def)

text‹

Define the arithmetic operations. We need extra continuity lemmas as
we're using the full function space, so we can re-use the conventional
operators. The goal is to work at this level.

›

instantiation Box :: ("{predomain,plus}") plus
begin
definition plus_Box_def: "x + y ≡ bliftM2 (Λ a b. a + b)⋅x⋅y"
instance ..
end

lemma plus_Box_cont[simp, cont2cont]:
"⟦cont g; cont h⟧ ⟹ cont (λx. (g x :: 'a :: {predomain, plus} Box) + h x)"
unfolding plus_Box_def by simp

lemma plus_Box_strict1[simp]: "⊥ + (y :: 'a::{predomain, plus} Box) = ⊥"
unfolding plus_Box_def by simp
lemma plus_Box_strict2[simp]: "(x :: 'a::{predomain, plus} Box) + ⊥ = ⊥"
unfolding plus_Box_def by simp

instantiation Box :: ("{predomain,minus}") minus
begin
definition minus_Box_def: "x - y ≡ bliftM2 (Λ a b. a - b)⋅x⋅y"
instance ..
end

lemma minus_Box_cont[simp, cont2cont]:
"⟦cont g; cont h⟧ ⟹ cont (λx. (g x :: 'a :: {predomain, minus} Box) - h x)"
unfolding minus_Box_def by simp

lemma minus_Box_strict1[simp]: "⊥ - (y :: 'a::{predomain, minus} Box) = ⊥"
unfolding minus_Box_def by simp
lemma minus_Box_strict2[simp]: "(x :: 'a::{predomain, minus} Box) - ⊥ = ⊥"
unfolding minus_Box_def by simp

instantiation Box :: ("{predomain,times}") times
begin
definition times_Box_def: "x * y ≡ bliftM2 (Λ a b. a * b)⋅x⋅y"
instance ..
end

lemma times_Box_cont[simp, cont2cont]:
"⟦cont g; cont h⟧ ⟹ cont (λx. (g x :: 'a :: {predomain, times} Box) * h x)"
unfolding times_Box_def by simp

lemma times_Box_strict1[simp]: "⊥ * (y :: 'a::{predomain, times} Box) = ⊥"
unfolding times_Box_def by simp
lemma times_Box_strict2[simp]: "(x :: 'a::{predomain, times} Box) * ⊥ = ⊥"
unfolding times_Box_def by simp

(* Lift a binary HOL predicate on discrete values to boxed arguments,
   yielding a trivalent truth value: ⊥ if either argument is ⊥, else TT/FF
   according to p. *)
definition
bpred :: "('a::countable discr ⇒ 'a discr ⇒ bool) ⇒ 'a discr Box → 'a discr Box → tr" where
"bpred p ≡ Λ x y. unbox⋅x >>= (Λ x'. unbox⋅y >>= (Λ y'. if p x' y' then TT else FF))"

(* bpred is strict in both boxed arguments, and evaluates p on boxed values. *)
lemma bpred_strict1[simp]: "bpred p⋅⊥ = ⊥" unfolding bpred_def by (rule cfun_eqI, simp)
lemma bpred_strict2[simp]: "bpred p⋅x⋅⊥ = ⊥" unfolding bpred_def by (cases x, simp_all)

lemma bpred_eval[simp]: "bpred p⋅(box⋅x)⋅(box⋅y) = (if p x y then TT else FF)"
unfolding bpred_def by simp

abbreviation
beq_syn :: "'a::countable discr Box ⇒ 'a discr Box ⇒ tr" (infix "=⇩B" 50) where
"x =⇩B y ≡ bpred (=)⋅x⋅y"

abbreviation
ble_syn :: "'a::{countable,ord} discr Box ⇒ 'a discr Box ⇒ tr" (infix "≤⇩B" 50) where
"x ≤⇩B y ≡ bpred (≤)⋅x⋅y"

abbreviation
blt_syn :: "'a::{countable,ord} discr Box ⇒ 'a discr Box ⇒ tr" (infix "<⇩B" 50) where
"x <⇩B y ≡ bpred (<)⋅x⋅y"

subsection‹The flat domain of natural numbers›

text‹Lift arithmetic to the boxed naturals. Define some things that make
playing with boxed naturals more convenient.›

type_synonym Nat = "UNat Box"

instantiation Box :: ("{predomain, zero}") zero
begin
definition zero_Nat_def: "0 ≡ box⋅0"
instance ..
end

instantiation Box :: ("{predomain, one}") one
begin
definition one_Nat_def: "1 ≡ box⋅1"
instance ..
end

text ‹We need to know the underlying operations are continuous for these to work.›

lemma plus_Nat_eval[simp]: "(box⋅x :: Nat) + box⋅y = box⋅(x + y)" unfolding plus_Box_def by simp
lemma minus_Nat_eval[simp]: "(box⋅x :: Nat) - box⋅y = box⋅(x - y)" unfolding minus_Box_def by simp
lemma times_Nat_eval[simp]: "(box⋅x :: Nat) * box⋅y = box⋅(x * y)" unfolding times_Box_def by simp

definition
Nat_case :: "'a::domain → (Nat → 'a) → Nat → 'a" where
"Nat_case ≡ Λ z s n. unbox⋅n >>= (Λ n'. case_nat z (λn''. s⋅(box⋅(Discr n''))) (undiscr n'))"

lemma cont_case_nat[simp]:
"⟦cont (λx. f x); ⋀n. cont (λx. g x n) ⟧ ⟹ cont (λx. case_nat (f x) (g x) n)"
by (cases n, simp_all)

lemma Nat_case_strict[simp]: "Nat_case⋅z⋅s⋅⊥ = ⊥" by (simp add: Nat_case_def)
lemma Nat_case_zero[simp]: "Nat_case⋅z⋅s⋅0 = z" by (simp add: Nat_case_def zero_Nat_def zero_discr_def)
lemma Nat_case_suc[simp]:  "Nat_case⋅z⋅s⋅(box⋅(Discr (Suc n))) = s⋅(box⋅(Discr n))"
by (simp add: Nat_case_def)

(* The successor rule phrased arithmetically: scrutinising n + 1 for a
   defined n selects the successor branch applied to n itself.  The
   extraction had lost this lemma's header, orphaning the proof below. *)
lemma Nat_case_suc'[simp]:
assumes ndef: "n ≠ ⊥"
shows "Nat_case⋅z⋅s⋅(n + 1) = s⋅n"
proof -
from ndef obtain nu where nu: "n = box⋅nu"
unfolding box_def
apply (cases "n" rule: Box.exhaust)
apply simp
apply (case_tac "u")
apply simp_all
done
then obtain u where "nu = Discr u"
unfolding box_def
apply (cases nu)
apply simp
done
with nu show ?thesis unfolding one_Nat_def by simp
qed

lemma Nat_case_case_nat: "Nat_case⋅z⋅s⋅(box⋅(Discr n)) = case_nat z (λn'. s⋅(box⋅(Discr n'))) n"

lemma Nat_casedist[case_names bottom zero Suc]:
fixes x :: Nat
assumes xbot: "x = ⊥ ⟹ P"
and xzero: "x = 0 ⟹ P"
and xsuc: "⋀n. x = n + 1 ⟹ P"
shows "P"
proof(cases x rule: Box.exhaust)
case bottom with xbot show ?thesis by simp
next
case (Box u) hence xu: "x = Box⋅u" and ubottom: "u ≠ ⊥" .
from ubottom obtain n where ndn: "u = up⋅(Discr n)" apply (cases u) apply simp_all apply (case_tac x) apply simp done
show ?thesis
proof(cases n)
case 0 with ndn xu xzero show ?thesis unfolding zero_Nat_def by (simp add: boxI zero_discr_def)
next
case (Suc m) with ndn xu xsuc[where n="box⋅(Discr m)"]
show ?thesis
unfolding plus_Box_def unfolding one_Nat_def by (simp add: boxI one_discr_def)
qed
qed

lemma cont_Nat_case[simp]:
"⟦cont (λx. f x); ⋀n. cont (λx. g x⋅n) ⟧ ⟹ cont (λx. Nat_case⋅(f x)⋅(g x)⋅n)"
apply (cases n rule: Nat_casedist)
apply simp_all
apply (case_tac na rule: Box.exhaust)
apply simp_all
done

lemma Nat_induct[case_names bottom zero Suc]:
fixes P :: "Nat ⇒ bool"
assumes xbot: "P ⊥"
and xzero: "P 0"
and xsuc: "⋀n. ⟦n ≠ ⊥; P n ⟧ ⟹ P (n + 1)"
shows "P x"
proof(cases x rule: box_casedist)
case bottom with xbot show ?thesis by simp
next
case (Box u)
then obtain n where un: "u = Discr n" by (cases u, simp)
{
fix n
have "⋀x. x = box⋅(Discr n) ⟹ P x"
proof(induct n)
case 0 with xzero show ?case unfolding zero_Nat_def zero_discr_def by simp
next
case (Suc n)
hence "P (box⋅(Discr n))" by simp
with xsuc[where n="box⋅(Discr n)"] have "P (box⋅(Discr n) + 1)" unfolding box_def by simp
with Suc show ?case unfolding one_Nat_def one_discr_def plus_Box_def by simp
qed
}
with un Box show ?thesis by simp
qed

(* Commutativity of boxed addition: strictness discharges the ⊥ cases,
   and the discrete-level fact dc handles box/box.  The original apply
   script was truncated before its closing steps. *)
lemma plus_commute: "(x :: Nat) + y = y + x"
proof -
have dc: "⋀u v. (undiscr (u::nat discr) + undiscr v) = (undiscr v + undiscr u)"
by simp
thus ?thesis
apply (cases x)
apply simp
apply (cases y)
apply simp_all
done
qed

lemma mult_unit: "(x::Nat) * 1 = x"
unfolding times_Box_def one_Nat_def by (cases x, simp_all)

(* Commutativity of boxed multiplication, by the same case analysis as
   plus_commute.  The original apply script was truncated before its
   closing steps. *)
lemma mult_commute: "(x :: Nat) * y = y * x"
proof -
have dc: "⋀u v. (undiscr (u::nat discr) * undiscr v) = (undiscr v * undiscr u)"
by simp
show ?thesis
apply (cases x)
apply simp
apply (cases y)
apply (simp_all add: dc)
done
qed

(* Associativity of boxed multiplication: three case splits dispose of the
   ⊥ cases, then the discrete-level fact ac closes the box/box/box case.
   The original apply script was truncated before its closing steps. *)
lemma mult_assoc: "((x :: Nat) * y) * z = x * (y * z)"
proof -
have ac: "⋀u v w. undiscr (u::nat discr) * (undiscr v * undiscr w)
=  (undiscr u * undiscr v) * undiscr w" by simp
show ?thesis
apply (cases x)
apply simp
apply (cases y)
apply simp
apply (cases z)
apply simp
apply (simp add: ac)
done
qed

text‹Restore the HOLCF default sort.›

default_sort "domain"

(*<*)
end
(*>*)


# Theory LList

(*
* Lazy lists.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

(*<*)
theory LList
imports
HOLCF
Nats
begin
(*>*)

section‹The fully-lazy list type.›

text‹The list can contain anything that is a predomain.›

default_sort predomain

domain 'a llist =
lnil ("lnil")
| lcons (lazy "'a") (lazy "'a llist") (infixr ":@" 65)

(*<*)
(* Why aren't these in the library? *)

(* Evaluation rules for the map functional generated by the domain package.
   Each goal is discharged by one unfolding of llist_map followed by
   simplification; the original script was missing the simp steps for the
   last two goals. *)
lemma llist_map_eval_simps[simp]:
"llist_map⋅f⋅⊥ = ⊥"
"llist_map⋅f⋅lnil = lnil"
"llist_map⋅f⋅(x :@ xs) = f⋅x :@ llist_map⋅f⋅xs"
apply (subst llist_map_unfold)
apply simp
apply (subst llist_map_unfold)
apply simp
apply (subst llist_map_unfold)
apply simp
done
(*>*)

lemma llist_case_distr_strict:
"f⋅⊥ = ⊥ ⟹ f⋅(llist_case⋅g⋅h⋅xxs) = llist_case⋅(f⋅g)⋅(Λ x xs. f⋅(h⋅x⋅xs))⋅xxs"
by (cases xxs) simp_all

fixrec lsingleton :: "('a::predomain) → 'a llist"
where
"lsingleton⋅x = x :@ lnil"

fixrec lappend :: "'a llist → 'a llist → 'a llist"
where
"lappend⋅lnil⋅ys = ys"
| "lappend⋅(x :@ xs)⋅ys = x :@ (lappend⋅xs⋅ys)"

abbreviation
lappend_syn :: "'a llist ⇒ 'a llist ⇒ 'a llist" (infixr ":++" 65) where
"xs :++ ys ≡ lappend⋅xs⋅ys"

lemma lappend_strict': "lappend⋅⊥ = (Λ a. ⊥)"
by fixrec_simp

text‹This gives us that @{thm lappend_strict'}.›

text ‹This is where we use @{thm inst_cfun_pcpo}›
lemma lappend_strict[simp]: "lappend⋅⊥ = ⊥"
by (rule cfun_eqI) (simp add: lappend_strict')

lemma lappend_assoc: "(xs :++ ys) :++ zs = xs :++ (ys :++ zs)"
by (induct xs, simp_all)

lemma lappend_lnil_id_left[simp]: "lappend⋅lnil = ID"
by (rule cfun_eqI) simp

lemma lappend_lnil_id_right[simp]: "xs :++ lnil = xs"
by (induct xs) simp_all

fixrec lconcat :: "'a llist llist → 'a llist"
where
"lconcat⋅lnil = lnil"
| "lconcat⋅(x :@ xs) = x :++ lconcat⋅xs"

lemma lconcat_strict[simp]: "lconcat⋅⊥ = ⊥"
by fixrec_simp

fixrec lall :: "('a → tr) → 'a llist → tr"
where
"lall⋅p⋅lnil = TT"
| "lall⋅p⋅(x :@ xs) = (p⋅x andalso lall⋅p⋅xs)"

lemma lall_strict[simp]: "lall⋅p⋅⊥ = ⊥"
by fixrec_simp

fixrec lfilter :: "('a → tr) → 'a llist → 'a llist"
where
"lfilter⋅p⋅lnil = lnil"
| "lfilter⋅p⋅(x :@ xs) = If p⋅x then x :@ lfilter⋅p⋅xs else lfilter⋅p⋅xs"

lemma lfilter_strict[simp]: "lfilter⋅p⋅⊥ = ⊥"
by fixrec_simp

lemma lfilter_const_true: "lfilter⋅(Λ x. TT)⋅xs = xs"
by (induct xs, simp_all)

lemma lfilter_lnil: "(lfilter⋅p⋅xs = lnil) = (lall⋅(neg oo p)⋅xs = TT)"
proof(induct xs)
fix a l assume indhyp: "(lfilter⋅p⋅l = lnil) = (lall⋅(Tr.neg oo p)⋅l = TT)"
thus "(lfilter⋅p⋅(a :@ l) = lnil) = (lall⋅(Tr.neg oo p)⋅(a :@ l) = TT)"
by (cases "p⋅a" rule: trE, simp_all)
qed simp_all

lemma filter_filter: "lfilter⋅p⋅(lfilter⋅q⋅xs) = lfilter⋅(Λ x. q⋅x andalso p⋅x)⋅xs"
proof(induct xs)
fix a l assume "lfilter⋅p⋅(lfilter⋅q⋅l) = lfilter⋅(Λ(x::'a). q⋅x andalso p⋅x)⋅l"
thus "lfilter⋅p⋅(lfilter⋅q⋅(a :@ l)) = lfilter⋅(Λ(x::'a). q⋅x andalso p⋅x)⋅(a :@ l)"
by (cases "q⋅a" rule: trE, simp_all)
qed simp_all

fixrec ldropWhile :: "('a → tr) → 'a llist → 'a llist"
where
"ldropWhile⋅p⋅lnil = lnil"
| "ldropWhile⋅p⋅(x :@ xs) = If p⋅x then ldropWhile⋅p⋅xs else x :@ xs"

lemma ldropWhile_strict[simp]: "ldropWhile⋅p⋅⊥ = ⊥"
by fixrec_simp

lemma ldropWhile_lnil: "(ldropWhile⋅p⋅xs = lnil) = (lall⋅p⋅xs = TT)"
proof(induct xs)
fix a l assume "(ldropWhile⋅p⋅l = lnil) = (lall⋅p⋅l = TT)"
thus "(ldropWhile⋅p⋅(a :@ l) = lnil) = (lall⋅p⋅(a :@ l) = TT)"
by (cases "p⋅a" rule: trE, simp_all)
qed simp_all

fixrec literate :: "('a → 'a) → 'a → 'a llist"
where
"literate⋅f⋅x = x :@ literate⋅f⋅(f⋅x)"

declare literate.simps[simp del]

text‹This order of tests is convenient for the nub proof. I can
imagine the other would be convenient for other proofs...›

fixrec lmember :: "('a → 'a → tr) → 'a → 'a llist → tr"
where
"lmember⋅eq⋅x⋅lnil = FF"
| "lmember⋅eq⋅x⋅(lcons⋅y⋅ys) = (lmember⋅eq⋅x⋅ys orelse eq⋅y⋅x)"

lemma lmember_strict[simp]: "lmember⋅eq⋅x⋅⊥ = ⊥"
by fixrec_simp

fixrec llength :: "'a llist → Nat"
where
"llength⋅lnil = 0"
| "llength⋅(lcons⋅x⋅xs) = 1 + llength⋅xs"

lemma llength_strict[simp]: "llength⋅⊥ = ⊥"
by fixrec_simp

fixrec lmap :: "('a → 'b) → 'a llist → 'b llist"
where
"lmap⋅f⋅lnil = lnil"
| "lmap⋅f⋅(x :@ xs) = f⋅x :@ lmap⋅f⋅xs"

lemma lmap_strict[simp]: "lmap⋅f⋅⊥ = ⊥"
by fixrec_simp

lemma lmap_lmap:
"lmap⋅f⋅(lmap⋅g⋅xs) = lmap⋅(f oo g)⋅xs"
by (induct xs) simp_all

definition
"lconcatMap ≡ (Λ f. lconcat oo lmap⋅f)"

(* Computation rules for lconcatMap, obtained by unfolding its definition
   to lconcat oo lmap⋅f.  The proof was missing from the extraction. *)
lemma lconcatMap_comp_simps[simp]:
"lconcatMap⋅f⋅⊥ = ⊥"
"lconcatMap⋅f⋅lnil = lnil"
"lconcatMap⋅f⋅(x :@ xs) = f⋅x :++ lconcatMap⋅f⋅xs"
by (simp_all add: lconcatMap_def)

lemma lconcatMap_lsingleton[simp]:
"lconcatMap⋅lsingleton⋅x = x"
by (induct x) (simp_all add: lconcatMap_def)

text‹This @{term "zipWith"} function is only fully defined if the
lists have the same length.›

fixrec lzipWith0 :: "('a → 'b → 'c) → 'a llist → 'b llist → 'c llist"
where
"lzipWith0⋅f⋅(a :@ as)⋅(b :@ bs) = f⋅a⋅b :@ lzipWith0⋅f⋅as⋅bs"
| "lzipWith0⋅f⋅lnil⋅lnil = lnil"

lemma lzipWith0_stricts [simp]:
"lzipWith0⋅f⋅⊥⋅ys = ⊥"
"lzipWith0⋅f⋅lnil⋅⊥ = ⊥"
"lzipWith0⋅f⋅(x :@ xs)⋅⊥ = ⊥"
by fixrec_simp+

lemma lzipWith0_undefs [simp]:
"lzipWith0⋅f⋅lnil⋅(y :@ ys) = ⊥"
"lzipWith0⋅f⋅(x :@ xs)⋅lnil = ⊥"
by fixrec_simp+

text‹This @{term "zipWith"} function follows Haskell's in being more
permissive: zipping uneven lists results in a list as long as the
shortest one. This is what the backtracking monad expects.›

fixrec lzipWith :: "('a → 'b → 'c) → 'a llist → 'b llist → 'c llist"
where
"lzipWith⋅f⋅(a :@ as)⋅(b :@ bs) = f⋅a⋅b :@ lzipWith⋅f⋅as⋅bs"
| (unchecked) "lzipWith⋅f⋅xs⋅ys = lnil"

lemma lzipWith_simps [simp]:
"lzipWith⋅f⋅(x :@ xs)⋅(y :@ ys) = f⋅x⋅y :@ lzipWith⋅f⋅xs⋅ys"
"lzipWith⋅f⋅(x :@ xs)⋅lnil = lnil"
"lzipWith⋅f⋅lnil⋅(y :@ ys) = lnil"
"lzipWith⋅f⋅lnil⋅lnil = lnil"
by fixrec_simp+

lemma lzipWith_stricts [simp]:
"lzipWith⋅f⋅⊥⋅ys = ⊥"
"lzipWith⋅f⋅(x :@ xs)⋅⊥ = ⊥"
by fixrec_simp+

text‹Homomorphism properties, see Bird's life's work.›

lemma lmap_lappend_dist:
"lmap⋅f⋅(xs :++ ys) = lmap⋅f⋅xs :++ lmap⋅f⋅ys"
by (induct xs) simp_all

lemma lconcat_lappend_dist:
"lconcat⋅(xs :++ ys) = lconcat⋅xs :++ lconcat⋅ys"
by (induct xs) (simp_all add: lappend_assoc)

lemma lconcatMap_assoc:
"lconcatMap⋅h⋅(lconcatMap⋅g⋅f) = lconcatMap⋅(Λ v. lconcatMap⋅h⋅(g⋅v))⋅f"
by (induct f) (simp_all add: lmap_lappend_dist lconcat_lappend_dist lconcatMap_def)

lemma lconcatMap_lappend_dist:
"lconcatMap⋅f⋅(xs :++ ys) = lconcatMap⋅f⋅xs :++ lconcatMap⋅f⋅ys"
unfolding lconcatMap_def by (simp add: lconcat_lappend_dist lmap_lappend_dist)

(* The following avoid some case_tackery. *)

lemma lmap_not_bottoms[simp]:
"x ≠ ⊥ ⟹ lmap⋅f⋅x ≠ ⊥"
by (cases x) simp_all

lemma lsingleton_not_bottom[simp]:
"lsingleton⋅x ≠ ⊥"
by simp

lemma lappend_not_bottom[simp]:
"⟦ xs ≠ ⊥; xs = lnil ⟹ ys ≠ ⊥ ⟧ ⟹ xs :++ ys ≠ ⊥"
apply (cases xs)
apply simp_all
done

default_sort "domain"

(*<*)
end
(*>*)


# Theory Maybe

(*<*)
(*
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory Maybe
imports HOLCF
begin
(*>*)

text‹This section defines the monadic machinery for the ‹'a
Maybe› type. @{term "return"} is @{term "Just"}.›

domain 'a Maybe = Nothing | Just (lazy "'a")

definition
mfail :: "'a Maybe"
where
"mfail ≡ Nothing"

definition
mcatch :: "'a Maybe → 'a Maybe → 'a Maybe"
where
"mcatch ≡ Λ body handler. case body of Nothing ⇒ handler | Just⋅x ⇒ Just⋅x"

lemma mcatch_strict[simp]: "mcatch⋅⊥ = ⊥"
by (rule cfun_eqI, simp add: mcatch_def)

definition
mbind :: "'a Maybe → ('a → 'b Maybe) → 'b Maybe" where
"mbind ≡ Λ f g. (case f of Nothing ⇒ Nothing | Just⋅f' ⇒ g⋅f')"

abbreviation
mbind_syn :: "'a Maybe ⇒ ('a → 'b Maybe) ⇒ 'b Maybe" (infixl ">>=⇩M" 55) where
"f >>=⇩M g ≡ mbind⋅f⋅g"

lemma mbind_strict1[simp]: "⊥ >>=⇩M g = ⊥" by (simp add: mbind_def)

lemma leftID[simp]: "Just⋅a >>=⇩M f = f⋅a" by (simp add: mbind_def)
lemma rightID[simp]: "m >>=⇩M Just = m" by (cases m, simp_all add: mbind_def)

lemma assoc[simp]: "(m >>=⇩M f) >>=⇩M g = m >>=⇩M (Λ x. f⋅x >>=⇩M g)"
by (cases m, simp_all add: mbind_def)

lemma nothing_bind[simp]: "Nothing >>=⇩M f = Nothing" by (simp add: mbind_def)
lemma just_bind[simp]: "Just⋅x >>=⇩M f = f⋅x" by (simp add: mbind_def)

definition
mliftM2 :: "('a → 'b → 'c) ⇒ 'a Maybe → 'b Maybe → 'c Maybe" where
"mliftM2 f ≡ Λ x y. x >>=⇩M (Λ x'. y >>=⇩M (Λ y'. Just⋅(f⋅x'⋅y')))"

lemma mliftM2_Nothing1[simp]: "mliftM2 f⋅Nothing⋅y = Nothing" by (simp add: mliftM2_def)
lemma mliftM2_Nothing2[simp]: "mliftM2 f⋅(Just⋅x)⋅Nothing = Nothing" by (simp add: mliftM2_def)
lemma mliftM2_op[simp]: "mliftM2 f⋅(Just⋅x)⋅(Just⋅y) = Just⋅(f⋅x⋅y)" by (simp add: mliftM2_def)

lemma mliftM2_strict1[simp]: "mliftM2 f⋅⊥ = ⊥" by (rule cfun_eqI)+ (simp add: mliftM2_def)
lemma mliftM2_strict2[simp]: "mliftM2 f⋅(Just⋅x)⋅⊥ = ⊥" by (simp add: mliftM2_def)

end


# Theory FixedPointTheorems

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory FixedPointTheorems
imports
HOLCF
begin

setup ‹
(fn _ => fn s => Latex.string ("\\" ^ "<" ^ s ^ "\\>"))
›

(* LaTeXsugar fights with HOLCF syntax: at least cdot *)

(* THEOREMS *)
notation (Rule output)
Pure.imp  ("\<^latex>‹\\mbox{}\\inferrule{\\mbox{›_\<^latex>‹}}›\<^latex>‹{\\mbox{›_\<^latex>‹}}›")

syntax (Rule output)
"_bigimpl" :: "asms ⇒ prop ⇒ prop"
("\<^latex>‹\\mbox{}\\inferrule{›_\<^latex>‹}›\<^latex>‹{\\mbox{›_\<^latex>‹}}›")

"_asms" :: "prop ⇒ asms ⇒ asms"
("\<^latex>‹\\mbox{›_\<^latex>‹}\\\\›/ _")

"_asm" :: "prop ⇒ asms" ("\<^latex>‹\\mbox{›_\<^latex>‹}›")

(*>*)
section‹Fixed-point theorems for program transformation›

text‹

We begin by recounting some standard theorems from the early days of
denotational semantics. The origins of these results are lost to
history; the interested reader can find some of it in
\citet{Bekic:1969, Manna:1974, Greibach:1975, Stoy:1977,
DBLP:books/daglib/0002432, Harel:1980, Plotkin:1983, Winskel:1993,
DBLP:journals/toplas/Sangiorgi09}.

›

subsection‹The rolling rule›

text‹

The \emph{rolling rule} captures what intuitively happens when we
re-order a recursive computation consisting of two parts. This theorem
dates from the 1970s at the latest -- see \citet[p210]{Stoy:1977} and
\citet{Plotkin:1983}. The following proofs were provided by
\citet{GillHutton:2009}.

›

lemma rolling_rule_ltr: "fix⋅(g oo f) ⊑ g⋅(fix⋅(f oo g))"
proof -
have "g⋅(fix⋅(f oo g)) ⊑ g⋅(fix⋅(f oo g))"
by (rule below_refl) ― ‹reflexivity›
hence "g⋅((f oo g)⋅(fix⋅(f oo g))) ⊑ g⋅(fix⋅(f oo g))"
using fix_eq[where F="f oo g"] by simp ― ‹computation›
hence "(g oo f)⋅(g⋅(fix⋅(f oo g))) ⊑ g⋅(fix⋅(f oo g))"
by simp ― ‹re-associate @{term "(oo)"}›
thus "fix⋅(g oo f) ⊑ g⋅(fix⋅(f oo g))"
using fix_least_below by blast ― ‹induction›
qed

lemma rolling_rule_rtl: "g⋅(fix⋅(f oo g)) ⊑ fix⋅(g oo f)"
proof -
have "fix⋅(f oo g) ⊑ f⋅(fix⋅(g oo f))" by (rule rolling_rule_ltr)
hence "g⋅(fix⋅(f oo g)) ⊑ g⋅(f⋅(fix⋅(g oo f)))"
by (rule monofun_cfun_arg) ― ‹g is monotonic›
thus "g⋅(fix⋅(f oo g)) ⊑ fix⋅(g oo f)"
using fix_eq[where F="g oo f"] by simp ― ‹computation›
qed

lemma rolling_rule: "fix⋅(g oo f) = g⋅(fix⋅(f oo g))"
by (rule below_antisym[OF rolling_rule_ltr rolling_rule_rtl])
(*

This property of a fixed-point operator is termed \emph{dinaturality}
by \citet{DBLP:conf/lics/SimpsonP00}.

*)

subsection‹Least-fixed-point fusion›

text‹

\label{sec:lfp-fusion}

\emph{Least-fixed-point fusion} provides a kind of induction that has
proven to be very useful in calculational settings. Intuitively it
lifts the step-by-step correspondence between @{term "f"} and @{term
"h"} witnessed by the strict function @{term "g"} to the fixed points
of @{term "f"} and @{term "g"}:
$\begin{diagram} \node{\bullet} \arrow{e,t}{h} \node{\bullet}\\ \node{\bullet} \arrow{n,l}{g} \arrow{e,b}{f} \node{\bullet} \arrow{n,r}{g} \end{diagram} \qquad \Longrightarrow \qquad \begin{diagram} \node{\mathsf{fix}\ h}\\ \node{\mathsf{fix}\ f} \arrow{n,r}{g} \end{diagram}$
\citet*{FokkingaMeijer:1991}, and also their later
\citet*{barbed-wire:1991}, made extensive use of this rule, as did
\citet{Tullsen:PhDThesis} in his program transformation tool PATH.
This diagram is strongly reminiscent of the simulations used to
establish refinement relations between imperative programs and their
specifications \citep*{EdR:cup98}.

The following proof is close to the third variant of
\citet[p215]{Stoy:1977}. We relate the two fixpoints using the rule
\texttt{parallel\_fix\_ind}:
\begin{center}
@{thm[mode=Rule] parallel_fix_ind}
\end{center}
in a very straightforward way:

›

(* Least-fixed-point fusion: a strict g intertwining f and h maps fix⋅f to
   fix⋅h.  The inductive step unrolls the composition equation pointwise
   (cfcomp2: (f oo g)⋅x = f⋅(g⋅x)); its justification was missing. *)
lemma lfp_fusion:
assumes "g⋅⊥ = ⊥"
assumes "g oo f = h oo g"
shows "g⋅(fix⋅f) = fix⋅h"
proof(induct rule: parallel_fix_ind)
case 2 show "g⋅⊥ = ⊥" by fact
case (3 x y)
from ‹g⋅x = y› ‹g oo f = h oo g› show "g⋅(f⋅x) = h⋅y"
by (metis cfcomp2)
qed simp

text‹

This lemma also goes by the name of \emph{Plotkin's axiom}
\citep{PittsAM:relpod} or \emph{uniformity}
\citep{DBLP:conf/lics/SimpsonP00}.

›
(*<*)

(* The rest of this theory is only of historical interest. *)

text ‹Some may find the pointed version easier to read.›

(* Pointed restatement of lfp_fusion: the pointwise hypothesis Cfg is
   converted to the composition form via function extensionality
   (cfun_eq_iff).  The terminal proof method was missing. *)
lemma lfp_fusion_pointed:
assumes Cfg: "⋀f. C⋅(F⋅f) = G⋅(C⋅f)"
and strictC: "C⋅⊥ = ⊥"
shows "C⋅(fix⋅F) = fix⋅G"
using lfp_fusion[where f=F and g=C and h=G] assms
by (simp add: cfun_eq_iff)

text‹

Alternative proofs. This is the ``intuitive'' one of
\citet[p125]{Gunter:1992} and \citet[p46]{Tullsen:PhDThesis}, where we
can shuffle @{term "g"} to the end of the iteration of @{term "f"}
using @{term "fgh"}.

›

lemma lfp_fusion2_aux:
assumes fgh: "g oo f = h oo g"
shows "g⋅(iterate i⋅f⋅⊥) = iterate i⋅h⋅(g⋅⊥)"
proof(induct i)
case (Suc i)
have "g⋅(iterate (Suc i)⋅f⋅⊥) = (g oo f)⋅(iterate i⋅f⋅⊥)" by simp
also have "... = h⋅(g⋅(iterate i⋅f⋅⊥))" using fgh by (simp add: cfcomp1)
also have "... = h⋅(iterate i⋅h⋅(g⋅⊥))" using Suc by simp
also have "... = iterate (Suc i)⋅h⋅(g⋅⊥)" by simp
finally show ?case .
qed simp

lemma lfp_fusion2:
assumes fgh: "g oo f = h oo g"
and strictg: "g⋅⊥ = ⊥"
shows "g⋅(fix⋅f) = fix⋅h"
proof -
have "g⋅(fix⋅f) = g⋅(⨆i. iterate i⋅f⋅⊥)" by (simp only: fix_def2)
also have "... = (⨆i. g⋅(iterate i⋅f⋅⊥))" by (simp add: contlub_cfun_arg)
also have "... = (⨆i. (iterate i⋅h⋅(g⋅⊥)))" by (simp only: lfp_fusion2_aux[OF fgh])
also have "... = fix⋅h" using strictg by (simp only: fix_def2)
finally show ?thesis .
qed

text‹This is the first one by \citet[p213]{Stoy:1977}, almost
identical to the above.›

lemma lfp_fusion3_aux:
assumes fgh: "g oo f = h oo g"
and strictg: "g⋅⊥ = ⊥"
shows "g⋅(iterate i⋅f⋅⊥) = iterate i⋅h⋅⊥"
proof(induct i)
case 0 from strictg show ?case by simp
next
case (Suc i)
have "g⋅(iterate (Suc i)⋅f⋅⊥) = (g oo f)⋅(iterate i⋅f⋅⊥)" by simp
also have "... = h⋅(g⋅(iterate i⋅f⋅⊥))" using fgh by (simp add: cfcomp1)
also have "... = h⋅(iterate i⋅h⋅⊥)" using Suc by simp
also have "... = iterate (Suc i)⋅h⋅⊥" by simp
finally show ?case .
qed

lemma lfp_fusion3:
assumes fgh: "g oo f = h oo g"
and strictg: "g⋅⊥ = ⊥"
shows "g⋅(fix⋅f) = fix⋅h"
proof -
have "g⋅(fix⋅f) = g⋅(⨆i. iterate i⋅f⋅⊥)" by (simp only: fix_def2)
also have "... = (⨆i. g⋅(iterate i⋅f⋅⊥))" by (simp add: contlub_cfun_arg)
also have "... = (⨆i. (iterate i⋅h⋅⊥))" by (simp only: lfp_fusion3_aux[OF fgh strictg])
also have "... = fix⋅h" by (simp only: fix_def2)
finally show ?thesis .
qed

text‹Stoy's second proof \citep[p214]{Stoy:1977} is similar to the
original proof using fixed-point induction.›

lemma lfp_fusion4:
assumes fgh: "g oo f = h oo g"
and strictg: "g⋅⊥ = ⊥"
shows "g⋅(fix⋅f) = fix⋅h"
proof(rule below_antisym)
show "fix⋅h ⊑ g⋅(fix⋅f)"
proof -
have "h⋅(g⋅(fix⋅f)) = (g oo f)⋅(fix⋅f)" using fgh by simp
also have "... = g⋅(fix⋅f)" by (subst fix_eq, simp)
finally show ?thesis by (rule fix_least)
qed
let ?P = "λx. g⋅x ⊑ fix⋅h"
show "?P (fix⋅f)"
proof(induct rule: fix_ind[where P="?P"])
case 2 with strictg show ?case by simp
next
case (3 x) hence indhyp: "g⋅x ⊑ fix⋅h" .
have "g⋅(f⋅x) = (h oo g)⋅x" using fgh[symmetric] by simp
with indhyp show "g⋅(f⋅x) ⊑ fix⋅h"
by (subst fix_eq, simp add: monofun_cfun)
qed simp
qed

text‹A wrinkly variant from \citet[p11]{barbed-wire:1991}.›

lemma lfp_fusion_barbed_variant:
assumes ff': "f⋅⊥ = f'⋅⊥"
and fgh: "f oo g = h oo f"
and f'g'h: "f' oo g' = h oo f'"
shows "f⋅(fix⋅g) = f'⋅(fix⋅g')"
proof(induct rule: parallel_fix_ind)
case 2 show "f⋅⊥ = f'⋅⊥" by (rule ff')
case (3 x y)
from ‹f⋅x = f'⋅y› have "h⋅(f⋅x) = h⋅(f'⋅y)" by simp
with fgh f'g'h have "f⋅(g⋅x) = f'⋅(g'⋅y)"
using cfcomp2[where f="f" and g="g", symmetric]
cfcomp2[where f="f'" and g="g'", symmetric]
by simp
thus ?case by simp
qed simp
(*>*)

(*<*)
end
(*>*)


# Theory WorkerWrapper

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com, commenced December 2007.
*)

theory WorkerWrapper
imports
HOLCF
FixedPointTheorems
begin

(*>*)
section‹The transformation according to Gill and Hutton›

text‹

\begin{figure}[tb]
\begin{center}
\fbox{\parbox{0.96\textwidth}{For a recursive definition @{haskell "comp =
\\mathsf{fix}\\ body"} for some @{haskell "body :: A \\to A"} and a pair of
functions @{haskell "wrap :: B \\to A"} and @{haskell "unwrap :: A \\to B"} where
@{haskell "wrap \\circ unwrap = id_A"}, we have:

comp = wrap\ work\\
\quad work = \mathsf{fix}\ (unwrap \circ body \circ wrap)

Also:

(unwrap \circ wrap)\ work = work
\end{center}%
\caption{The worker/wrapper transformation and fusion rule of \citet{GillHutton:2009}.}\label{fig:ww}
\end{figure}

The worker/wrapper transformation and associated fusion rule as
formalised by \citet*{GillHutton:2009} are reproduced in
Figure~\ref{fig:ww}, and the reader is referred to the original paper
for further motivation and background.

Armed with the rolling rule we can show that Gill and Hutton's
justification of the worker/wrapper transformation is sound. There is
a battery of these transformations with varying strengths of
hypothesis.

The first requires @{term "wrap oo unwrap"} to be the identity for all
values.

›

(* Worker/wrapper under the strongest hypothesis: wrap oo unwrap = ID.
   The calculation rewrites body as wrap oo unwrap oo body and rolls the
   fixpoint.  Two calculation steps had lost their justifications; both
   need assoc_oo to re-associate the compositions. *)
lemma worker_wrapper_id:
fixes wrap :: "'b::pcpo → 'a::pcpo"
fixes unwrap :: "'a → 'b"
assumes wrap_unwrap: "wrap oo unwrap = ID"
assumes comp_body: "computation = fix⋅body"
shows "computation = wrap⋅(fix⋅(unwrap oo body oo wrap))"
proof -
from comp_body have "computation = fix⋅(ID oo body)"
by simp
also from wrap_unwrap have "… = fix⋅(wrap oo unwrap oo body)"
by (simp add: assoc_oo)
also have "... = wrap⋅(fix⋅(unwrap oo body oo wrap))"
using rolling_rule[where f="unwrap oo body" and g="wrap"]
by (simp add: assoc_oo)
finally show ?thesis .
qed

text‹

The second weakens this assumption by requiring that @{term "wrap oo
unwrap"} only act as the identity on values in the image of @{term
"body"}.

›

(* Weaker hypothesis: wrap oo unwrap need only be the identity on the
   image of body.  The rolling-rule step's justification was missing. *)
lemma worker_wrapper_body:
  fixes wrap :: "'b::pcpo → 'a::pcpo"
  fixes unwrap :: "'a → 'b"
  assumes wrap_unwrap: "wrap oo unwrap oo body = body"
  assumes comp_body: "computation = fix⋅body"
  shows "computation = wrap⋅(fix⋅(unwrap oo body oo wrap))"
proof -
  from comp_body have "computation = fix⋅(wrap oo unwrap oo body)"
    using wrap_unwrap by (simp add: assoc_oo wrap_unwrap)
  also have "... = wrap⋅(fix⋅(unwrap oo body oo wrap))"
    using rolling_rule[where f="unwrap oo body" and g="wrap"]
    by (simp add: assoc_oo)
  finally show ?thesis .
qed

text‹

This is particularly useful when the computation being transformed is
strict in its argument.

Finally we can allow the identity to take the full recursive context
into account. This rule was described by Gill and Hutton but not used.

›

(* Weakest hypothesis: the identity requirement holds only under fix,
   i.e. in the full recursive context.  Described but unused by Gill
   and Hutton.  The rolling-rule justification was missing. *)
lemma worker_wrapper_fix:
  fixes wrap :: "'b::pcpo → 'a::pcpo"
  fixes unwrap :: "'a → 'b"
  assumes wrap_unwrap: "fix⋅(wrap oo unwrap oo body) = fix⋅body"
  assumes comp_body: "computation = fix⋅body"
  shows "computation = wrap⋅(fix⋅(unwrap oo body oo wrap))"
proof -
  from comp_body have "computation = fix⋅(wrap oo unwrap oo body)"
    using wrap_unwrap by (simp add: assoc_oo wrap_unwrap)
  also have "... = wrap⋅(fix⋅(unwrap oo body oo wrap))"
    using rolling_rule[where f="unwrap oo body" and g="wrap"]
    by (simp add: assoc_oo)
  finally show ?thesis .
qed

text‹

Gill and Hutton's ‹worker_wrapper_fusion› rule is intended to
allow the transformation of @{term "(unwrap oo wrap)⋅R"} to @{term
"R"} in recursive contexts, where @{term "R"} is meant to be a
self-call. Note that it assumes that the first worker/wrapper
hypothesis can be established.

›

(* Gill/Hutton's fusion rule: unwrap oo wrap is the identity on the
   worker.  Shown later to be only partially correct when used as a
   rewrite inside fold/unfold.  The rolling-rule justification was
   missing from the calculation. *)
lemma worker_wrapper_fusion:
  fixes wrap :: "'b::pcpo → 'a::pcpo"
  fixes unwrap :: "'a → 'b"
  assumes wrap_unwrap: "wrap oo unwrap = ID"
  assumes work: "work = fix⋅(unwrap oo body oo wrap)"
  shows "(unwrap oo wrap)⋅work = work"
proof -
  have "(unwrap oo wrap)⋅work = (unwrap oo wrap)⋅(fix⋅(unwrap oo body oo wrap))"
    using work by simp
  also have "… = (unwrap oo wrap)⋅(fix⋅(unwrap oo body oo wrap oo unwrap oo wrap))"
    using wrap_unwrap by (simp add: assoc_oo)
  also have "… = fix⋅(unwrap oo wrap oo unwrap oo body oo wrap)"
    using rolling_rule[where f="unwrap oo body oo wrap" and g="unwrap oo wrap"]
    by (simp add: assoc_oo)
  also have "… = fix⋅(unwrap oo body oo wrap)"
    using wrap_unwrap by (simp add: assoc_oo)
  finally show ?thesis using work by simp
qed

text‹

The following sections show that this rule only preserves partial
correctness. This is because Gill and Hutton apply it in the context
of the fold/unfold program transformation framework of
\citet*{BurstallDarlington:1977}, which need not preserve termination.
We show that the fusion rule does in fact require extra conditions to
be totally correct and propose one such sufficient condition.

›
(*<*)

end
(*>*)


# Theory CounterExample

(*<*)
theory CounterExample
imports
HOLCF
WorkerWrapper
begin
(*>*)

subsection‹Worker/wrapper fusion is partially correct›

text‹

We now examine how Gill and Hutton apply their worker/wrapper fusion
rule in the context of the fold/unfold framework.

The key step of those left implicit in the original paper is the use
of the \textsf{fold} rule to justify replacing the worker with the
fused version. Schematically, the fold/unfold framework maintains a
history of all definitions that have appeared during transformation,
and the \textsf{fold} rule treats this as a set of rewrite rules
oriented right-to-left. (The \textsf{unfold} rule treats the current
working set of definitions as rewrite rules oriented left-to-right.)
Hence as each definition @{haskell "f = body"} yields a rule of the form
@{haskell "body \\Longrightarrow f"}. Clearly this has dire implications for the preservation of
termination behaviour.

\citet{Tullsen:PhDThesis} in his \S3.1.2 observes that the semantic
essence of the \textsf{fold} rule is Park induction:
\begin{center}
@{thm[mode=Rule] "fix_least"[where F=f]} {\texttt{fix\_least}}
\end{center}
viz that @{haskell "f\\ x = x"} implies only the partially correct @{haskell "fix\\ f
\\sqsubseteq x"}, and not the totally correct @{haskell "fix\\ f = x"}. We use
this characterisation to show that if @{haskell "unwrap"} is non-strict
(i.e. @{haskell "unwrap \\bot \\neq \\bot"}) then there are programs where
worker/wrapper fusion as used by Gill and Hutton need only be
partially correct.

Consider the scenario described in Figure~\ref{fig:ww}. After applying
the worker/wrapper transformation, we attempt to apply fusion by
finding a residual expression @{term "body'"} such that the body of
the worker, i.e. the expression @{term "unwrap oo body oo wrap"}, can
be rewritten as @{term "body' oo unwrap oo wrap"}. Intuitively this is
the semantic form of workers where all self-calls are fusible. Our
goal is to justify redefining @{term "work"} to @{term "fix⋅body'"},
i.e. to establish:
\begin{center}
@{term "fix⋅(unwrap oo body oo wrap) = fix⋅body'"}
\end{center}
We show that worker/wrapper fusion as proposed by Gill and Hutton is
partially correct using Park induction:

›

(* Partial correctness of worker/wrapper fusion via Park induction
   (fix_least): if every self-call in the worker body is fusible
   (hypothesis body'), then the fused fixpoint approximates the worker.
   The converse inclusion fails in general — see the counterexample
   below.  The calculation shows that the worker is a pre-fixed point
   of body'. *)
lemma fusion_partially_correct:
assumes wrap_unwrap: "wrap oo unwrap = ID"
assumes work: "work = fix⋅(unwrap oo body oo wrap)"
assumes body': "unwrap oo body oo wrap = body' oo unwrap oo wrap"
shows "fix⋅body' ⊑ work"
proof(rule fix_least)
(* Unroll the worker once ... *)
have "work = (unwrap oo body oo wrap)⋅work"
using work by (simp add: fix_eq[symmetric])
(* ... rewrite with the fusion hypothesis ... *)
also have "... = (body' oo unwrap oo wrap)⋅work"
using body' by simp
(* ... unroll again so that unwrap oo wrap meets the worker body ... *)
also have "... = (body' oo unwrap oo wrap)⋅((unwrap oo body oo wrap)⋅work)"
using work by (simp add: fix_eq[symmetric])
also have "... = (body' oo unwrap oo wrap oo unwrap oo body oo wrap)⋅work"
by simp
(* ... cancel wrap oo unwrap = ID in the middle ... *)
also have "... = (body' oo unwrap oo body oo wrap)⋅work"
using wrap_unwrap by (simp add: assoc_oo)
(* ... and fold the worker back up. *)
also have "... = body'⋅work"
using work by (simp add: fix_eq[symmetric])
finally show "body'⋅work = work" by simp
qed

text‹

The next section shows the converse does not obtain.

›

subsection‹A non-strict @{term "unwrap"} may go awry›

text‹

\label{sec:ww-counterexample}

If @{term "unwrap"} is non-strict, then it is possible that the fusion
rule proposed by Gill and Hutton does not preserve termination. To
show this we take a small artificial example. The type @{term "A"} is
trivial, and the target type @{term "B"} is the non-strict lift of @{term "A"}.

›

domain A = A
domain B = B (lazy "A")

text‹

The functions @{term "wrap"} and @{term "unwrap"} that map between
these types are routine. Note that @{term "wrap"} is (necessarily)
strict due to the property @{thm "retraction_strict"}.

›

fixrec wrap :: "B → A"
where "wrap⋅(B⋅a) = a"

(*<*)
lemma wrap_strict[simp]: "wrap⋅⊥ = ⊥"
by fixrec_simp
(*>*)

fixrec unwrap :: "A → B"
where "unwrap = B"

text‹

Discharging the worker/wrapper hypothesis is similarly routine.

›

lemma wrap_unwrap: "wrap oo unwrap = ID"

text‹

The candidate computation we transform can be any that uses the
recursion parameter @{term "r"} non-strictly. The following is
especially trivial.

›

fixrec body :: "A → A"
where "body⋅r = A"

text‹

The wrinkle is that the transformed worker can be strict in the
recursion parameter @{term "r"}, as @{term "unwrap"} always lifts it.

›

fixrec body' :: "B → B"
where "body'⋅(B⋅a) = B⋅A"
(*<*)

lemma body'_strict[simp]: "body'⋅⊥ = ⊥"
by fixrec_simp

(*>*)
text‹

As explained above, we set up the fusion opportunity:

›

lemma body_body': "unwrap oo body oo wrap = body' oo unwrap oo wrap"

text‹

This result depends crucially on @{term "unwrap"} being non-strict.

Our earlier result shows that the proposed transformation is partially
correct:

›

lemma "fix⋅body' ⊑ fix⋅(unwrap oo body oo wrap)"
by (rule fusion_partially_correct[OF wrap_unwrap refl body_body'])

text‹

However it is easy to see that it is not totally correct:

›

(* The fused program diverges while the original worker yields B⋅A, so
   the fusion rule is not totally correct here.  The justification for
   fix⋅body' = ⊥ was missing: body' is strict, so its least fixed point
   is ⊥ by fix_strict. *)
lemma "¬ fix⋅(unwrap oo body oo wrap) ⊑ fix⋅body'"
proof -
  have l: "fix⋅(unwrap oo body oo wrap) = B⋅A"
    by (subst fix_eq) simp
  have r: "fix⋅body' = ⊥"
    by (rule fix_strict) simp
  from l r show ?thesis by simp
qed

text‹

This trick works whenever @{term "unwrap"} is not strict. In the
following section we show that requiring @{term "unwrap"} to be strict
leads to a straightforward proof of total correctness.

Note that if we have already established that @{term "wrap oo unwrap =
ID"}, then making @{term "unwrap"} strict preserves this equation:

›

(* Strictifying unwrap preserves the worker/wrapper hypothesis:
   wrap oo strictify⋅unwrap is still ID.  For x ≠ ⊥ strictify⋅unwrap
   behaves like unwrap; for x = ⊥ both sides are ⊥ because wrap is
   strict (retraction_strict). *)
lemma
assumes "wrap oo unwrap = ID"
shows "wrap oo strictify⋅unwrap = ID"
proof(rule cfun_eqI)
fix x
from assms
show "(wrap oo strictify⋅unwrap)⋅x = ID⋅x"
by (cases "x = ⊥") (simp_all add: cfun_eq_iff retraction_strict)
qed

text‹

From this we conclude that the worker/wrapper transformation itself
cannot exploit any laziness in @{term "unwrap"} under the
context-insensitive assumptions of @{thm [source]
"worker_wrapper_id"}. This is not to say that other program
transformations may not be able to.

›

(*<*)
lemma
"¬ strictify⋅unwrap oo body oo wrap = body' oo strictify⋅unwrap oo wrap"
by (simp add: cfun_eq_iff exI[where x="⊥"])
(*>*)

(*<*)
end
(*>*)


# Theory WorkerWrapperNew

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory WorkerWrapperNew
imports
HOLCF
FixedPointTheorems
WorkerWrapper
begin

(*>*)
section‹A totally-correct fusion rule›

text‹
\label{sec:ww-fixed}

We now show that a termination-preserving worker/wrapper fusion rule
can be obtained by requiring @{term "unwrap"} to be strict. (As we
observed earlier, @{term "wrap"} must always be strict due to the
assumption that ‹wrap oo unwrap = ID›.)

Our first result shows that a combined worker/wrapper transformation
and fusion rule is sound, using the assumptions of ‹worker_wrapper_id› and the ubiquitous ‹lfp_fusion› rule.

›

(* A totally-correct combined worker/wrapper + fusion rule: the price
   is the extra strictness assumption on unwrap, which lets us appeal
   to lfp_fusion instead of Park induction.  The justification for the
   first calculation step was missing. *)
lemma worker_wrapper_fusion_new:
  fixes wrap :: "'b::pcpo → 'a::pcpo"
  fixes unwrap :: "'a → 'b"
  fixes body' :: "'b → 'b"
  assumes wrap_unwrap: "wrap oo unwrap = (ID :: 'a → 'a)"
  assumes unwrap_strict: "unwrap⋅⊥ = ⊥"
  assumes body_body': "unwrap oo body oo wrap = body' oo (unwrap oo wrap)"
  shows "fix⋅body = wrap⋅(fix⋅body')"
proof -
  (* Post-compose both sides with wrap oo unwrap, then cancel it. *)
  from body_body'
  have "unwrap oo body oo (wrap oo unwrap) = (body' oo unwrap oo (wrap oo unwrap))"
    by (simp add: assoc_oo)
  with wrap_unwrap have "unwrap oo body = body' oo unwrap"
    by simp
  (* lfp_fusion needs unwrap strict. *)
  with unwrap_strict have "unwrap⋅(fix⋅body) = fix⋅body'"
    by (rule lfp_fusion)
  hence "(wrap oo unwrap)⋅(fix⋅body) = wrap⋅(fix⋅body')"
    by simp
  with wrap_unwrap show ?thesis by simp
qed

text‹

We can also show a more general result which allows fusion to be
optionally performed on a per-recursive-call basis using
\texttt{parallel\_fix\_ind}:

›

(* More general rule: fusion may be applied per recursive call, via
   parallel fixed-point induction.  The invariant ?P couples the two
   fixpoints and records that unwrap oo wrap is the identity on the
   worker's approximants.  Restored here: the missing proof of the
   base case (case 2), the "next" separating cases 2 and 3, and the
   justification of the "moreover" step. *)
lemma worker_wrapper_fusion_new_general:
  fixes wrap :: "'b::pcpo → 'a::pcpo"
  fixes unwrap :: "'a → 'b"
  assumes wrap_unwrap: "wrap oo unwrap = (ID :: 'a → 'a)"
  assumes unwrap_strict: "unwrap⋅⊥ = ⊥"
  assumes body_body': "⋀r. (unwrap oo wrap)⋅r = r
                        ⟹ (unwrap oo body oo wrap)⋅r = body'⋅r"
  shows "fix⋅body = wrap⋅(fix⋅body')"
proof -
  let ?P = "λ(x, y). x = y ∧ unwrap⋅(wrap⋅x) = x"
  have "?P (fix⋅(unwrap oo body oo wrap), (fix⋅body'))"
  proof(induct rule: parallel_fix_ind)
    (* Base case: wrap is strict by retraction_strict, unwrap by
       assumption, so the invariant holds at (⊥, ⊥). *)
    case 2 with retraction_strict unwrap_strict wrap_unwrap show "?P (⊥, ⊥)"
      by (simp add: cfun_eq_iff)
  next
    case (3 x y)
    hence xy: "x = y" and unwrap_wrap: "unwrap⋅(wrap⋅x) = x" by auto
    from body_body' xy unwrap_wrap
    have "(unwrap oo body oo wrap)⋅x = body'⋅y"
      by simp
    moreover
    from wrap_unwrap
    have "unwrap⋅(wrap⋅((unwrap oo body oo wrap)⋅x)) = (unwrap oo body oo wrap)⋅x"
      by (simp add: cfun_eq_iff)
    ultimately show ?case by simp
  qed simp
  thus ?thesis
    using worker_wrapper_id[OF wrap_unwrap refl] by simp
qed

text‹

This justifies the syntactically-oriented rules shown in
Figure~\ref{fig:wwc2}; note the scoping of the fusion rule.

Those familiar with the ``bananas'' work of \citet*{barbed-wire:1991}
will not be surprised that adding a strictness assumption justifies an
equational fusion rule.

\begin{figure}[tb]
\begin{center}
\fbox{\parbox{0.96\textwidth}{For a recursive definition @{haskell "comp =
\\mathsf{fix}\\ body"} of type @{haskell "A"} and a pair of functions @{haskell "wrap :: B \\to A"}
and @{haskell "unwrap :: A \\to B"} where @{haskell "wrap \\circ unwrap = id_A"} and
@{haskell "unwrap\\ \\bot = \\bot"}, define:

\begin{align*}
comp & = wrap\ work\\
work & = unwrap\ (body[wrap\ work / comp])
\end{align*}
\[
unwrap\ (wrap\ work) \Longrightarrow work
\]
}}
\end{center}%
\caption{The syntactic worker/wrapper transformation and fusion rule.}\label{fig:wwc2}
\end{figure}

›

(*<*)
end
(*>*)


# Theory Accumulator

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory Accumulator
imports
HOLCF
LList
WorkerWrapperNew
begin
(*>*)

section‹Naive reverse becomes accumulator-reverse.›

text‹\label{sec:accum}›

subsection‹Hughes lists, naive reverse, worker-wrapper optimisation.›

text‹The ``Hughes'' list type.›

type_synonym 'a H = "'a llist → 'a llist"

definition
list2H :: "'a llist → 'a H" where
"list2H ≡ lappend"

lemma acc_c2a_strict[simp]: "list2H⋅⊥ = ⊥"
by (rule cfun_eqI, simp add: list2H_def)

definition
H2list :: "'a H → 'a llist" where
"H2list ≡ Λ f . f⋅lnil"

text‹The paper only claims the homomorphism holds for finite lists,
but in fact it holds for all lazy lists in HOLCF. They are trying to
dodge an explicit appeal to the equation @{thm "inst_cfun_pcpo"},
which does not hold in Haskell.›

(* list2H is a monoid homomorphism from (llist, :++, lnil) to
   (H, oo, ID): append maps to composition.  Holds for all lazy lists
   (not just finite ones) because lappend is associative in HOLCF. *)
lemma H_llist_hom_append: "list2H⋅(xs :++ ys) = list2H⋅xs oo list2H⋅ys" (is "?lhs = ?rhs")
proof(rule cfun_eqI)
fix zs
have "?lhs⋅zs = (xs :++ ys) :++ zs" by (simp add: list2H_def)
also have "… = xs :++ (ys :++ zs)" by (rule lappend_assoc)
also have "… = list2H⋅xs⋅(ys :++ zs)" by (simp add: list2H_def)
also have "… = list2H⋅xs⋅(list2H⋅ys⋅zs)" by (simp add: list2H_def)
also have "… = (list2H⋅xs oo list2H⋅ys)⋅zs" by simp
finally show "?lhs⋅zs = (list2H⋅xs oo list2H⋅ys)⋅zs" .
qed

lemma H_llist_hom_id: "list2H⋅lnil = ID" by (simp add: list2H_def)

lemma H2list_list2H_inv: "H2list oo list2H = ID"
by (rule cfun_eqI, simp add: H2list_def list2H_def)

text‹\citet[\S4.2]{GillHutton:2009} define the naive reverse
function as follows.›

fixrec lrev :: "'a llist → 'a llist"
where
"lrev⋅lnil = lnil"
| "lrev⋅(x :@ xs) = lrev⋅xs :++ (x :@ lnil)"

text‹Note ``body'' is the generator of @{term "lrev_def"}.›

lemma lrev_strict[simp]: "lrev⋅⊥ = ⊥"
by fixrec_simp

fixrec lrev_body :: "('a llist → 'a llist) → 'a llist → 'a llist"
where
"lrev_body⋅r⋅lnil = lnil"
| "lrev_body⋅r⋅(x :@ xs) = r⋅xs :++ (x :@ lnil)"

lemma lrev_body_strict[simp]: "lrev_body⋅r⋅⊥ = ⊥"
by fixrec_simp

text‹This is trivial but syntactically a bit touchy. Would be nicer
to define @{term "lrev_body"} as the generator of the fixpoint
definition of @{term "lrev"} directly.›

lemma lrev_lrev_body_eq: "lrev = fix⋅lrev_body"
by (rule cfun_eqI, subst lrev_def, subst lrev_body.unfold, simp)

text‹Wrap / unwrap functions.›

definition
unwrapH :: "('a llist → 'a llist) → 'a llist → 'a H" where
"unwrapH ≡ Λ f xs . list2H⋅(f⋅xs)"

lemma unwrapH_strict[simp]: "unwrapH⋅⊥ = ⊥"
unfolding unwrapH_def by (rule cfun_eqI, simp)

definition
wrapH :: "('a llist → 'a H) → 'a llist → 'a llist" where
"wrapH ≡ Λ f xs . H2list⋅(f⋅xs)"

(* Worker/wrapper hypothesis for Hughes lists: wrapH inverts unwrapH
   pointwise, reducing to the retraction H2list oo list2H = ID. *)
lemma wrapH_unwrapH_id: "wrapH oo unwrapH = ID" (is "?lhs = ?rhs")
proof(rule cfun_eqI)+
fix f xs
have "?lhs⋅f⋅xs = H2list⋅(list2H⋅(f⋅xs))" by (simp add: wrapH_def unwrapH_def)
also have "… = (H2list oo list2H)⋅(f⋅xs)" by simp
also have "… = ID⋅(f⋅xs)" by (simp only: H2list_list2H_inv)
also have "… = ?rhs⋅f⋅xs" by simp
finally show "?lhs⋅f⋅xs = ?rhs⋅f⋅xs" .
qed

subsection‹Gill/Hutton-style worker/wrapper.›

definition
lrev_work :: "'a llist → 'a H" where
"lrev_work ≡ fix⋅(unwrapH oo lrev_body oo wrapH)"

definition
lrev_wrap :: "'a llist → 'a llist" where
"lrev_wrap ≡ wrapH⋅lrev_work"

(* Worker/wrapper-transformed reverse equals the original; instance of
   worker_wrapper_id.  The closing method was dropped during
   extraction. *)
lemma lrev_lrev_ww_eq: "lrev = lrev_wrap"
using worker_wrapper_id[OF wrapH_unwrapH_id lrev_lrev_body_eq]
by (simp add: lrev_wrap_def lrev_work_def)

subsection‹Optimise worker/wrapper.›

text‹Intermediate worker.›

fixrec lrev_body1 :: "('a llist → 'a H) → 'a llist → 'a H"
where
"lrev_body1⋅r⋅lnil = list2H⋅lnil"
| "lrev_body1⋅r⋅(x :@ xs) = list2H⋅(wrapH⋅r⋅xs :++ (x :@ lnil))"

definition
lrev_work1 :: "'a llist → 'a H" where
"lrev_work1 ≡ fix⋅lrev_body1"

lemma lrev_body_lrev_body1_eq: "lrev_body1 = unwrapH oo lrev_body oo wrapH"
apply (rule cfun_eqI)+
apply (subst lrev_body.unfold)
apply (subst lrev_body1.unfold)
apply (case_tac xa)
apply (simp_all add: list2H_def wrapH_def unwrapH_def)
done

lemma lrev_work1_lrev_work_eq: "lrev_work1 = lrev_work"
by (unfold lrev_work_def lrev_work1_def,
rule cfun_arg_cong[OF lrev_body_lrev_body1_eq])

text‹Now use the homomorphism.›

fixrec lrev_body2 :: "('a llist → 'a H) → 'a llist → 'a H"
where
"lrev_body2⋅r⋅lnil = ID"
| "lrev_body2⋅r⋅(x :@ xs) = list2H⋅(wrapH⋅r⋅xs) oo list2H⋅(x :@ lnil)"

lemma lrev_body2_strict[simp]: "lrev_body2⋅r⋅⊥ = ⊥"
by fixrec_simp

definition
lrev_work2 :: "'a llist → 'a H" where
"lrev_work2 ≡ fix⋅lrev_body2"

lemma lrev_work2_strict[simp]: "lrev_work2⋅⊥ = ⊥"
unfolding lrev_work2_def
by (subst fix_eq) simp

(* lrev_body2 rewrites lrev_body1 using the append/composition
   homomorphism.  The tactic expression was truncated; the case split
   and final simp are restored. *)
lemma lrev_body2_lrev_body1_eq: "lrev_body2 = lrev_body1"
by ((rule cfun_eqI)+
   , (subst lrev_body1.unfold, subst lrev_body2.unfold)
   , (case_tac xa, simp_all add: H_llist_hom_append H_llist_hom_id))

lemma lrev_work2_lrev_work1_eq: "lrev_work2 = lrev_work1"
by (unfold lrev_work2_def lrev_work1_def
, rule cfun_arg_cong[OF lrev_body2_lrev_body1_eq])

text ‹Simplify.›

fixrec lrev_body3 :: "('a llist → 'a H) → 'a llist → 'a H"
where
"lrev_body3⋅r⋅lnil = ID"
| "lrev_body3⋅r⋅(x :@ xs) = r⋅xs oo list2H⋅(x :@ lnil)"

lemma lrev_body3_strict[simp]: "lrev_body3⋅r⋅⊥ = ⊥"
by fixrec_simp

definition
lrev_work3 :: "'a llist → 'a H" where
"lrev_work3 ≡ fix⋅lrev_body3"

(* The fusion step Gill/Hutton perform on the reverse worker, shown
   pointwise: list2H applied to the wrapped worker is the worker
   itself.  Relies on the (only partially correct in general)
   worker_wrapper_fusion rule; total correctness is recovered below by
   list induction. *)
lemma lrev_wwfusion: "list2H⋅((wrapH⋅lrev_work2)⋅xs) = lrev_work2⋅xs"
proof -
{
(* Pointfree form first: list2H oo wrapH⋅lrev_work2 is unwrapH of the
   wrapped worker ... *)
have "list2H oo wrapH⋅lrev_work2 = unwrapH⋅(wrapH⋅lrev_work2)"
by (rule cfun_eqI, simp add: unwrapH_def)
also have "… = (unwrapH oo wrapH)⋅lrev_work2" by simp
(* ... which fusion collapses to the worker. *)
also have "… = lrev_work2"
apply -
apply (rule worker_wrapper_fusion[OF wrapH_unwrapH_id, where body="lrev_body"])
apply (auto iff: lrev_body2_lrev_body1_eq lrev_body_lrev_body1_eq lrev_work2_def lrev_work1_def)
done
finally have "list2H oo wrapH⋅lrev_work2 = lrev_work2" .
}
(* Specialise the pointfree equation at xs. *)
thus ?thesis using cfun_eq_iff[where f="list2H oo wrapH⋅lrev_work2" and g="lrev_work2"] by auto
qed

text‹If we use this result directly, we only get a partially-correct
program transformation, see \citet{Tullsen:PhDThesis} for details.›

lemma "lrev_work3 ⊑ lrev_work2"
unfolding lrev_work3_def
proof(rule fix_least)
{
fix xs have "lrev_body3⋅lrev_work2⋅xs = lrev_work2⋅xs"
proof(cases xs)
case bottom thus ?thesis by simp
next
case lnil thus ?thesis
unfolding lrev_work2_def
by (subst fix_eq[where F="lrev_body2"], simp)
next
case (lcons y ys)
hence "lrev_body3⋅lrev_work2⋅xs = lrev_work2⋅ys oo list2H⋅(y :@ lnil)" by simp
also have "… = list2H⋅((wrapH⋅lrev_work2)⋅ys) oo list2H⋅(y :@ lnil)"
using lrev_wwfusion[where xs=ys] by simp
also from lcons have "… = lrev_body2⋅lrev_work2⋅xs" by simp
also have "… = lrev_work2⋅xs"
unfolding lrev_work2_def by (simp only: fix_eq[symmetric])
finally show ?thesis by simp
qed
}
thus "lrev_body3⋅lrev_work2 = lrev_work2" by (rule cfun_eqI)
qed

text‹We can't show the reverse inclusion in the same way as the
fusion law doesn't hold for the optimised definition. (Intuitively we
haven't established that it is equal to the original @{term "lrev"}
definition.) We could show termination of the optimised definition
though, as it operates on finite lists. Alternatively we can use
induction (over the list argument) to show total equivalence.

The following lemma shows that the fusion Gill/Hutton want to do is
completely sound in this context, by appealing to the lazy list
induction principle.›

(* Total correctness of the fused reverse worker, by lazy-list
   induction (the admissibility obligation is discharged by the
   trailing "qed simp_all").  The first two cases were missing their
   closing "apply simp / done" lines. *)
lemma lrev_work3_lrev_work2_eq: "lrev_work3 = lrev_work2" (is "?lhs = ?rhs")
proof(rule cfun_eqI)
  fix x
  show "?lhs⋅x = ?rhs⋅x"
  proof(induct x)
    show "lrev_work3⋅⊥ = lrev_work2⋅⊥"
      apply (unfold lrev_work3_def lrev_work2_def)
      apply (subst fix_eq[where F="lrev_body2"])
      apply (subst fix_eq[where F="lrev_body3"])
      apply simp
      done
  next
    show "lrev_work3⋅lnil = lrev_work2⋅lnil"
      apply (unfold lrev_work3_def lrev_work2_def)
      apply (subst fix_eq[where F="lrev_body2"])
      apply (subst fix_eq[where F="lrev_body3"])
      apply simp
      done
  next
    fix a l assume "lrev_work3⋅l = lrev_work2⋅l"
    thus "lrev_work3⋅(a :@ l) = lrev_work2⋅(a :@ l)"
      apply (unfold lrev_work3_def lrev_work2_def)
      apply (subst fix_eq[where F="lrev_body2"])
      apply (subst fix_eq[where F="lrev_body3"])
      apply (fold lrev_work3_def lrev_work2_def)
      apply (simp add: lrev_body3.unfold lrev_body2.unfold lrev_wwfusion)
      done
  qed simp_all
qed

text‹Use the combined worker/wrapper-fusion rule. Note we get a weaker lemma.›

(* Syntactic form of the fusion hypothesis required by the combined
   rule worker_wrapper_fusion_new.  The apply-script was truncated
   before the final simplification; restored here. *)
lemma lrev3_2_syntactic: "lrev_body3 oo (unwrapH oo wrapH) = lrev_body2"
apply (subst lrev_body2.unfold, subst lrev_body3.unfold)
apply (rule cfun_eqI)+
apply (case_tac xa)
  apply (simp_all add: unwrapH_def)
done

lemma lrev_work3_lrev_work2_eq': "lrev = wrapH⋅lrev_work3"
proof -
from lrev_lrev_body_eq
have "lrev = fix⋅lrev_body" .
also from wrapH_unwrapH_id unwrapH_strict
have "… = wrapH⋅(fix⋅lrev_body3)"
by (rule worker_wrapper_fusion_new
, simp add: lrev3_2_syntactic lrev_body2_lrev_body1_eq lrev_body_lrev_body1_eq)
finally show ?thesis unfolding lrev_work3_def by simp
qed

text‹Final syntactic tidy-up.›

fixrec lrev_body_final :: "('a llist → 'a H) → 'a llist → 'a H"
where
"lrev_body_final⋅r⋅lnil⋅ys = ys"
| "lrev_body_final⋅r⋅(x :@ xs)⋅ys = r⋅xs⋅(x :@ ys)"

definition
lrev_work_final :: "'a llist → 'a H" where
"lrev_work_final ≡ fix⋅lrev_body_final"

definition
lrev_final :: "'a llist → 'a llist" where
"lrev_final ≡ Λ xs. lrev_work_final⋅xs⋅lnil"

lemma lrev_body_final_lrev_body3_eq': "lrev_body_final⋅r⋅xs = lrev_body3⋅r⋅xs"
apply (subst lrev_body_final.unfold)
apply (subst lrev_body3.unfold)
apply (cases xs)
apply (simp_all add: list2H_def ID_def cfun_eqI)
done

lemma lrev_body_final_lrev_body3_eq: "lrev_body_final = lrev_body3"
by (simp only: lrev_body_final_lrev_body3_eq' cfun_eqI)

lemma lrev_final_lrev_eq: "lrev = lrev_final" (is "?lhs = ?rhs")
proof -
have "?lhs = lrev_wrap" by (rule lrev_lrev_ww_eq)
also have "… = wrapH⋅lrev_work" by (simp only: lrev_wrap_def)
also have "… = wrapH⋅lrev_work1" by (simp only: lrev_work1_lrev_work_eq)
also have "… = wrapH⋅lrev_work2" by (simp only: lrev_work2_lrev_work1_eq)
also have "… = wrapH⋅lrev_work3" by (simp only: lrev_work3_lrev_work2_eq)
also have "… = wrapH⋅lrev_work_final" by (simp only: lrev_work3_def lrev_work_final_def lrev_body_final_lrev_body3_eq)
also have "… = lrev_final" by (simp add: lrev_final_def cfun_eqI H2list_def wrapH_def)
finally show ?thesis .
qed

(*<*)
end
(*>*)


# Theory UnboxedNats

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory UnboxedNats
imports
HOLCF
Nats
WorkerWrapperNew
begin
(*>*)

section‹Unboxing types.›

text‹The original application of the worker/wrapper transformation
was the unboxing of flat types by \citet{SPJ-JL:1991}. We can model
the boxed and unboxed types as (respectively) pointed and unpointed
domains in HOLCF. Concretely @{typ "UNat"} denotes the discrete domain
of naturals, @{typ "UNat⇩⊥"} the lifted (flat and pointed) variant, and
@{typ "Nat"} the standard boxed domain, isomorphic to @{typ
"UNat⇩⊥"}. This latter distinction helps us keep the boxed naturals and
lifted function codomains separated; applications of @{term "unbox"}
should be thought of in the same way as Haskell's @{term "newtype"}
constructors, i.e. operationally equivalent to @{term "ID"}.

The divergence monad is used to handle the unboxing, see below.›

subsection‹Factorial example.›

text‹Standard definition of factorial.›

fixrec fac :: "Nat → Nat"
where
"fac⋅n = If n =⇩B 0 then 1 else n * fac⋅(n - 1)"

declare fac.simps[simp del]

lemma fac_strict[simp]: "fac⋅⊥ = ⊥"
by fixrec_simp

definition
fac_body :: "(Nat → Nat) → Nat → Nat" where
"fac_body ≡ Λ r n. If n =⇩B 0 then 1 else n * r⋅(n - 1)"

lemma fac_body_strict[simp]: "fac_body⋅r⋅⊥ = ⊥"
unfolding fac_body_def by simp

lemma fac_fac_body_eq: "fac = fix⋅fac_body"
unfolding fac_body_def by (rule cfun_eqI, subst fac_def, simp)

text‹Wrap / unwrap functions. Note the explicit lifting of the
co-domain. For some reason the published version of
\citet{GillHutton:2009} does not discuss this point: if we're going to
handle recursive functions, we need a bottom.

@{term "unbox"} simply removes the tag, yielding a possibly-divergent
unboxed value, the result of the function.›

definition
unwrapB :: "(Nat → Nat) → UNat → UNat⇩⊥" where
"unwrapB ≡ Λ f. unbox oo f oo box"

text‹Note that the monadic bind operator @{term "(>>=)"} here stands
in for the \textsf{case} construct in the paper.›

definition
wrapB :: "(UNat → UNat⇩⊥) → Nat → Nat" where
"wrapB ≡ Λ f x . unbox⋅x >>= f >>= box"

(* wrapB inverts unwrapB on strict functions — exactly the premise
   needed for worker_wrapper_body below: fac_body is strict in its
   argument even though fac_body oo wrapB need not be unwrappable in
   general. *)
lemma wrapB_unwrapB_body:
assumes strictF: "f⋅⊥ = ⊥"
shows "(wrapB oo unwrapB)⋅f = f" (is "?lhs = ?rhs")
proof(rule cfun_eqI)
fix x :: Nat
have "?lhs⋅x = unbox⋅x >>= (Λ x'. unwrapB⋅f⋅x' >>= box)"
unfolding wrapB_def by simp
also have "… = unbox⋅x >>= (Λ x'. unbox⋅(f⋅(box⋅x')) >>= box)"
unfolding unwrapB_def by simp
(* Case split on x: the ⊥ case needs strictness of f. *)
also from strictF have "… = f⋅x" by (cases x, simp_all)
finally show "?lhs⋅x = ?rhs⋅x" .
qed

text‹Apply worker/wrapper.›

definition
fac_work :: "UNat → UNat⇩⊥" where
"fac_work ≡ fix⋅(unwrapB oo fac_body oo wrapB)"

definition
fac_wrap :: "Nat → Nat" where
"fac_wrap ≡ wrapB⋅fac_work"

lemma fac_fac_ww_eq: "fac = fac_wrap" (is "?lhs = ?rhs")
proof -
have "wrapB oo unwrapB oo fac_body = fac_body"
using wrapB_unwrapB_body[OF fac_body_strict]
by - (rule cfun_eqI, simp)
thus ?thesis
using worker_wrapper_body[where computation=fac and body=fac_body and wrap=wrapB and unwrap=unwrapB]
unfolding fac_work_def fac_wrap_def by (simp add: fac_fac_body_eq)
qed

text‹This is not entirely faithful to the paper, as they don't
explicitly handle the lifting of the codomain.›

definition
fac_body' :: "(UNat → UNat⇩⊥) → UNat → UNat⇩⊥" where
"fac_body' ≡ Λ r n.
unbox⋅(If box⋅n =⇩B 0
then 1
else unbox⋅(box⋅n - 1) >>= r >>= (Λ b. box⋅n * box⋅b))"

lemma fac_body'_fac_body: "fac_body' = unwrapB oo fac_body oo wrapB" (is "?lhs = ?rhs")
proof(rule cfun_eqI)+
fix r x
show "?lhs⋅r⋅x = ?rhs⋅r⋅x"
using bbind_case_distr_strict[where f="Λ y. box⋅x * y" and g="unbox⋅(box⋅x - 1)"]
bbind_case_distr_strict[where f="Λ y. box⋅x * y" and h="box"]
unfolding fac_body'_def fac_body_def unwrapB_def wrapB_def by simp
qed

text‹The @{term "up"} constructors here again mediate the
isomorphism, operationally doing nothing. Note the switch to the
machine-oriented \emph{if} construct: the test @{term "n = 0"} cannot
diverge.›

definition
fac_body_final :: "(UNat → UNat⇩⊥) → UNat → UNat⇩⊥" where
"fac_body_final ≡ Λ r n.
if n = 0 then up⋅1 else r⋅(n -⇩# 1) >>= (Λ b. up⋅(n *⇩# b))"

lemma fac_body_final_fac_body': "fac_body_final = fac_body'" (is "?lhs = ?rhs")
proof(rule cfun_eqI)+
fix r x
show "?lhs⋅r⋅x = ?rhs⋅r⋅x"
using bbind_case_distr_strict[where f="unbox" and g="r⋅(x -⇩# 1)" and h="(Λ b. box⋅(x *⇩# b))"]
unfolding fac_body_final_def fac_body'_def uMinus_def uMult_def zero_Nat_def one_Nat_def
by simp
qed

definition
fac_work_final :: "UNat → UNat⇩⊥" where
"fac_work_final ≡ fix⋅fac_body_final"

definition
fac_final :: "Nat → Nat" where
"fac_final ≡ Λ n. unbox⋅n >>= fac_work_final >>= box"

lemma fac_fac_final: "fac = fac_final" (is "?lhs=?rhs")
proof -
have "?lhs = fac_wrap" by (rule fac_fac_ww_eq)
also have "… = wrapB⋅fac_work" by (simp only: fac_wrap_def)
also have "… = wrapB⋅(fix⋅(unwrapB oo fac_body oo wrapB))" by (simp only: fac_work_def)
also have "… = wrapB⋅(fix⋅fac_body')" by (simp only: fac_body'_fac_body)
also have "… = wrapB⋅fac_work_final" by (simp only: fac_body_final_fac_body' fac_work_final_def)
also have "… = fac_final" by (simp add: fac_final_def wrapB_def)
finally show ?thesis .
qed

(* **************************************** *)

subsection‹Introducing an accumulator.›

text‹

The final version of factorial uses unboxed naturals but is not
tail-recursive. We can apply worker/wrapper once more to introduce an
accumulator, similar to \S\ref{sec:accum}.

The monadic machinery complicates things slightly here. We use
\emph{Kleisli composition}, denoted @{term "(>=>)"}, in the
homomorphism.

Firstly we introduce an ``accumulator'' monoid and show the
homomorphism.

›

type_synonym UNatAcc = "UNat → UNat⇩⊥"

definition
n2a :: "UNat → UNatAcc" where
"n2a ≡ Λ m n. up⋅(m *⇩# n)"

definition
a2n :: "UNatAcc → UNat⇩⊥" where
"a2n ≡ Λ a. a⋅1"

lemma a2n_strict[simp]: "a2n⋅⊥ = ⊥"
unfolding a2n_def by simp

lemma a2n_n2a: "a2n⋅(n2a⋅u) = up⋅u"
unfolding a2n_def n2a_def by (simp add: uMult_arithmetic)

lemma A_hom_mult: "n2a⋅(x *⇩# y) = (n2a⋅x >=> n2a⋅y)"
unfolding n2a_def bKleisli_def by (simp add: uMult_arithmetic)

definition
unwrapA :: "(UNat → UNat⇩⊥) → UNat → UNatAcc" where
"unwrapA ≡ Λ f n. f⋅n >>= n2a"

lemma unwrapA_strict[simp]: "unwrapA⋅⊥ = ⊥"
unfolding unwrapA_def by (rule cfun_eqI) simp

definition
wrapA :: "(UNat → UNatAcc) → UNat → UNat⇩⊥" where
"wrapA ≡ Λ f. a2n oo f"

(* Worker/wrapper hypothesis for the accumulator monoid: wrapA inverts
   unwrapA, by cases on whether f⋅x diverges, using a2n_n2a for the
   defined case.  The apply-script was truncated before the final
   simplification; restored here. *)
lemma wrapA_unwrapA_id: "wrapA oo unwrapA = ID"
unfolding wrapA_def unwrapA_def
apply (rule cfun_eqI)+
apply (case_tac "x⋅xa")
 apply (simp_all add: a2n_n2a)
done

text‹Some steps along the way.›

definition
fac_acc_body1 :: "(UNat → UNatAcc) → UNat → UNatAcc" where
"fac_acc_body1 ≡ Λ r n.
if n = 0 then n2a⋅1 else wrapA⋅r⋅(n -⇩# 1) >>= (Λ res. n2a⋅(n *⇩# res))"

lemma fac_acc_body1_fac_body_final_eq: "fac_acc_body1 = unwrapA oo fac_body_final oo wrapA"
unfolding fac_acc_body1_def fac_body_final_def wrapA_def unwrapA_def
by (rule cfun_eqI)+ simp

text‹Use the homomorphism.›

definition
fac_acc_body2 :: "(UNat → UNatAcc) → UNat → UNatAcc" where
"fac_acc_body2 ≡ Λ r n.
if n = 0 then n2a⋅1 else wrapA⋅r⋅(n -⇩# 1) >>= (Λ res. n2a⋅n >=> n2a⋅res)"

lemma fac_acc_body2_body1_eq: "fac_acc_body2 = fac_acc_body1"
unfolding fac_acc_body1_def fac_acc_body2_def
by (rule cfun_eqI)+ (simp add: A_hom_mult)

text‹Apply worker/wrapper.›

definition
fac_acc_body3 :: "(UNat → UNatAcc) → UNat → UNatAcc" where
"fac_acc_body3 ≡ Λ r n.
if n = 0 then n2a⋅1 else n2a⋅n >=> r⋅(n -⇩# 1)"

(* Syntactic fusion hypothesis for the accumulator version of
   factorial: pre-composing fac_acc_body3 with unwrapA oo wrapA
   recovers fac_acc_body2.  The strictness-distribution lemma lets the
   monadic bind commute with Kleisli composition. *)
lemma fac_acc_body3_body2: "fac_acc_body3 oo (unwrapA oo wrapA) = fac_acc_body2" (is "?lhs=?rhs")
proof(rule cfun_eqI)+
fix r n acc
show "((fac_acc_body3 oo (unwrapA oo wrapA))⋅r⋅n⋅acc) = fac_acc_body2⋅r⋅n⋅acc"
unfolding fac_acc_body2_def fac_acc_body3_def unwrapA_def
using bbind_case_distr_strict[where f="Λ y. n2a⋅n >=> y" and h="n2a", symmetric]
by simp
qed

(* Apply the combined worker/wrapper + fusion rule to introduce the
   accumulator.  The original "by (rule ...)" left the body_body'
   premise undischarged; a follow-up simp chaining the intermediate
   body equalities is restored here. *)
lemma fac_work_final_body3_eq: "fac_work_final = wrapA⋅(fix⋅fac_acc_body3)"
unfolding fac_work_final_def
by (rule worker_wrapper_fusion_new[OF wrapA_unwrapA_id unwrapA_strict])
   (simp add: fac_acc_body3_body2 fac_acc_body2_body1_eq fac_acc_body1_fac_body_final_eq assoc_oo)

definition
fac_acc_body_final :: "(UNat → UNatAcc) → UNat → UNatAcc" where
"fac_acc_body_final ≡ Λ r n acc.
if n = 0 then up⋅acc else r⋅(n -⇩# 1)⋅(n *⇩# acc)"

definition
fac_acc_work_final :: "UNat → UNat⇩⊥" where
"fac_acc_work_final ≡ Λ x. fix⋅fac_acc_body_final⋅x⋅1"

(* The tidied accumulator body coincides with fac_acc_body3 once the
   monadic plumbing (n2a, bKleisli) is unfolded.  "by (rule
   cfun_eqI)+" alone cannot close the goal — the follow-up arithmetic
   simp was dropped; restored here. *)
lemma fac_acc_work_final_fac_acc_work3_eq: "fac_acc_body_final = fac_acc_body3" (is "?lhs=?rhs")
unfolding fac_acc_body3_def fac_acc_body_final_def n2a_def bKleisli_def
by (rule cfun_eqI)+ (simp add: uMult_arithmetic)

(* Final equivalence: the accumulator worker started at 1 equals the
   previous unboxed worker.  The justification of the last calculation
   step (unfolding wrapA/a2n and extensionality) was missing. *)
lemma fac_acc_work_final_fac_work: "fac_acc_work_final = fac_work_final" (is "?lhs=?rhs")
proof -
  have "?rhs = wrapA⋅(fix⋅fac_acc_body3)" by (rule fac_work_final_body3_eq)
  also have "… = wrapA⋅(fix⋅fac_acc_body_final)"
    using fac_acc_work_final_fac_acc_work3_eq by simp
  also have "… = ?lhs"
    unfolding fac_acc_work_final_def wrapA_def a2n_def
    by (rule cfun_eqI) simp
  finally show ?thesis by simp
qed

(*<*)
end
(*>*)


# Theory Streams

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory Streams
imports
HOLCF
Nats
WorkerWrapper
begin
(*>*)

section‹Memoisation using streams.›

text ‹
\label{sec:memoisation-example}›

subsection‹Streams.›

text‹The type of infinite streams.›

domain 'a Stream = stcons (lazy sthead :: "'a") (lazy sttail :: "'a Stream") (infixr "&&" 65)

(*<*)
lemma Stream_bisimI[intro]:
"(⋀xs ys. R xs ys
⟹ (xs = ⊥ ∧ ys = ⊥)
∨ (∃h t t'. R t t' ∧ xs = h && t ∧ ys = h && t'))
⟹ Stream_bisim R"
unfolding Stream.bisim_def by auto
(*>*)

fixrec smap :: "('a → 'b) → 'a Stream → 'b Stream"
where
"smap⋅f⋅(x && xs) = f⋅x && smap⋅f⋅xs"

(*<*)
lemma smap_strict[simp]: "smap⋅f⋅⊥ = ⊥"
by fixrec_simp
(*>*)

lemma smap_smap: "smap⋅f⋅(smap⋅g⋅xs) = smap⋅(f oo g)⋅xs"
(*<*) by (induct xs, simp_all) (*>*)

fixrec i_th :: "'a Stream → Nat → 'a"
where
"i_th⋅(x && xs) = Nat_case⋅x⋅(i_th⋅xs)"

abbreviation
i_th_syn :: "'a Stream ⇒ Nat ⇒ 'a" (infixl "!!" 100) where
"s !! i ≡ i_th⋅s⋅i"

(*<*)
lemma i_th_strict1[simp]: "⊥ !! i = ⊥"
by fixrec_simp

lemma i_th_strict2[simp]: "s !! ⊥ = ⊥" by (subst i_th.unfold, cases s, simp_all)
lemma i_th_0[simp]: "(s !! 0) = sthead⋅s" by (subst i_th.unfold, cases s, simp_all)
lemma i_th_suc[simp]: "i ≠ ⊥ ⟹ (x && xs) !! (i + 1) = xs !! i" by (subst i_th.unfold, simp)
(*>*)

text‹The infinite stream of natural numbers.›

fixrec nats :: "Nat Stream"
where
"nats = 0 && smap⋅(Λ x. 1 + x)⋅nats"

(*<*)
declare nats.simps[simp del]
(*>*)

subsection‹The wrapper/unwrapper functions.›

definition
unwrapS' :: "(Nat → 'a) → 'a Stream" where
"unwrapS' ≡ Λ f . smap⋅f⋅nats"

lemma unwrapS'_unfold: "unwrapS'⋅f = f⋅0 && smap⋅(f oo (Λ x. 1 + x))⋅nats"
(*<*)by (unfold unwrapS'_def, subst nats.unfold, simp add: smap_smap)(*>*)

fixrec unwrapS :: "(Nat → 'a) → 'a Stream"
where
"unwrapS⋅f = f⋅0 && unwrapS⋅(f oo (Λ x. 1 + x))"

(*<*)
declare unwrapS.simps[simp del]
(*>*)

text‹The two versions of @{term "unwrapS"} are equivalent. We could
try to fold some definitions here but it's easier if the stream
constructor is manifest.›

lemma unwrapS_unwrapS'_eq: "unwrapS = unwrapS'" (is "?lhs = ?rhs")
proof(rule cfun_eqI)
fix f show "?lhs⋅f = ?rhs⋅f"
proof(coinduct rule: Stream.coinduct)
let ?R = "λs s'. (∃f. s = f⋅0 && unwrapS⋅(f oo (Λ x. 1 + x))
∧ s' = f⋅0 && smap⋅(f oo (Λ x. 1 + x))⋅nats)"
show "Stream_bisim ?R"
proof
fix s s' assume "?R s s'"
then obtain f where fs:  "s = f⋅0 && unwrapS⋅(f oo (Λ x. 1 + x))"
and fs': "s' = f⋅0 && smap⋅(f oo (Λ x. 1 + x))⋅nats"
by blast
have "?R (unwrapS⋅(f oo (Λ x. 1 + x))) (smap⋅(f oo (Λ x. 1 + x))⋅nats)"
by ( rule exI[where x="f oo (Λ x. 1 + x)"]
, subst unwrapS.unfold, subst nats.unfold, simp add: smap_smap)
with fs fs'
show "(s = ⊥ ∧ s' = ⊥)
∨ (∃h t t'.
(∃f. t = f⋅0 && unwrapS⋅(f oo (Λ x. 1 + x))
∧ t' = f⋅0 && smap⋅(f oo (Λ x. 1 + x))⋅nats)
∧ s = h && t ∧ s' = h && t')" by best
qed
show "?R (?lhs⋅f) (?rhs⋅f)"
proof -
have lhs: "?lhs⋅f = f⋅0 && unwrapS⋅(f oo (Λ x. 1 + x))" by (subst unwrapS.unfold, simp)
have rhs: "?rhs⋅f = f⋅0 && smap⋅(f oo (Λ x. 1 + x))⋅nats" by (rule unwrapS'_unfold)
from lhs rhs show ?thesis by best
qed
qed
qed

definition
wrapS :: "'a Stream → Nat → 'a" where
"wrapS ≡ Λ s i . s !! i"

text‹Note the identity requires that @{term "f"} be
strict. \citet[\S6.1]{GillHutton:2009} do not make this requirement,
an oversight on their part.

In practice all functions worth memoising are strict in the memoised
argument.›

lemma wrapS_unwrapS_id':
assumes strictF: "(f::Nat → 'a)⋅⊥ = ⊥"
shows "unwrapS⋅f !! n = f⋅n"
using strictF
proof(induct n arbitrary: f rule: Nat_induct)
case bottom with strictF show ?case by simp
next
case zero thus ?case by (subst unwrapS.unfold, simp)
next
case (Suc i f)
have "unwrapS⋅f !! (i + 1) = (f⋅0 && unwrapS⋅(f oo (Λ x. 1 + x))) !! (i + 1)"
by (subst unwrapS.unfold, simp)
also from Suc have "… = unwrapS⋅(f oo (Λ x. 1 + x)) !! i" by simp
also from Suc have "… = (f oo (Λ x. 1 + x))⋅i" by simp
also have "… = f⋅(i + 1)" by (simp add: plus_commute)
finally show ?case .
qed

lemma wrapS_unwrapS_id: "f⋅⊥ = ⊥ ⟹ (wrapS oo unwrapS)⋅f = f"
by (rule cfun_eqI, simp add: wrapS_unwrapS_id' wrapS_def)

subsection‹Fibonacci example.›

definition
fib_body :: "(Nat → Nat) → Nat → Nat" where
"fib_body ≡ Λ r. Nat_case⋅1⋅(Nat_case⋅1⋅(Λ n. r⋅n + r⋅(n + 1)))"

(*<*)
lemma fib_body_strict[simp]: "fib_body⋅r⋅⊥ = ⊥" by (simp add: fib_body_def)
(*>*)

definition
fib :: "Nat → Nat" where
"fib ≡ fix⋅fib_body"

(*<*)
lemma fib_strict[simp]: "fib⋅⊥ = ⊥"
by (unfold fib_def, subst fix_eq, fold fib_def, simp add: fib_body_def)
(*>*)

text‹Apply worker/wrapper.›

definition
fib_work :: "Nat Stream" where
"fib_work ≡ fix⋅(unwrapS oo fib_body oo wrapS)"

definition
fib_wrap :: "Nat → Nat" where
"fib_wrap ≡ wrapS⋅fib_work"

lemma wrapS_unwrapS_fib_body: "wrapS oo unwrapS oo fib_body = fib_body"
proof(rule cfun_eqI)
fix r show "(wrapS oo unwrapS oo fib_body)⋅r = fib_body⋅r"
using wrapS_unwrapS_id[where f="fib_body⋅r"] by simp
qed

lemma fib_ww_eq: "fib = fib_wrap"
using worker_wrapper_body[OF wrapS_unwrapS_fib_body]
by (simp add: fib_def fib_wrap_def fib_work_def)

text‹Optimise.›

fixrec
fib_work_final :: "Nat Stream"
and
fib_f_final :: "Nat → Nat"
where
"fib_work_final = smap⋅fib_f_final⋅nats"
| "fib_f_final = Nat_case⋅1⋅(Nat_case⋅1⋅(Λ n'. fib_work_final !! n' + fib_work_final !! (n' + 1)))"

declare fib_f_final.simps[simp del] fib_work_final.simps[simp del]

definition
fib_final :: "Nat → Nat" where
"fib_final ≡ Λ n. fib_work_final !! n"

text‹

This proof is only fiddly due to the way mutual recursion is encoded:
we need to use Beki\'{c}'s Theorem \citep{Bekic:1969}\footnote{The
interested reader can find some historical commentary in
\citet{Harel:1980, DBLP:journals/toplas/Sangiorgi09}.} to massage the
definitions into their final form.

›

(* Disentangle the mutual recursion of fib_work_final/fib_f_final via
   Bekic's theorem (fix_cprod) and show it equals the worker obtained from
   worker/wrapper. The final calculational step had lost its "by simp"
   justification after the "unfolding" line. *)
lemma fib_work_final_fib_work_eq: "fib_work_final = fib_work" (is "?lhs = ?rhs")
proof -
  let ?wb = "Λ r. Nat_case⋅1⋅(Nat_case⋅1⋅(Λ n'. r !! n' + r !! (n' + 1)))"
  let ?mr = "Λ (fwf :: Nat Stream, fff). (smap⋅fff⋅nats, ?wb⋅fwf)"
  have "?lhs = fst (fix⋅?mr)"
    by (simp add: fib_work_final_def split_def csplit_def)
  also have "… = (μ fwf. fst (?mr⋅(fwf, μ fff. snd (?mr⋅(fwf, fff)))))"
    using fix_cprod[where F="?mr"] by simp
  also have "… = (μ fwf. smap⋅(μ fff. ?wb⋅fwf)⋅nats)" by simp
  also have "… = (μ fwf. smap⋅(?wb⋅fwf)⋅nats)" by (simp add: fix_const)
  also have "… = ?rhs"
    unfolding fib_body_def fib_work_def unwrapS_unwrapS'_eq unwrapS'_def wrapS_def
    by simp
  finally show ?thesis .
qed

lemma fib_final_fib_eq: "fib_final = fib" (is "?lhs = ?rhs")
proof -
have "?lhs = (Λ n. fib_work_final !! n)" by (simp add: fib_final_def)
also have "… = (Λ n. fib_work !! n)" by (simp only: fib_work_final_fib_work_eq)
also have "… = fib_wrap" by (simp add: fib_wrap_def wrapS_def)
also have "… = ?rhs" by (simp only: fib_ww_eq)
finally show ?thesis .
qed

(*<*)
end
(*>*)


# Theory Continuations

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory Continuations
imports
HOLCF
Maybe
Nats
WorkerWrapperNew
begin
(*>*)

section‹Tagless interpreter via double-barreled continuations›

text‹\label{sec:continuations}›

type_synonym 'a Cont = "('a → 'a) → 'a"

definition
val2cont :: "'a → 'a Cont" where
"val2cont ≡ (Λ a c. c⋅a)"

definition
cont2val :: "'a Cont → 'a" where
"cont2val ≡ (Λ f. f⋅ID)"

lemma cont2val_val2cont_id: "cont2val oo val2cont = ID"
by (rule cfun_eqI, simp add: val2cont_def cont2val_def)

(* Expression language for the tagless interpreter. The Add constructor is
   required: eval, eval_body, eval_body' and eval_body_final below all
   pattern-match on "Add⋅x⋅y", so it must be part of the domain. *)
domain Expr =
    Val (lazy val::"Nat")
  | Add (lazy addl::"Expr") (lazy addr::"Expr")
  | Throw
  | Catch (lazy cbody::"Expr") (lazy chandler::"Expr")

fixrec eval :: "Expr → Nat Maybe"
where
"eval⋅(Val⋅n) = Just⋅n"
| "eval⋅(Add⋅x⋅y) = mliftM2 (Λ a b. a + b)⋅(eval⋅x)⋅(eval⋅y)"
| "eval⋅Throw = mfail"
| "eval⋅(Catch⋅x⋅y) = mcatch⋅(eval⋅x)⋅(eval⋅y)"

fixrec eval_body :: "(Expr → Nat Maybe) → Expr → Nat Maybe"
where
"eval_body⋅r⋅(Val⋅n) = Just⋅n"
| "eval_body⋅r⋅(Add⋅x⋅y) = mliftM2 (Λ a b. a + b)⋅(r⋅x)⋅(r⋅y)"
| "eval_body⋅r⋅Throw = mfail"
| "eval_body⋅r⋅(Catch⋅x⋅y) = mcatch⋅(r⋅x)⋅(r⋅y)"

lemma eval_body_strictExpr[simp]: "eval_body⋅r⋅⊥ = ⊥"
by (subst eval_body.unfold, simp)

lemma eval_eval_body_eq: "eval = fix⋅eval_body"
by (rule cfun_eqI, subst eval_def, subst eval_body.unfold, simp)

subsection‹Worker/wrapper›

definition
unwrapC :: "(Expr → Nat Maybe) → (Expr → (Nat → Nat Maybe) → Nat Maybe → Nat Maybe)" where
"unwrapC ≡ Λ g e s f. case g⋅e of Nothing ⇒ f | Just⋅n ⇒ s⋅n"

lemma unwrapC_strict[simp]: "unwrapC⋅⊥ = ⊥"
unfolding unwrapC_def by (rule cfun_eqI)+ simp

definition
wrapC :: "(Expr → (Nat → Nat Maybe) → Nat Maybe → Nat Maybe) → (Expr → Nat Maybe)" where
"wrapC ≡ Λ g e. g⋅e⋅Just⋅Nothing"

lemma wrapC_unwrapC_id: "wrapC oo unwrapC = ID"
proof(intro cfun_eqI)
fix g e
show "(wrapC oo unwrapC)⋅g⋅e = ID⋅g⋅e"
by (cases "g⋅e", simp_all add: wrapC_def unwrapC_def)
qed

definition
eval_work :: "Expr → (Nat → Nat Maybe) → Nat Maybe → Nat Maybe" where
"eval_work ≡ fix⋅(unwrapC oo eval_body oo wrapC)"

definition
eval_wrap :: "Expr → Nat Maybe" where
"eval_wrap ≡ wrapC⋅eval_work"

fixrec eval_body' :: "(Expr → (Nat → Nat Maybe) → Nat Maybe → Nat Maybe)
→ Expr → (Nat → Nat Maybe) → Nat Maybe → Nat Maybe"
where
"eval_body'⋅r⋅(Val⋅n)⋅s⋅f = s⋅n"
| "eval_body'⋅r⋅(Add⋅x⋅y)⋅s⋅f = (case wrapC⋅r⋅x of
Nothing ⇒ f
| Just⋅n ⇒ (case wrapC⋅r⋅y of
Nothing ⇒ f
| Just⋅m ⇒ s⋅(n + m)))"
| "eval_body'⋅r⋅Throw⋅s⋅f = f"
| "eval_body'⋅r⋅(Catch⋅x⋅y)⋅s⋅f = (case wrapC⋅r⋅x of
Nothing ⇒ (case wrapC⋅r⋅y of
Nothing ⇒ f
| Just⋅n ⇒ s⋅n)
| Just⋅n ⇒ s⋅n)"

lemma eval_body'_strictExpr[simp]: "eval_body'⋅r⋅⊥⋅s⋅f = ⊥"
by (subst eval_body'.unfold, simp)

definition
eval_work' :: "Expr → (Nat → Nat Maybe) → Nat Maybe → Nat Maybe" where
"eval_work' ≡ fix⋅eval_body'"

text‹This proof is unfortunately quite messy, due to the
simplifier's inability to cope with HOLCF's case distinctions.›

lemma eval_body'_eval_body_eq: "eval_body' = unwrapC oo eval_body oo wrapC"
apply (intro cfun_eqI)
apply (unfold unwrapC_def wrapC_def)
apply (case_tac xa)
apply simp_all
apply (case_tac "x⋅Expr1⋅Just⋅Nothing")
apply simp_all
apply (case_tac "x⋅Expr2⋅Just⋅Nothing")
apply simp_all
apply (case_tac "x⋅Expr1⋅Just⋅Nothing")
apply simp_all
done

fixrec eval_body_final :: "(Expr → (Nat → Nat Maybe) → Nat Maybe → Nat Maybe)
→ Expr → (Nat → Nat Maybe) → Nat Maybe → Nat Maybe"
where
"eval_body_final⋅r⋅(Val⋅n)⋅s⋅f = s⋅n"
| "eval_body_final⋅r⋅(Add⋅x⋅y)⋅s⋅f = r⋅x⋅(Λ n. r⋅y⋅(Λ m. s⋅(n + m))⋅f)⋅f"
| "eval_body_final⋅r⋅Throw⋅s⋅f = f"
| "eval_body_final⋅r⋅(Catch⋅x⋅y)⋅s⋅f = r⋅x⋅s⋅(r⋅y⋅s⋅f)"

lemma eval_body_final_strictExpr[simp]: "eval_body_final⋅r⋅⊥⋅s⋅f = ⊥"
by (subst eval_body_final.unfold, simp)

(* The fused body coincides with eval_body'. The case split on the
   expression was left undischarged (case_tac directly followed by "done");
   restore the simp_all step that unfolds the continuation plumbing. *)
lemma eval_body'_eval_body_final_eq: "eval_body_final oo unwrapC oo wrapC = eval_body'"
  apply (rule cfun_eqI)+
  apply (case_tac xa)
  apply (simp_all add: unwrapC_def wrapC_def)
  done

definition
eval_work_final :: "Expr → (Nat → Nat Maybe) → Nat Maybe → Nat Maybe" where
"eval_work_final ≡ fix⋅eval_body_final"

definition
eval_final :: "Expr → Nat Maybe" where
"eval_final ≡ (Λ e. eval_work_final⋅e⋅Just⋅Nothing)"

lemma "eval = eval_final"
proof -
have "eval = fix⋅eval_body" by (rule eval_eval_body_eq)
also from wrapC_unwrapC_id unwrapC_strict have "… = wrapC⋅(fix⋅eval_body_final)"
apply (rule worker_wrapper_fusion_new)
using eval_body'_eval_body_final_eq eval_body'_eval_body_eq by simp
also have "… = eval_final"
unfolding eval_final_def eval_work_final_def wrapC_def
by simp
finally show ?thesis .
qed

(*<*)
end
(*>*)


# Theory Backtracking

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory Backtracking
imports
HOLCF
Nats
WorkerWrapperNew
begin

(*>*)
section‹Backtracking using lazy lists and continuations›

text‹
\label{sec:ww-backtracking}

To illustrate the utility of worker/wrapper fusion to programming
language semantics, we consider here the first-order part of a
higher-order backtracking language by \citet{DBLP:conf/icfp/WandV04};
we refer the reader to these papers for a broader motivation for these
languages.

As syntax is typically considered to be inductively generated, with
each syntactic object taken to be finite and completely defined, we
define the syntax for our language using a HOL datatype:

›

datatype expr = const nat | add expr expr | disj expr expr | fail
(*<*)

lemma case_expr_cont[cont2cont]:
assumes f1: "⋀y. cont (λx. f1 x y)"
assumes f2: "⋀y z. cont (λx. f2 x y z)"
assumes f3: "⋀y z. cont (λx. f3 x y z)"
assumes f4: "cont (λx. f4 x)"
shows "cont (λx. case_expr (f1 x) (f2 x) (f3 x) (f4 x) expr)"
using assms by (cases expr) simp_all

(* Presumably obsolete in the HG version, not so in Isabelle2011. *)

fun
expr_encode :: "expr ⇒ nat"
where
"expr_encode (const n) = prod_encode (0, n)"
| "expr_encode (add e1 e2) = prod_encode (1, (prod_encode (expr_encode e1, expr_encode e2)))"
| "expr_encode (disj e1 e2) = prod_encode (2, (prod_encode (expr_encode e1, expr_encode e2)))"
| "expr_encode fail = prod_encode (3, 0)"

lemma expr_encode_inj:
"expr_encode x = expr_encode y ⟹ x = y"
by (induct x arbitrary: y) ((case_tac y, auto dest!: inj_onD[OF inj_prod_encode, where A=UNIV])[1])+

instance expr :: countable
by (rule countable_classI[OF expr_encode_inj])

(*>*)
text‹

The language consists of constants, an addition function, a
disjunctive choice between expressions, and failure. We give it a
direct semantics using the monad of lazy lists of natural numbers,
with the goal of deriving an extensionally-equivalent evaluator
that uses double-barrelled continuations.

Our theory of lazy lists is entirely standard.

›

default_sort predomain

domain 'a llist =
lnil
| lcons (lazy "'a") (lazy "'a llist")

text‹

By relaxing the default sort of type variables to ‹predomain›,
our polymorphic definitions can be used at concrete types that do not
contain @{term "⊥"}. These include those constructed from HOL types
using the discrete ordering type constructor @{typ "'a discr"}, and in
particular our interpretation @{typ "nat discr"} of the natural
numbers.

The following standard list functions underpin the monadic
infrastructure:

›

fixrec lappend :: "'a llist → 'a llist → 'a llist" where
"lappend⋅lnil⋅ys = ys"
| "lappend⋅(lcons⋅x⋅xs)⋅ys = lcons⋅x⋅(lappend⋅xs⋅ys)"

fixrec lconcat :: "'a llist llist → 'a llist" where
"lconcat⋅lnil = lnil"
| "lconcat⋅(lcons⋅x⋅xs) = lappend⋅x⋅(lconcat⋅xs)"

fixrec lmap :: "('a → 'b) → 'a llist → 'b llist" where
"lmap⋅f⋅lnil = lnil"
| "lmap⋅f⋅(lcons⋅x⋅xs) = lcons⋅(f⋅x)⋅(lmap⋅f⋅xs)"
(*<*)

lemma lappend_strict'[simp]: "lappend⋅⊥ = (Λ a. ⊥)"
by fixrec_simp

lemma lconcat_strict[simp]: "lconcat⋅⊥ = ⊥"
by fixrec_simp

lemma lmap_strict[simp]: "lmap⋅f⋅⊥ = ⊥"
by fixrec_simp

(*>*)
text‹

›

type_synonym S = "nat discr llist"

definition returnS :: "nat discr → S" where
"returnS = (Λ x. lcons⋅x⋅lnil)"

definition bindS :: "S → (nat discr → S) → S" where
"bindS = (Λ x g. lconcat⋅(lmap⋅g⋅x))"

text‹

Unfortunately the lack of higher-order polymorphism in HOL prevents us
from providing the general typing one would expect a monad to have in
Haskell.

The evaluator uses the following extra constants:

›

definition addS :: "S → S → S" where
"addS ≡ (Λ x y. bindS⋅x⋅(Λ xv. bindS⋅y⋅(Λ yv. returnS⋅(xv + yv))))"

definition disjS :: "S → S → S" where
"disjS ≡ lappend"

definition failS :: "S" where
"failS ≡ lnil"

text‹

We interpret our language using these combinators in the obvious
way. The only complication is that, even though our evaluator is
primitive recursive, we must explicitly use the fixed point operator
as the worker/wrapper technique requires us to talk about the body of
the recursive definition.

›

definition
  evalS_body :: "(expr discr → nat discr llist)
               → (expr discr → nat discr llist)"
where
  (* One branch per expr constructor; the "add" branch (present in the
     datatype at the top of this theory and interpreted by addS) had been
     lost, leaving the case expression non-exhaustive. *)
  "evalS_body ≡ Λ r e. case undiscr e of
     const n ⇒ returnS⋅(Discr n)
   | add e1 e2 ⇒ addS⋅(r⋅(Discr e1))⋅(r⋅(Discr e2))
   | disj e1 e2 ⇒ disjS⋅(r⋅(Discr e1))⋅(r⋅(Discr e2))
   | fail ⇒ failS"

abbreviation evalS :: "expr discr → nat discr llist" where
"evalS ≡ fix⋅evalS_body"

text‹

We aim to transform this evaluator into one using double-barrelled
continuations; one will serve as a "success" context, taking a natural
number into "the rest of the computation", and the other outright
failure.

In general we could work with an arbitrary observation type ala
\citet{DBLP:conf/icalp/Reynolds74}, but for convenience we use the
clearly adequate concrete type @{typ "nat discr llist"}.

›

type_synonym Obs = "nat discr llist"
type_synonym Failure = "Obs"
type_synonym Success = "nat discr → Failure → Obs"
type_synonym K = "Success → Failure → Obs"

text‹

To ease our development we adopt what
\citet[\S5]{DBLP:conf/icfp/WandV04} call a "failure computation"
instead of a failure continuation, which would have the type @{typ
"unit → Obs"}.

The monad over the continuation type @{typ "K"} is as follows:

›

definition returnK :: "nat discr → K" where
"returnK ≡ (Λ x. Λ s f. s⋅x⋅f)"

definition bindK :: "K → (nat discr → K) → K" where
"bindK ≡ Λ x g. Λ s f. x⋅(Λ xv f'. g⋅xv⋅s⋅f')⋅f"

text‹

Our extra constants are defined as follows:

›

definition addK :: "K → K → K" where
"addK ≡ (Λ x y. bindK⋅x⋅(Λ xv. bindK⋅y⋅(Λ yv. returnK⋅(xv + yv))))"

definition disjK :: "K → K → K" where
"disjK ≡ (Λ g h. Λ s f. g⋅s⋅(h⋅s⋅f))"

definition failK :: "K" where
"failK ≡ Λ s f. f"

text‹

The continuation semantics is again straightforward:

›

definition
  evalK_body :: "(expr discr → K) → (expr discr → K)"
where
  (* Mirrors evalS_body constructor-for-constructor over the continuation
     monad; the "add" branch (interpreted by addK) had been lost, leaving
     the case expression non-exhaustive. *)
  "evalK_body ≡ Λ r e. case undiscr e of
     const n ⇒ returnK⋅(Discr n)
   | add e1 e2 ⇒ addK⋅(r⋅(Discr e1))⋅(r⋅(Discr e2))
   | disj e1 e2 ⇒ disjK⋅(r⋅(Discr e1))⋅(r⋅(Discr e2))
   | fail ⇒ failK"

abbreviation evalK :: "expr discr → K" where
"evalK ≡ fix⋅evalK_body"

text‹

We now set up a worker/wrapper relation between these two semantics.

The kernel of @{term "unwrap"} is the following function that converts
a lazy list into an equivalent continuation representation.

›

fixrec SK :: "S → K" where
"SK⋅lnil = failK"
| "SK⋅(lcons⋅x⋅xs) = (Λ s f. s⋅x⋅(SK⋅xs⋅s⋅f))"

definition
unwrap :: "(expr discr → nat discr llist) → (expr discr → K)"
where
"unwrap ≡ Λ r e. SK⋅(r⋅e)"
(*<*)

lemma SK_strict[simp]: "SK⋅⊥ = ⊥"
by fixrec_simp

lemma unwrap_strict[simp]: "unwrap⋅⊥ = ⊥"
unfolding unwrap_def by simp

(*>*)
text‹

Symmetrically @{term "wrap"} converts an evaluator using continuations
into one generating lazy lists by passing it the right continuations.

›

definition KS :: "K → S" where
"KS ≡ (Λ k. k⋅lcons⋅lnil)"

definition wrap :: "(expr discr → K) → (expr discr → nat discr llist)" where
"wrap ≡ Λ r e. KS⋅(r⋅e)"
(*<*)

lemma KS_strict[simp]: "KS⋅⊥ = ⊥"
unfolding KS_def by simp

lemma wrap_strict[simp]: "wrap⋅⊥ = ⊥"
unfolding wrap_def by simp

(*>*)
text‹

The worker/wrapper condition follows directly from these definitions.

›

lemma KS_SK_id:
"KS⋅(SK⋅xs) = xs"
by (induct xs) (simp_all add: KS_def failK_def)

(* The worker/wrapper precondition: follows pointwise from KS_SK_id just
   proved above. The lemma had been left without any proof method. *)
lemma wrap_unwrap_id:
  "wrap oo unwrap = ID"
  unfolding wrap_def unwrap_def
  by (clarsimp simp: KS_SK_id cfun_eq_iff)

text‹

The worker/wrapper transformation is only non-trivial if @{term
"wrap"} and @{term "unwrap"} do not witness an isomorphism. In this
case we can show that we do not even have a Galois connection.

›

lemma cfun_not_below:
"f⋅x \<notsqsubseteq> g⋅x ⟹ f \<notsqsubseteq> g"
by (auto simp: cfun_below_iff)

lemma unwrap_wrap_not_under_id:
"unwrap oo wrap \<notsqsubseteq> ID"
proof -
let ?witness = "Λ e. (Λ s f. lnil :: K)"
have "(unwrap oo wrap)⋅?witness⋅(Discr fail)⋅⊥⋅(lcons⋅0⋅lnil)
\<notsqsubseteq> ?witness⋅(Discr fail)⋅⊥⋅(lcons⋅0⋅lnil)"
by (simp add: failK_def wrap_def unwrap_def KS_def)
hence "(unwrap oo wrap)⋅?witness \<notsqsubseteq> ?witness"
by (fastforce intro!: cfun_not_below)
thus ?thesis by (simp add: cfun_not_below)
qed

text‹

We now apply \texttt{worker\_wrapper\_id}:

›

definition eval_work :: "expr discr → K" where
"eval_work ≡ fix⋅(unwrap oo evalS_body oo wrap)"

definition eval_ww :: "expr discr → nat discr llist" where
"eval_ww ≡ wrap⋅eval_work"

lemma "evalS = eval_ww"
unfolding eval_ww_def eval_work_def
using worker_wrapper_id[OF wrap_unwrap_id]
by simp

text‹

We now show how the monadic operations correspond, by showing that
@{term "SK"} witnesses a \emph{monad morphism} in the sense of
\citet[Definition~2.1]{DBLP:journals/ngc/DanvyGR01}. The mapping needs
to hold for our specific operations in addition to the common monadic
scaffolding.

›

lemma SK_returnS_returnK:
"SK⋅(returnS⋅x) = returnK⋅x"
by (simp add: returnS_def returnK_def failK_def)

lemma SK_lappend_distrib:
"SK⋅(lappend⋅xs⋅ys)⋅s⋅f = SK⋅xs⋅s⋅(SK⋅ys⋅s⋅f)"
by (induct xs) (simp_all add: failK_def)

(* SK maps bindS to bindK. The proof had collapsed into a dangling argument
   list; restore the induction on the lazy list with the monad-definition
   simp set. *)
lemma SK_bindS_bindK:
  "SK⋅(bindS⋅x⋅g) = bindK⋅(SK⋅x)⋅(SK oo g)"
  by (induct x)
     (simp_all add: cfun_eq_iff cfcomp1
                    bindS_def bindK_def failK_def
                    SK_lappend_distrib)

(* SK maps addS to addK. The statement of this lemma was lost, leaving only
   the orphaned "by (clarsimp …)" tail; it is reconstructed here following
   the naming scheme of SK_disjS_disjK / SK_failS_failK below, and is needed
   for the "add" case of evalS_body_evalK_body. *)
lemma SK_addS_addK:
  "SK⋅(addS⋅x⋅y) = addK⋅(SK⋅x)⋅(SK⋅y)"
  unfolding addS_def addK_def
  by (clarsimp simp: cfcomp1
                     SK_bindS_bindK SK_returnS_returnK)

lemma SK_disjS_disjK:
"SK⋅(disjS⋅xs⋅ys) = disjK⋅(SK⋅xs)⋅(SK⋅ys)"
by (simp add: cfun_eq_iff disjS_def disjK_def SK_lappend_distrib)

lemma SK_failS_failK:
"SK⋅failS = failK"
unfolding failS_def by simp

text‹

These lemmas directly establish the precondition for our all-in-one
worker/wrapper and fusion rule:

›

(* Precondition for the all-in-one worker/wrapper-and-fusion rule: the two
   interpreter bodies agree modulo (un)wrapping. The case analysis on the
   expression had lost its justifying method; it is discharged per
   constructor by the monad-morphism equations for SK. The addS/addK case
   is handled by unfolding the definitions directly, so this proof does not
   depend on any separately reconstructed lemma. *)
lemma evalS_body_evalK_body:
  "unwrap oo evalS_body oo wrap = evalK_body oo unwrap oo wrap"
proof(intro cfun_eqI)
  fix r e' s f
  obtain e :: "expr"
    where ee': "e' = Discr e" by (cases e')
  have "(unwrap oo evalS_body oo wrap)⋅r⋅(Discr e)⋅s⋅f
      = (evalK_body oo unwrap oo wrap)⋅r⋅(Discr e)⋅s⋅f"
    by (cases e)
       (simp_all add: evalS_body_def evalK_body_def unwrap_def wrap_def
                      addS_def addK_def cfcomp1
                      SK_returnS_returnK SK_bindS_bindK
                      SK_disjS_disjK SK_failS_failK)
  with ee' show "(unwrap oo evalS_body oo wrap)⋅r⋅e'⋅s⋅f
               = (evalK_body oo unwrap oo wrap)⋅r⋅e'⋅s⋅f"
    by simp
qed

theorem evalS_evalK:
"evalS = wrap⋅evalK"
using worker_wrapper_fusion_new[OF wrap_unwrap_id unwrap_strict]
evalS_body_evalK_body
by simp

text‹

This proof can be considered an instance of the approach of
\citet{DBLP:journals/jfp/HuttonJG10}, which uses the worker/wrapper
machinery to relate two algebras.

This result could be obtained by a structural induction over the
syntax of the language. However our goal here is to show how such a
transformation can be achieved by purely equational means; this has
the advantage that our proof can be locally extended, e.g. to the
full language of \citet{DBLP:journals/ngc/DanvyGR01} simply by proving
extra equations. In contrast the higher-order language of
\citet{DBLP:conf/icfp/WandV04} is beyond the reach of this approach.

›
(*<*)

end
(*>*)


# Theory Nub

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
*)

theory Nub
imports
HOLCF
LList
Maybe
Nats
WorkerWrapperNew
begin
(*>*)

section‹Transforming $O(n^2)$ \emph{nub} into an $O(n\lg n)$ one›

text‹Andy Gill's solution, mechanised.›

subsection‹The @{term "nub"} function.›

fixrec nub :: "Nat llist → Nat llist"
where
"nub⋅lnil = lnil"
| "nub⋅(x :@ xs) = x :@ nub⋅(lfilter⋅(neg oo (Λ y. x =⇩B y))⋅xs)"

lemma nub_strict[simp]: "nub⋅⊥ = ⊥"
by fixrec_simp

fixrec nub_body :: "(Nat llist → Nat llist) → Nat llist → Nat llist"
where
"nub_body⋅f⋅lnil = lnil"
| "nub_body⋅f⋅(x :@ xs) = x :@ f⋅(lfilter⋅(neg oo (Λ y. x =⇩B y))⋅xs)"

lemma nub_nub_body_eq: "nub = fix⋅nub_body"
by (rule cfun_eqI, subst nub_def, subst nub_body.unfold, simp)

(* **************************************** *)

subsection‹Optimised data type.›

text ‹Implement sets using lazy lists for now. Lifting up HOL's @{typ "'a
set"} type causes continuity grief.›

type_synonym NatSet = "Nat llist"

definition
SetEmpty :: "NatSet" where
"SetEmpty ≡ lnil"

definition
SetInsert :: "Nat → NatSet → NatSet" where
"SetInsert ≡ lcons"

definition
SetMem :: "Nat → NatSet → tr" where
"SetMem ≡ lmember⋅(bpred (=))"

lemma SetMem_strict[simp]: "SetMem⋅x⋅⊥ = ⊥" by (simp add: SetMem_def)
(* Membership in the empty set is FF; both lemmas had been left without
   proofs. They follow by unfolding the list-backed set operations. *)
lemma SetMem_SetEmpty[simp]: "SetMem⋅x⋅SetEmpty = FF"
  by (simp add: SetMem_def SetEmpty_def)

(* Membership distributes over insertion; used below to relate lfilter
   chains with filterR (see filter_filterR). *)
lemma SetMem_SetInsert: "SetMem⋅v⋅(SetInsert⋅x⋅s) = (SetMem⋅v⋅s orelse x =⇩B v)"
  by (simp add: SetMem_def SetInsert_def)

text ‹AndyG's new type.›

domain R = R (lazy resultR :: "Nat llist") (lazy exceptR :: "NatSet")

definition
nextR :: "R → (Nat * R) Maybe" where
"nextR = (Λ r. case ldropWhile⋅(Λ x. SetMem⋅x⋅(exceptR⋅r))⋅(resultR⋅r) of
lnil ⇒ Nothing
| x :@ xs ⇒ Just⋅(x, R⋅xs⋅(exceptR⋅r)))"

lemma nextR_strict1[simp]: "nextR⋅⊥ = ⊥" by (simp add: nextR_def)
lemma nextR_strict2[simp]: "nextR⋅(R⋅⊥⋅S) = ⊥" by (simp add: nextR_def)

lemma nextR_lnil[simp]: "nextR⋅(R⋅lnil⋅S) = Nothing" by (simp add: nextR_def)

definition
filterR :: "Nat → R → R" where
"filterR ≡ (Λ v r. R⋅(resultR⋅r)⋅(SetInsert⋅v⋅(exceptR⋅r)))"

definition
c2a :: "Nat llist → R" where
"c2a ≡ Λ xs. R⋅xs⋅SetEmpty"

definition
a2c :: "R → Nat llist" where
"a2c ≡ Λ r. lfilter⋅(Λ v. neg⋅(SetMem⋅v⋅(exceptR⋅r)))⋅(resultR⋅r)"

lemma a2c_strict[simp]: "a2c⋅⊥ = ⊥" unfolding a2c_def by simp

lemma a2c_c2a_id: "a2c oo c2a = ID"
by (rule cfun_eqI, simp add: a2c_def c2a_def lfilter_const_true)

definition
wrap :: "(R → Nat llist) → Nat llist → Nat llist" where
"wrap ≡ Λ f xs. f⋅(c2a⋅xs)"

definition
unwrap :: "(Nat llist → Nat llist) → R → Nat llist" where
"unwrap ≡ Λ f r. f⋅(a2c⋅r)"

lemma unwrap_strict[simp]: "unwrap⋅⊥ = ⊥"
unfolding unwrap_def by (rule cfun_eqI, simp)

lemma wrap_unwrap_id: "wrap oo unwrap = ID"
using cfun_fun_cong[OF a2c_c2a_id]
by - ((rule cfun_eqI)+, simp add: wrap_def unwrap_def)

text ‹Equivalences needed for later.›

lemma TR_deMorgan: "neg⋅(x orelse y) = (neg⋅x andalso neg⋅y)"
by (rule trE[where p=x], simp_all)

lemma case_maybe_case:
"(case (case L of lnil ⇒ Nothing | x :@ xs ⇒ Just⋅(h⋅x⋅xs)) of
Nothing ⇒ f | Just⋅(a, b) ⇒ g⋅a⋅b)
=
(case L of lnil ⇒ f | x :@ xs ⇒ g⋅(fst (h⋅x⋅xs))⋅(snd (h⋅x⋅xs)))"
apply (cases L, simp_all)
apply (case_tac "h⋅a⋅llist")
apply simp
done

(* Case analysis on the abstracted list a2c⋅w can be replaced by case
   analysis on nextR⋅w. The first calculational step (unfolding nextR) had
   lost its justification; it holds by definition of nextR. *)
lemma case_a2c_case_caseR:
  "(case a2c⋅w of lnil ⇒ f | x :@ xs ⇒ g⋅x⋅xs)
 = (case nextR⋅w of Nothing ⇒ f | Just⋅(x, r) ⇒ g⋅x⋅(a2c⋅r))" (is "?lhs = ?rhs")
proof -
  have "?rhs = (case (case ldropWhile⋅(Λ x. SetMem⋅x⋅(exceptR⋅w))⋅(resultR⋅w) of
                        lnil ⇒ Nothing
                      | x :@ xs ⇒ Just⋅(x, R⋅xs⋅(exceptR⋅w))) of Nothing ⇒ f | Just⋅(x, r) ⇒ g⋅x⋅(a2c⋅r))"
    unfolding nextR_def by simp
  also have "… = (case ldropWhile⋅(Λ x. SetMem⋅x⋅(exceptR⋅w))⋅(resultR⋅w) of
                    lnil ⇒ f | x :@ xs ⇒ g⋅x⋅(a2c⋅(R⋅xs⋅(exceptR⋅w))))"
    using case_maybe_case[where L="ldropWhile⋅(Λ x. SetMem⋅x⋅(exceptR⋅w))⋅(resultR⋅w)"
                            and f=f and g="Λ x r. g⋅x⋅(a2c⋅r)" and h="Λ x xs. (x, R⋅xs⋅(exceptR⋅w))"]
    by simp
  also have "… = ?lhs"
    apply (cases "resultR⋅w")
     apply simp_all
    apply (rule_tac p="SetMem⋅a⋅(exceptR⋅w)" in trE)
      apply simp_all
    apply (induct_tac llist)
       apply simp_all
    apply (rule_tac p="SetMem⋅aa⋅(exceptR⋅w)" in trE)
      apply simp_all
    done
  finally show "?lhs = ?rhs" by simp
qed

lemma filter_filterR: "lfilter⋅(neg oo (Λ y. x =⇩B y))⋅(a2c⋅r) = a2c⋅(filterR⋅x⋅r)"
using filter_filter[where p="Tr.neg oo (Λ y. x =⇩B y)" and q="Λ v. Tr.neg⋅(SetMem⋅v⋅(exceptR⋅r))"]
unfolding a2c_def filterR_def
by (cases r, simp_all add: SetMem_SetInsert TR_deMorgan)

text‹Apply worker/wrapper. Unlike Gill/Hutton, we manipulate the body of
the worker into the right form then apply the lemma.›

definition
nub_body' :: "(R → Nat llist) → R → Nat llist" where
"nub_body' ≡ Λ f r. case a2c⋅r of lnil ⇒ lnil
| x :@ xs ⇒ x :@ f⋅(c2a⋅(lfilter⋅(neg oo (Λ y. x =⇩B y))⋅xs))"

(* First worker/wrapper manipulation step for nub: the unwrapped body equals
   nub_body'. The compound proof method was left unterminated; restore the
   closing simp_all and parenthesis. *)
lemma nub_body_nub_body'_eq: "unwrap oo nub_body oo wrap = nub_body'"
  unfolding nub_body_def nub_body'_def unwrap_def wrap_def a2c_def c2a_def
  by ((rule cfun_eqI)+
     , case_tac "lfilter⋅(Λ v. Tr.neg⋅(SetMem⋅v⋅(exceptR⋅xa)))⋅(resultR⋅xa)"
     , simp_all)

definition
nub_body'' :: "(R → Nat llist) → R → Nat llist" where
"nub_body'' ≡ Λ f r. case nextR⋅r of Nothing ⇒ lnil
| Just⋅(x, xs) ⇒ x :@ f⋅(c2a⋅(lfilter⋅(neg oo (Λ y. x =⇩B y))⋅(a2c⋅xs)))"

lemma nub_body'_nub_body''_eq: "nub_body' = nub_body''"
proof(rule cfun_eqI)+
fix f r show "nub_body'⋅f⋅r = nub_body''⋅f⋅r"
unfolding nub_body'_def nub_body''_def
using case_a2c_case_caseR[where f="lnil" and g="Λ x xs. x :@ f⋅(c2a⋅(lfilter⋅(Tr.neg oo (Λ y. x =⇩B y))⋅xs))" and w="r"]
by simp
qed

definition
nub_body''' :: "(R → Nat llist) → R → Nat llist" where
"nub_body''' ≡ (Λ f r. case nextR⋅r of Nothing ⇒ lnil
| Just⋅(x, xs) ⇒ x :@ f⋅(filterR⋅x⋅xs))"

lemma nub_body''_nub_body'''_eq: "nub_body'' = nub_body''' oo (unwrap oo wrap)"
unfolding nub_body''_def nub_body'''_def wrap_def unwrap_def
by ((rule cfun_eqI)+, simp add: filter_filterR)

text‹Finally glue it all together.›

lemma nub_wrap_nub_body''': "nub = wrap⋅(fix⋅nub_body''')"
using worker_wrapper_fusion_new[OF wrap_unwrap_id unwrap_strict, where body=nub_body]
nub_nub_body_eq
nub_body_nub_body'_eq
nub_body'_nub_body''_eq
nub_body''_nub_body'''_eq
by simp

end


# Theory Last

(*<*)
(*
* The worker/wrapper transformation, following Gill and Hutton.
* (C)opyright 2011, Peter Gammie, peteg42 at gmail.com.
*)

theory Last
imports
HOLCF
LList
WorkerWrapper
begin

(*>*)
section‹Optimise last''.›

text‹Andy Gill's solution, mechanised. No fusion, works fine using their rule.›

subsection‹The @{term "last"} function.›

fixrec llast :: "'a llist → 'a"
where
"llast⋅(x :@ yys) = (case yys of lnil ⇒ x | y :@ ys ⇒ llast⋅yys)"

lemma llast_strict[simp]: "llast⋅⊥ = ⊥"
by fixrec_simp

fixrec llast_body :: "('a llist → 'a) → 'a llist → 'a"
where
"llast_body⋅f⋅(x :@ yys) = (case yys of lnil ⇒ x | y :@ ys ⇒ f⋅yys)"

lemma llast_llast_body: "llast = fix⋅llast_body"
by (rule cfun_eqI, subst llast_def, subst llast_body.unfold, simp)

definition wrap :: "('a → 'a llist → 'a) → ('a llist → 'a)" where
"wrap ≡ Λ f (x :@ xs). f⋅x⋅xs"

definition unwrap :: "('a llist → 'a) → ('a → 'a llist → 'a)" where
"unwrap ≡ Λ f x xs. f⋅(x :@ xs)"

lemma unwrap_strict[simp]: "unwrap⋅⊥ = ⊥"
unfolding unwrap_def by ((rule cfun_eqI)+, simp)

(* Gill/Hutton's worker/wrapper precondition for llast: wrap and unwrap
   cancel on the body. The case split was left undischarged (case_tac
   immediately followed by "done"); restore the simp_all step. *)
lemma wrap_unwrap_ID: "wrap oo unwrap oo llast_body = llast_body"
  unfolding llast_body_def wrap_def unwrap_def
  apply (rule cfun_eqI)+
  apply (case_tac xa)
    apply simp_all
  done

definition llast_worker :: "('a → 'a llist → 'a) → 'a → 'a llist → 'a" where
"llast_worker ≡ Λ r x yys. case yys of lnil ⇒ x | y :@ ys ⇒ r⋅y⋅ys"

definition llast' :: "'a llist → 'a" where
"llast' ≡ wrap⋅(fix⋅llast_worker)"

(* The optimised worker is exactly the unwrapped body. Same lost step as in
   wrap_unwrap_ID above: the case split needs a discharging simp_all. *)
lemma llast_worker_llast_body: "llast_worker = unwrap oo llast_body oo wrap"
  unfolding llast_worker_def llast_body_def wrap_def unwrap_def
  apply (rule cfun_eqI)+
  apply (case_tac xb)
    apply simp_all
  done

lemma llast'_llast: "llast' = llast" (is "?lhs = ?rhs")
proof -
have "?rhs = fix⋅llast_body" by (simp only: llast_llast_body)
also have "… = wrap⋅(fix⋅(unwrap oo llast_body oo wrap))"
by (simp only: worker_wrapper_body[OF wrap_unwrap_ID])
also have "… = wrap⋅(fix⋅(llast_worker))"
by (simp only: llast_worker_llast_body)
also have "... = ?lhs" unfolding llast'_def by simp
finally show ?thesis by simp
qed

end