Session WorkerWrapper

Theory Nats

(*<*)
(*
 * Domains of natural numbers.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory Nats
imports HOLCF
begin
(*>*)

section‹Boxed Types, HOL's natural numbers hoisted.›

subsection‹Unboxed naturals.›

text‹We can represent the unboxed naturals as a discrete domain (every
number is equal to itself and unequal to everything else, and there is no
bottom). Think of these functions as the machine operations.

We could actually lift all HOL types and classes in this way; indeed
the HOLCF theory "Lift" does something similar, but is not as
fine-grained as this development.

Note: we default to just CPOs (not pointed CPOs) in this theory. We
adopt both the Isabelle syntax for overloaded arithmetic and the
notation for unboxed operators of \citet{SPJ-JL:1991}.›

default_sort predomain

(* Unboxed naturals: HOL's nat as a discrete cpo — elements are pairwise
   incomparable and there is no bottom, modelling machine-level values.
   NOTE(review): HOLCF symbols (application dot, ≡, ⇒) appear to have been
   stripped from this dump in extraction; code is kept byte-identical. *)
type_synonym UNat = "nat discr"

(* 0 on any discrete cpo, inherited from HOL's zero class. *)
instantiation discr :: (zero) zero
begin
definition zero_discr_def: "0  Discr 0"
instance ..
end

lemma zero_discr[simp]: "undiscr 0 = 0" unfolding zero_discr_def by simp

(* 1 on any discrete cpo, inherited from HOL's one class. *)
instantiation discr :: (one) one
begin
definition one_discr_def: "1  Discr 1"
instance ..
end

lemma one_discr[simp]: "undiscr 1 = 1" unfolding one_discr_def by simp

(* Orders on discrete cpos are inherited pointwise through undiscr.
   Declared [simp] so the comparisons unfold automatically. *)
instantiation discr :: (ord) ord
begin
definition less_def[simp]: "m < n  (undiscr m) < (undiscr n)"
definition le_def[simp]:   "m  n  (undiscr m)  (undiscr n)"
instance ..
end

(* Unboxed addition as a continuous function, with infix syntax +#. *)
definition
  uPlus :: "UNat  UNat  UNat" where
  "uPlus  Λ x y. Discr (undiscr x + undiscr y)"

abbreviation
  uPlus_syn :: "UNat  UNat  UNat" (infixl "+#" 65) where
  "x +# y  uPlusxy"

(* Overloaded + on discrete cpos; agrees with uPlus on UNat by definition. *)
instantiation discr :: (plus) plus
begin
definition plus_discr_def[simp]: "x + y  Discr (undiscr x + undiscr y)"
instance ..
end

(* Unboxed subtraction (truncated on nat), infix -#. *)
definition
  uMinus :: "UNat  UNat  UNat" where
  "uMinus  Λ x y. Discr (undiscr x - undiscr y)"

abbreviation
  uMinus_syn :: "UNat  UNat  UNat" (infixl "-#" 65) where
  "x -# y  uMinusxy"

instantiation discr :: (minus) minus
begin
definition minus_discr_def[simp]: "x - y  Discr (undiscr x - undiscr y)"
instance ..
end

(* Unboxed multiplication, infix *#. *)
definition
  uMult :: "UNat  UNat  UNat" where
  "uMult  Λ x y. Discr (undiscr x * undiscr y)"

abbreviation
  uMult_syn :: "UNat  UNat  UNat" (infixl "*#" 65) where
  "x *# y  uMultxy"

instantiation discr :: (times) times
begin
definition times_discr_def[simp]: "x * y  Discr (undiscr x * undiscr y)"
instance ..
end

(* uMult is a commutative monoid with unit 1; the laws are bundled as
   uMult_arithmetic below for use as a rewrite set. *)
lemma uMult_unit_left: "1 *# (x::UNat) = x"
  unfolding uMult_def one_discr_def by simp
lemma uMult_unit_right: "(x::UNat) *# 1 = x"
  unfolding uMult_def one_discr_def by simp

lemma uMult_assoc: "(x *# y) *# z = x *# (y *# z)"
  unfolding uMult_def by simp

lemma uMult_commute: "x *# y = y *# x"
  unfolding uMult_def by simp

lemma uMult_left_commute: "a *# (b *# c) = b *# (a *# c)"
  unfolding uMult_def by simp

lemmas uMult_arithmetic =
  uMult_unit_left uMult_unit_right uMult_assoc uMult_commute uMult_left_commute

subsection ‹The "@{typ "'a u"}" monad.›

text‹

As Brian Huffman helpfully observed, the "@{typ "'a u"}" type
constructor supports the monadic operations; it's isomorphic to
Haskell's @{term "Maybe a"}, or ML's @{typ "'a option"}.

Note that @{term "return"} is @{term "up"}.

›

(* Monadic bind for the lifted type, built on fup; strict in its first
   argument. *)
definition
  bbind :: "('a::cpo)  ('a  ('b::pcpo))  'b" where
  "bbind  Λ b g. fupgb"

(* Infix syntax mirroring Haskell's >>= . *)
abbreviation
  bbind_syn :: "('a::cpo)  ('a  ('b::pcpo))  'b" (infixl ">>=" 65) where
  "b >>= g  bbindbg"

(* Strictness in both arguments. *)
lemma bbind_strict1[simp]: "bbind = "
  by (simp add: bbind_def)
lemma bbind_strict2[simp]: "x >>=  = "
  by (cases x, simp_all add: bbind_def)

(* The monad laws: left identity, right identity, associativity. *)
lemma bbind_leftID[simp]: "upa >>= f = fa" by (simp add: bbind_def)
lemma bbind_rightID[simp]: "m >>= up = m" by (cases m, simp_all)

lemma bbind_assoc[simp]: "f >>= g >>= h = f >>= (Λ x. gx >>= h)"
  by (cases f, simp_all)

(* A strict function distributes over bind. *)
lemma bbind_case_distr_strict: "f =   f(g >>= h) = g >>= (Λ x. f(hx))"
  unfolding bbind_def by (cases g, simp_all)

text‹

Kleisli composition is sometimes helpful. It forms a monad too,
and has many useful algebraic properties. Unfortunately it is more
work than is useful to write the lemmas in a way that makes the
higher-order unifier in the simplifier happy. Seems easier just to
unfold the definition and go from there.

›

(* Kleisli composition of bbind, with infix >=> . *)
definition
  bKleisli :: "('a::cpo  ('b::cpo))  ('b  ('c::cpo))  ('a  'c)" where
  "bKleisli  Λ f g x. fx >>= g"

abbreviation
  bKleisli_syn :: "('a::cpo  ('b::cpo))  ('b  ('c::cpo))  ('a  'c)" (infixl ">=>" 65) where
  "b >=> g  bKleislibg"

lemma bKleisli_strict1[simp]: "bKleisli = "
  by (simp add: bKleisli_def)
lemma bKleisli_strict2[simp]: "b >=>  = "
  by (rule cfun_eqI, simp add: bKleisli_def)

(* Interchange law between bind and Kleisli composition. *)
lemma bKleisli_bbind: "(f >>= g) >=> h = f >>= (Λ x. gx >=> h)"
  by (cases f, simp_all)

text ‹

The "Box" type denotes computations that, when demanded, yield
either @{term ""} or an unboxed value. Note that the @{term "Box"}
constructor is strict, and so merely tags the lifted computation @{typ
"'a"}. @{typ "'a"} can be pointed or unpointed. This approach enables
us to distinguish the boxed values from the lifted-function-space that
models recursive functions with unboxed codomains.

›

(* One-constructor domain: Box wraps a lifted value, unbox projects it. *)
domain 'a Box = Box (unbox :: "'a")

(* box lifts a value before tagging it, so a boxed value is never bottom. *)
definition
  box :: "('a::predomain)  'a Box" where
  "box  Box oo up"

lemma boxI: "Box(upx) = boxx" unfolding box_def by simp
lemma unbox_box[simp]: "unbox(boxx) = upx" unfolding box_def by simp
lemma unbox_Box[simp]: "x    unbox(Boxx) = x" by simp

text‹

If we succeed in @{term "box"}ing something, then clearly that
something was not @{term ""}.

›

(* Case analysis for Box phrased via box rather than the raw constructor:
   every x is either bottom or boxu for some u. Registered as the default
   cases rule for the Box type. *)
lemma box_casedist[case_names bottom Box, cases type: Box]:
  assumes xbot: "x =   P"
      and xbox: "u. x = boxu  P"
  shows "P"
proof(cases x)
  case bottom with xbot show ?thesis by simp
next
  case (Box u) with xbox show ?thesis
    by (cases u, simp_all add: box_def up_def cont_Iup bottomI[OF minimal_up])
qed

(* Unboxing then re-boxing is the identity. *)
lemma bbind_leftID'[simp]: "unboxa >>= box = a" by (cases a, simp_all add: bbind_def)

(*<*)
text ‹

The optimisations of \citet{SPJ-JL:1991}.

p11: Repeated unboxing of the same value can be done once (roughly:
store the value in a register). Their story is more general.

›

(* A second unboxing of the same x inside the continuation can reuse the
   first result x'. *)
lemma box_repeated:
  "x >>= (Λ x'. f >>= (Λ y'. x >>= bodyx'y'))
  = x >>= (Λ x'. f >>= (Λ y'. bodyx'y'x'))"
  by (cases x, simp_all)
(*>*)

text‹Lift binary predicates over @{typ "'a discr"} into @{typ "'a discr Box"}.›

text‹@{term "bliftM"} and @{term "bliftM2"} encapsulate the boxing
and unboxing.›

(* Functorial map into Box: unbox, apply f, re-box. Strict. *)
definition
  bliftM :: "('a::predomain  'b::predomain)  'a Box  'b Box" where
  "bliftM f  Λ x. unboxx >>= (Λ x'. box(fx'))"

lemma bliftM_strict1[simp]: "bliftM f = " by (simp add: bliftM_def)
lemma bliftM_op[simp]: "bliftM f(boxx) = box(fx)" by (simp add: bliftM_def)

(* Binary lifting into Box: strict in both arguments. *)
definition
  bliftM2 :: "('a::predomain  'b::predomain  'c::predomain)  'a Box  'b Box  'c Box" where
  "bliftM2 f  Λ x y. unboxx >>= (Λ x'. unboxy >>= (Λ y'. box(fx'y')))"

lemma bliftM2_strict1[simp]: "bliftM2 f = " by (rule cfun_eqI)+ (simp add: bliftM2_def)
lemma bliftM2_strict2[simp]: "bliftM2 fx = " by (cases x, simp_all add: bliftM2_def)
lemma bliftM2_op[simp]: "bliftM2 f(boxx)(boxy) = box(fxy)" by (simp add: bliftM2_def)

text‹

Define the arithmetic operations. We need extra continuity lemmas as
we're using the full function space, so we can re-use the conventional
operators. The goal is to work at this level.

›

(* Boxed addition via bliftM2 over the underlying (discrete) + . *)
instantiation Box :: ("{predomain,plus}") plus
begin
definition plus_Box_def: "x + y  bliftM2 (Λ a b. a + b)xy"
instance ..
end

(* Continuity of boxed + in a full-function-space context. *)
lemma plus_Box_cont[simp, cont2cont]:
  "cont g; cont h  cont (λx. (g x :: 'a :: {predomain, plus} Box) + h x)"
  unfolding plus_Box_def by simp

lemma plus_Box_strict1[simp]: " + (y :: 'a::{predomain, plus} Box) = "
  unfolding plus_Box_def by simp
lemma plus_Box_strict2[simp]: "(x :: 'a::{predomain, plus} Box) +  = "
  unfolding plus_Box_def by simp

(* Boxed subtraction, same pattern as addition. *)
instantiation Box :: ("{predomain,minus}") minus
begin
definition minus_Box_def: "x - y  bliftM2 (Λ a b. a - b)xy"
instance ..
end

lemma minus_Box_cont[simp, cont2cont]:
  "cont g; cont h  cont (λx. (g x :: 'a :: {predomain, minus} Box) - h x)"
  unfolding minus_Box_def by simp

lemma minus_Box_strict1[simp]: " - (y :: 'a::{predomain, minus} Box) = "
  unfolding minus_Box_def by simp
lemma minus_Box_strict2[simp]: "(x :: 'a::{predomain, minus} Box) -  = "
  unfolding minus_Box_def by simp

(* Boxed multiplication, same pattern again. *)
instantiation Box :: ("{predomain,times}") times
begin
definition times_Box_def: "x * y  bliftM2 (Λ a b. a * b)xy"
instance ..
end

lemma times_Box_cont[simp, cont2cont]:
  "cont g; cont h  cont (λx. (g x :: 'a :: {predomain, times} Box) * h x)"
  unfolding times_Box_def by simp

lemma times_Box_strict1[simp]: " * (y :: 'a::{predomain, times} Box) = "
  unfolding times_Box_def by simp
lemma times_Box_strict2[simp]: "(x :: 'a::{predomain, times} Box) *  = "
  unfolding times_Box_def by simp

(* Lift a binary HOL predicate to boxed discrete values, yielding the
   HOLCF truth-value type tr (TT/FF, plus bottom for undefined inputs). *)
definition
  bpred :: "('a::countable discr  'a discr  bool)  'a discr Box  'a discr Box  tr" where
  "bpred p  Λ x y. unboxx >>= (Λ x'. unboxy >>= (Λ y'. if p x' y' then TT else FF))"

lemma bpred_strict1[simp]: "bpred p = " unfolding bpred_def by (rule cfun_eqI, simp)
lemma bpred_strict2[simp]: "bpred px = " unfolding bpred_def by (cases x, simp_all)

lemma bpred_eval[simp]: "bpred p(boxx)(boxy) = (if p x y then TT else FF)"
  unfolding bpred_def by simp

(* Boxed equality and order comparisons as infix abbreviations. *)
abbreviation
  beq_syn :: "'a::countable discr Box  'a discr Box  tr" (infix "=B" 50) where
  "x =B y  bpred (=)xy"

abbreviation
  ble_syn :: "'a::{countable,ord} discr Box  'a discr Box  tr" (infix "B" 50) where
  "x B y  bpred (≤)xy"

abbreviation
  blt_syn :: "'a::{countable,ord} discr Box  'a discr Box  tr" (infix "<B" 50) where
  "x <B y  bpred (<)xy"

subsection‹The flat domain of natural numbers›

text‹Lift arithmetic to the boxed naturals. Define some things that make
playing with boxed naturals more convenient.›

(* The flat naturals: boxed unboxed-naturals. *)
type_synonym Nat = "UNat Box"

instantiation Box :: ("{predomain, zero}") zero
begin
definition zero_Nat_def: "0  box0"
instance ..
end

instantiation Box :: ("{predomain, one}") one
begin
definition one_Nat_def: "1  box1"
instance ..
end

text ‹We need to know the underlying operations are continuous for these to work.›

(* Evaluation rules for boxed arithmetic on defined arguments. *)
lemma plus_Nat_eval[simp]: "(boxx :: Nat) + boxy = box(x + y)" unfolding plus_Box_def by simp
lemma minus_Nat_eval[simp]: "(boxx :: Nat) - boxy = box(x - y)" unfolding minus_Box_def by simp
lemma times_Nat_eval[simp]: "(boxx :: Nat) * boxy = box(x * y)" unfolding times_Box_def by simp

(* Case combinator for Nat: z for zero, s applied to the boxed predecessor
   for successors, strict on bottom. *)
definition
  Nat_case :: "'a::domain  (Nat  'a)  Nat  'a" where
  "Nat_case  Λ z s n. unboxn >>= (Λ n'. case_nat z (λn''. s(box(Discr n''))) (undiscr n'))"

(* Continuity of HOL's case_nat in its branch arguments. *)
lemma cont_case_nat[simp]:
  "cont (λx. f x); n. cont (λx. g x n)   cont (λx. case_nat (f x) (g x) n)"
  by (cases n, simp_all)

lemma Nat_case_strict[simp]: "Nat_casezs = " by (simp add: Nat_case_def)
lemma Nat_case_zero[simp]: "Nat_casezs0 = z" by (simp add: Nat_case_def zero_Nat_def zero_discr_def)
lemma Nat_case_suc[simp]:  "Nat_casezs(box(Discr (Suc n))) = s(box(Discr n))"
  by (simp add: Nat_case_def)

(* Successor form n + 1 reduces through Nat_case for defined n. *)
lemma Nat_case_add_1[simp]:
  assumes ndef: "n  "
  shows "Nat_casezs(n + 1) = sn"
proof -
  (* First expose n as a boxed value, then as a boxed Discr. *)
  from ndef obtain nu where nu: "n = boxnu"
    unfolding box_def
    apply (cases "n" rule: Box.exhaust)
    apply simp
    apply (case_tac "u")
    apply simp_all
    done
  then obtain u where "nu = Discr u"
    unfolding box_def
    apply (cases nu)
    apply simp
    done
  with nu show ?thesis unfolding one_Nat_def by simp
qed

(* Nat_case on a concrete boxed numeral is HOL's case_nat. *)
lemma Nat_case_case_nat: "Nat_casezs(box(Discr n)) = case_nat z (λn'. s(box(Discr n'))) n"
  by (simp add: Nat_case_def)

(* Case distinction on Nat: bottom, zero, or a successor n + 1. *)
lemma Nat_casedist[case_names bottom zero Suc]:
  fixes x :: Nat
  assumes xbot: "x =   P"
      and xzero: "x = 0  P"
      and xsuc: "n. x = n + 1  P"
  shows "P"
proof(cases x rule: Box.exhaust)
  case bottom with xbot show ?thesis by simp
next
  case (Box u) hence xu: "x = Boxu" and ubottom: "u  " .
  from ubottom obtain n where ndn: "u = up(Discr n)" apply (cases u) apply simp_all apply (case_tac x) apply simp done
  show ?thesis
  proof(cases n)
    case 0 with ndn xu xzero show ?thesis unfolding zero_Nat_def by (simp add: boxI zero_discr_def)
  next
    case (Suc m) with ndn xu xsuc[where n="box(Discr m)"]
    show ?thesis
      unfolding plus_Box_def unfolding one_Nat_def by (simp add: boxI one_discr_def)
  qed
qed

(* Continuity through Nat_case, by the case distinction just proved. *)
lemma cont_Nat_case[simp]:
  "cont (λx. f x); n. cont (λx. g xn)   cont (λx. Nat_case(f x)(g x)n)"
  apply (cases n rule: Nat_casedist)
    apply simp_all
  apply (case_tac na rule: Box.exhaust)
   apply simp_all
  done

(* Induction on Nat: bottom, zero, and a defined-successor step. The inner
   HOL induction runs over the underlying nat. *)
lemma Nat_induct[case_names bottom zero Suc]:
  fixes P :: "Nat  bool"
  assumes xbot: "P "
      and xzero: "P 0"
      and xsuc: "n. n  ; P n   P (n + 1)"
  shows "P x"
proof(cases x rule: box_casedist)
  case bottom with xbot show ?thesis by simp
next
  case (Box u)
  then obtain n where un: "u = Discr n" by (cases u, simp)
  {
    fix n
    have "x. x = box(Discr n)  P x"
    proof(induct n)
      case 0 with xzero show ?case unfolding zero_Nat_def zero_discr_def by simp
    next
      case (Suc n)
      hence "P (box(Discr n))" by simp
      with xsuc[where n="box(Discr n)"] have "P (box(Discr n) + 1)" unfolding box_def by simp
      with Suc show ?case unfolding one_Nat_def one_discr_def plus_Box_def by simp
    qed
  }
  with un Box show ?thesis by simp
qed

(* Commutativity of boxed addition, via the underlying HOL fact. *)
lemma plus_commute: "(x :: Nat) + y = y + x"
proof -
  have dc: "u v. (undiscr (u::nat discr) + undiscr v) = (undiscr v + undiscr u)"
    by simp
  thus ?thesis
    apply (cases x)
     apply simp
    apply (cases y)
    by (simp_all add: dc plus_Box_def)
qed

lemma mult_unit: "(x::Nat) * 1 = x"
  unfolding times_Box_def one_Nat_def by (cases x, simp_all)

(* Commutativity and associativity of boxed multiplication. *)
lemma mult_commute: "(x :: Nat) * y = y * x"
proof -
  have dc: "u v. (undiscr (u::nat discr) * undiscr v) = (undiscr v * undiscr u)"
    by simp
  show ?thesis
    apply (cases x)
     apply simp
    apply (cases y)
    by (simp_all add: dc)
qed

lemma mult_assoc: "((x :: Nat) * y) * z = x * (y * z)"
proof -
  have ac: "u v w. undiscr (u::nat discr) * (undiscr v * undiscr w)
                   =  (undiscr u * undiscr v) * undiscr w" by simp
  show ?thesis
    apply (cases x)
     apply simp
    apply (cases y)
     apply simp
    apply (cases z)
     apply simp
    by (simp add: ac)
qed

text‹Restore the HOLCF default sort.›

default_sort "domain"

(*<*)
end
(*>*)

Theory LList

(*
 * Lazy lists.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

(*<*)
theory LList
imports
  HOLCF
  Nats
begin
(*>*)

section‹The fully-lazy list type.›

text‹The list can contain anything that is a predomain.›

default_sort predomain

(* Fully-lazy lists: both head and tail are lazy, so the domain contains
   finite, partial and infinite lists. :@ is lazy cons. *)
domain 'a llist =
    lnil ("lnil")
  | lcons (lazy "'a") (lazy "'a llist") (infixr ":@" 65)

(*<*)
(* Why aren't these in the library? *)

(* Evaluation rules for the map function generated by the domain package,
   obtained by unfolding llist_map once per case. *)
lemma llist_map_eval_simps[simp]:
  "llist_mapf = "
  "llist_mapflnil = lnil"
  "llist_mapf(x :@ xs) = fx :@ llist_mapfxs"
    apply (subst llist_map_unfold)
    apply simp
   apply (subst llist_map_unfold)
   apply (simp add: lnil_def)
  apply (subst llist_map_unfold)
  apply (simp add: lcons_def)
  done
(*>*)

(* A strict function distributes over llist case analysis. *)
lemma llist_case_distr_strict:
  "f =   f(llist_caseghxxs) = llist_case(fg)(Λ x xs. f(hxxs))xxs"
  by (cases xxs) simp_all

(* Singleton list. *)
fixrec lsingleton :: "('a::predomain)  'a llist"
where
  "lsingletonx = x :@ lnil"

(* Append; recursion is on the first list only. *)
fixrec lappend :: "'a llist  'a llist  'a llist"
where
  "lappendlnilys = ys"
| "lappend(x :@ xs)ys = x :@ (lappendxsys)"

abbreviation
  lappend_syn :: "'a llist  'a llist  'a llist" (infixr ":++" 65) where
  "xs :++ ys  lappendxsys"

lemma lappend_strict': "lappend = (Λ a. )"
  by fixrec_simp

text‹This gives us that @{thm lappend_strict'}.›

text ‹This is where we use @{thm inst_cfun_pcpo}›
(* lappend is strict in its first argument. *)
lemma lappend_strict[simp]: "lappend = "
  by (rule cfun_eqI) (simp add: lappend_strict')

lemma lappend_assoc: "(xs :++ ys) :++ zs = xs :++ (ys :++ zs)"
  by (induct xs, simp_all)

(* lnil is a left and right unit for append. *)
lemma lappend_lnil_id_left[simp]: "lappendlnil = ID"
  by (rule cfun_eqI) simp

lemma lappend_lnil_id_right[simp]: "xs :++ lnil = xs"
  by (induct xs) simp_all

(* Flatten a list of lists. *)
fixrec lconcat :: "'a llist llist  'a llist"
where
  "lconcatlnil = lnil"
| "lconcat(x :@ xs) = x :++ lconcatxs"

lemma lconcat_strict[simp]: "lconcat = "
  by fixrec_simp

(* Conjunction of a tr-valued predicate over a list; short-circuits via
   andalso. *)
fixrec lall :: "('a  tr)  'a llist  tr"
where
  "lallplnil = TT"
| "lallp(x :@ xs) = (px andalso lallpxs)"

lemma lall_strict[simp]: "lallp = "
  by fixrec_simp

(* Keep the elements on which p yields TT. *)
fixrec lfilter :: "('a  tr)  'a llist  'a llist"
where
  "lfilterplnil = lnil"
| "lfilterp(x :@ xs) = If px then x :@ lfilterpxs else lfilterpxs"

lemma lfilter_strict[simp]: "lfilterp = "
  by fixrec_simp

lemma lfilter_const_true: "lfilter(Λ x. TT)xs = xs"
  by (induct xs, simp_all)

(* Filtering to lnil is the same as the predicate failing everywhere. *)
lemma lfilter_lnil: "(lfilterpxs = lnil) = (lall(neg oo p)xs = TT)"
proof(induct xs)
  fix a l assume indhyp: "(lfilterpl = lnil) = (lall(Tr.neg oo p)l = TT)"
  thus "(lfilterp(a :@ l) = lnil) = (lall(Tr.neg oo p)(a :@ l) = TT)"
    by (cases "pa" rule: trE, simp_all)
qed simp_all

(* Two filters fuse into one conjoined filter. *)
lemma filter_filter: "lfilterp(lfilterqxs) = lfilter(Λ x. qx andalso px)xs"
proof(induct xs)
  fix a l assume "lfilterp(lfilterql) = lfilter(Λ(x::'a). qx andalso px)l"
  thus "lfilterp(lfilterq(a :@ l)) = lfilter(Λ(x::'a). qx andalso px)(a :@ l)"
    by (cases "qa" rule: trE, simp_all)
qed simp_all

(* Drop the longest prefix satisfying p. *)
fixrec ldropWhile :: "('a  tr)  'a llist  'a llist"
where
  "ldropWhileplnil = lnil"
| "ldropWhilep(x :@ xs) = If px then ldropWhilepxs else x :@ xs"

lemma ldropWhile_strict[simp]: "ldropWhilep = "
  by fixrec_simp

lemma ldropWhile_lnil: "(ldropWhilepxs = lnil) = (lallpxs = TT)"
proof(induct xs)
  fix a l assume "(ldropWhilepl = lnil) = (lallpl = TT)"
  thus "(ldropWhilep(a :@ l) = lnil) = (lallp(a :@ l) = TT)"
    by (cases "pa" rule: trE, simp_all)
qed simp_all

(* Infinite iteration x :@ f x :@ f (f x) :@ ...; its defining equation is
   removed from the simpset below to keep the simplifier from looping. *)
fixrec literate :: "('a  'a)  'a  'a llist"
where
  "literatefx = x :@ literatef(fx)"

declare literate.simps[simp del]

text‹This order of tests is convenient for the nub proof. I can
imagine the other would be convenient for other proofs...›

(* Membership with an explicit equality test; note the recursion is on the
   tail first, then the head is tested (see the text above). *)
fixrec lmember :: "('a  'a  tr)  'a  'a llist  tr"
where
  "lmembereqxlnil = FF"
| "lmembereqx(lconsyys) = (lmembereqxys orelse eqyx)"

lemma lmember_strict[simp]: "lmembereqx = "
  by fixrec_simp

(* Length into the flat naturals Nat from the Nats theory. *)
fixrec llength :: "'a llist  Nat"
where
  "llengthlnil = 0"
| "llength(lconsxxs) = 1 + llengthxs"

lemma llength_strict[simp]: "llength = "
  by fixrec_simp

fixrec lmap :: "('a  'b)  'a llist  'b llist"
where
  "lmapflnil = lnil"
| "lmapf(x :@ xs) = fx :@ lmapfxs"

lemma lmap_strict[simp]: "lmapf = "
  by fixrec_simp

(* Functor composition law for lmap. *)
lemma lmap_lmap:
  "lmapf(lmapgxs) = lmap(f oo g)xs"
  by (induct xs) simp_all

text ‹The traditional list monad uses lconcatMap as its bind.›

definition
  "lconcatMap  (Λ f. lconcat oo lmapf)"

lemma lconcatMap_comp_simps[simp]:
  "lconcatMapf = "
  "lconcatMapflnil = lnil"
  "lconcatMapf(x :@ xs) = fx :++ lconcatMapfxs"
  by (simp_all add: lconcatMap_def)

(* Right-identity of the list monad: binding with the unit is the identity. *)
lemma lconcatMap_lsingleton[simp]:
  "lconcatMaplsingletonx = x"
  by (induct x) (simp_all add: lconcatMap_def)

text‹This @{term "zipWith"} function is only fully defined if the
lists have the same length.›

(* Strict zipWith: uneven lists yield bottom (see the undef lemmas). *)
fixrec lzipWith0 :: "('a  'b  'c)  'a llist  'b llist  'c llist"
where
  "lzipWith0f(a :@ as)(b :@ bs) = fab :@ lzipWith0fasbs"
| "lzipWith0flnillnil = lnil"

lemma lzipWith0_stricts [simp]:
  "lzipWith0fys = "
  "lzipWith0flnil = "
  "lzipWith0f(x :@ xs) = "
  by fixrec_simp+

lemma lzipWith0_undefs [simp]:
  "lzipWith0flnil(y :@ ys) = "
  "lzipWith0f(x :@ xs)lnil = "
  by fixrec_simp+

text‹This @{term "zipWith"} function follows Haskell's in being more
permissive: zipping uneven lists results in a list as long as the
shortest one. This is what the backtracking monad expects.›

(* Permissive zipWith; the unchecked catch-all clause makes uneven lists
   truncate to the shorter one rather than diverge. *)
fixrec lzipWith :: "('a  'b  'c)  'a llist  'b llist  'c llist"
where
  "lzipWithf(a :@ as)(b :@ bs) = fab :@ lzipWithfasbs"
| (unchecked) "lzipWithfxsys = lnil"

lemma lzipWith_simps [simp]:
  "lzipWithf(x :@ xs)(y :@ ys) = fxy :@ lzipWithfxsys"
  "lzipWithf(x :@ xs)lnil = lnil"
  "lzipWithflnil(y :@ ys) = lnil"
  "lzipWithflnillnil = lnil"
  by fixrec_simp+

lemma lzipWith_stricts [simp]:
  "lzipWithfys = "
  "lzipWithf(x :@ xs) = "
  by fixrec_simp+

text‹Homomorphism properties, see Bird's life's work.›

(* lmap and lconcat distribute over append; together they give
   associativity of the list-monad bind. *)
lemma lmap_lappend_dist:
  "lmapf(xs :++ ys) = lmapfxs :++ lmapfys"
  by (induct xs) simp_all

lemma lconcat_lappend_dist:
  "lconcat(xs :++ ys) = lconcatxs :++ lconcatys"
  by (induct xs) (simp_all add: lappend_assoc)

lemma lconcatMap_assoc:
  "lconcatMaph(lconcatMapgf) = lconcatMap(Λ v. lconcatMaph(gv))f"
  by (induct f) (simp_all add: lmap_lappend_dist lconcat_lappend_dist lconcatMap_def)

lemma lconcatMap_lappend_dist:
  "lconcatMapf(xs :++ ys) = lconcatMapfxs :++ lconcatMapfys"
  unfolding lconcatMap_def by (simp add: lconcat_lappend_dist lmap_lappend_dist)

(* The following avoid some case_tackery. *)

lemma lmap_not_bottoms[simp]:
  "x    lmapfx  "
  by (cases x) simp_all

lemma lsingleton_not_bottom[simp]:
  "lsingletonx  "
  by simp

lemma lappend_not_bottom[simp]:
  " xs  ; xs = lnil  ys     xs :++ ys  "
  apply (cases xs)
  apply simp_all
  done

(* Restore the HOLCF default sort for subsequent theories. *)
default_sort "domain"

(*<*)
end
(*>*)

Theory Maybe

(*<*)
(*
 * The Maybe monad.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory Maybe
imports HOLCF
begin
(*>*)

section‹The 'a Maybe› Monad›

text‹This section defines the monadic machinery for the 'a
Maybe› type. @{term "return"} is @{term "Just"}.›

(* Maybe as a HOLCF domain: Nothing, a lazy Just, plus the implicit bottom. *)
domain 'a Maybe = Nothing | Just (lazy "'a")

(* Failure is Nothing. *)
definition
  mfail :: "'a Maybe"
where
  "mfail  Nothing"

(* Run the handler only when the body yields Nothing; strict in the body
   (see mcatch_strict below). *)
definition
  mcatch :: "'a Maybe  'a Maybe  'a Maybe"
where
  "mcatch  Λ body handler. case body of Nothing  handler | Justx  Justx"

lemma mcatch_strict[simp]: "mcatch = "
  by (rule cfun_eqI, simp add: mcatch_def)

(* Monadic bind; strict in its first argument. *)
definition
  mbind :: "'a Maybe  ('a  'b Maybe)  'b Maybe" where
  "mbind  Λ f g. (case f of Nothing  Nothing | Justf'  gf')"

abbreviation
  mbind_syn :: "'a Maybe  ('a  'b Maybe)  'b Maybe" (infixl ">>=M" 55) where
  "f >>=M g  mbindfg"

lemma mbind_strict1[simp]: " >>=M g = " by (simp add: mbind_def)

text‹The standard monad laws.›

lemma leftID[simp]: "Justa >>=M f = fa" by (simp add: mbind_def)
lemma rightID[simp]: "m >>=M Just = m" by (cases m, simp_all add: mbind_def)

lemma assoc[simp]: "(m >>=M f) >>=M g = m >>=M (Λ x. fx >>=M g)"
  by (cases m, simp_all add: mbind_def)

text‹Reduction for the Maybe monad.›

lemma nothing_bind[simp]: "Nothing >>=M f = Nothing" by (simp add: mbind_def)
lemma just_bind[simp]: "Justx >>=M f = fx" by (simp add: mbind_def)

(* Binary lifting into Maybe, analogous to bliftM2 for Box in Nats. *)
definition
  mliftM2 :: "('a  'b  'c)  'a Maybe  'b Maybe  'c Maybe" where
  "mliftM2 f  Λ x y. x >>=M (Λ x'. y >>=M (Λ y'. Just(fx'y')))"

lemma mliftM2_Nothing1[simp]: "mliftM2 fNothingy = Nothing" by (simp add: mliftM2_def)
lemma mliftM2_Nothing2[simp]: "mliftM2 f(Justx)Nothing = Nothing" by (simp add: mliftM2_def)
lemma mliftM2_op[simp]: "mliftM2 f(Justx)(Justy) = Just(fxy)" by (simp add: mliftM2_def)

lemma mliftM2_strict1[simp]: "mliftM2 f = " by (rule cfun_eqI)+ (simp add: mliftM2_def)
lemma mliftM2_strict2[simp]: "mliftM2 f(Justx) = " by (simp add: mliftM2_def)
end

Theory FixedPointTheorems

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory FixedPointTheorems
imports
  HOLCF
begin

(* Document antiquotation @{haskell "..."}: typesets its argument between
   LaTeX \<...\> brackets for rendering Haskell fragments. *)
setup Thy_Output.antiquotation_raw binding‹haskell› (Scan.lift Args.name)
    (fn _ => fn s => Latex.string ("\\" ^ "<" ^ s ^ "\\>"))

(* LaTeXsugar fights with HOLCF syntax: at least cdot *)

(* THEOREMS *)
(* Print-mode setup so rules render as LaTeX inference rules (\inferrule)
   in the generated document; output only, no logical content. *)
notation (Rule output)
  Pure.imp  ("latex‹\\mbox{}\\inferrule{\\mbox{›_latex‹}}›latex‹{\\mbox{›_latex‹}}›")

syntax (Rule output)
  "_bigimpl" :: "asms  prop  prop"
  ("latex‹\\mbox{}\\inferrule{›_latex‹}›latex‹{\\mbox{›_latex‹}}›")

  "_asms" :: "prop  asms  asms"
  ("latex‹\\mbox{›_latex‹}\\\\›/ _")

  "_asm" :: "prop  asms" ("latex‹\\mbox{›_latex‹}›")
(*>*)
section‹Fixed-point theorems for program transformation›

text‹

We begin by recounting some standard theorems from the early days of
denotational semantics. The origins of these results are lost to
history; the interested reader can find some of it in
\citet{Bekic:1969, Manna:1974, Greibach:1975, Stoy:1977,
DBLP:books/daglib/0002432, Harel:1980, Plotkin:1983, Winskel:1993,
DBLP:journals/toplas/Sangiorgi09}.

›

subsection‹The rolling rule›

text‹

The \emph{rolling rule} captures what intuitively happens when we
re-order a recursive computation consisting of two parts. This theorem
dates from the 1970s at the latest -- see \citet[p210]{Stoy:1977} and
\citet{Plotkin:1983}. The following proofs were provided by
\citet{GillHutton:2009}.

›

(* Rolling rule, one direction: fix (g oo f) is below g applied to
   fix (f oo g). Proof follows Gill and Hutton. *)
lemma rolling_rule_ltr: "fix(g oo f)  g(fix(f oo g))"
proof -
  have "g(fix(f oo g))  g(fix(f oo g))"
    by (rule below_refl) ― ‹reflexivity›
  hence "g((f oo g)(fix(f oo g)))  g(fix(f oo g))"
    using fix_eq[where F="f oo g"] by simp ― ‹computation›
  hence "(g oo f)(g(fix(f oo g)))  g(fix(f oo g))"
    by simp ― ‹re-associate @{term "(oo)"}›
  thus "fix(g oo f)  g(fix(f oo g))"
    using fix_least_below by blast ― ‹induction›
qed

(* The converse direction, via monotonicity of g and one unfolding of fix. *)
lemma rolling_rule_rtl: "g(fix(f oo g))  fix(g oo f)"
proof -
  have "fix(f oo g)  f(fix(g oo f))" by (rule rolling_rule_ltr)
  hence "g(fix(f oo g))  g(f(fix(g oo f)))"
    by (rule monofun_cfun_arg) ― ‹g is monotonic›
  thus "g(fix(f oo g))  fix(g oo f)"
    using fix_eq[where F="g oo f"] by simp ― ‹computation›
qed

(* The rolling rule proper, by antisymmetry of the two halves. *)
lemma rolling_rule: "fix(g oo f) = g(fix(f oo g))"
  by (rule below_antisym[OF rolling_rule_ltr rolling_rule_rtl])
(*

This property of a fixed-point operator is termed \emph{dinaturality}
by \citet{DBLP:conf/lics/SimpsonP00}.

*)

subsection‹Least-fixed-point fusion›

text‹

\label{sec:lfp-fusion}

\emph{Least-fixed-point fusion} provides a kind of induction that has
proven to be very useful in calculational settings. Intuitively it
lifts the step-by-step correspondence between @{term "f"} and @{term
"h"} witnessed by the strict function @{term "g"} to the fixed points
of @{term "f"} and @{term "g"}:
\[
  \begin{diagram}
    \node{\bullet} \arrow{e,t}{h} \node{\bullet}\\
    \node{\bullet} \arrow{n,l}{g} \arrow{e,b}{f} \node{\bullet} \arrow{n,r}{g}
  \end{diagram}
  \qquad \Longrightarrow \qquad
  \begin{diagram}
    \node{\mathsf{fix}\ h}\\
    \node{\mathsf{fix}\ f} \arrow{n,r}{g}
  \end{diagram}
\]
\citet*{FokkingaMeijer:1991}, and also their later
\citet*{barbed-wire:1991}, made extensive use of this rule, as did
\citet{Tullsen:PhDThesis} in his program transformation tool PATH.
This diagram is strongly reminiscent of the simulations used to
establish refinement relations between imperative programs and their
specifications \citep*{EdR:cup98}.

The following proof is close to the third variant of
\citet[p215]{Stoy:1977}. We relate the two fixpoints using the rule
\texttt{parallel\_fix\_ind}:
\begin{center}
  @{thm[mode=Rule] parallel_fix_ind}
\end{center}
in a very straightforward way:

›

(* Least-fixed-point fusion: a strict g with g oo f = h oo g maps fix f to
   fix h. Proved directly by parallel fixed-point induction. *)
lemma lfp_fusion:
  assumes "g = "
  assumes "g oo f = h oo g"
  shows "g(fixf) = fixh"
proof(induct rule: parallel_fix_ind)
  case 2 show "g = " by fact
  case (3 x y)
  from gx = y g oo f = h oo g show "g(fx) = hy"
    by (simp add: cfun_eq_iff)
qed simp

text‹

This lemma also goes by the name of \emph{Plotkin's axiom}
\citep{PittsAM:relpod} or \emph{uniformity}
\citep{DBLP:conf/lics/SimpsonP00}.

›
(*<*)

(* The rest of this theory is only of historical interest. *)

text ‹Some may find the pointed version easier to read.›

(* lfp-fusion with the commuting square stated pointwise rather than via
   function composition; reduces to lfp_fusion. *)
lemma lfp_fusion_pointed:
  assumes Cfg: "f. C(Ff) = G(Cf)"
      and strictC: "C = "
  shows "C(fixF) = fixG"
  using lfp_fusion[where f=F and g=C and h=G] assms
  by (simp add: cfcomp1)

subsubsection‹More about lfp-fusion›

text‹

Alternative proofs. This is the ``intuitive'' one
\citet[p125]{Gunter:1992} and \citet[p46]{Tullsen:PhDThesis}, where we
can shuffle @{term "g"} to the end of the iteration of @{term "f"}
using @{term "fgh"}.

›

lemma lfp_fusion2_aux:
  assumes fgh: "g oo f = h oo g"
  shows "g(iterate if) = iterate ih(g)"
proof(induct i)
  case (Suc i)
  have "g(iterate (Suc i)f) = (g oo f)(iterate if)" by simp
  also have "... = h(g(iterate if))" using fgh by (simp add: cfcomp1)
  also have "... = h(iterate ih(g))" using Suc by simp
  also have "... = iterate (Suc i)h(g)" by simp
  finally show ?case .
qed simp

lemma lfp_fusion2:
  assumes fgh: "g oo f = h oo g"
      and strictg: "g = "
  shows "g(fixf) = fixh"
proof -
  have "g(fixf) = g(i. iterate if)" by (simp only: fix_def2)
  also have "... = (i. g(iterate if))" by (simp add: contlub_cfun_arg)
  also have "... = (i. (iterate ih(g)))" by (simp only: lfp_fusion2_aux[OF fgh])
  also have "... = fixh" using strictg by (simp only: fix_def2)
  finally show ?thesis .
qed

text‹This is the first one by \citet[p213]{Stoy:1977}, almost
identical to the above.›

lemma lfp_fusion3_aux:
  assumes fgh: "g oo f = h oo g"
      and strictg: "g = "
  shows "g(iterate if) = iterate ih"
proof(induct i)
  case 0 from strictg show ?case by simp
next
  case (Suc i)
  have "g(iterate (Suc i)f) = (g oo f)(iterate if)" by simp
  also have "... = h(g(iterate if))" using fgh by (simp add: cfcomp1)
  also have "... = h(iterate ih)" using Suc by simp
  also have "... = iterate (Suc i)h" by simp
  finally show ?case .
qed

lemma lfp_fusion3:
  assumes fgh: "g oo f = h oo g"
      and strictg: "g = "
  shows "g(fixf) = fixh"
proof -
  have "g(fixf) = g(i. iterate if)" by (simp only: fix_def2)
  also have "... = (i. g(iterate if))" by (simp add: contlub_cfun_arg)
  also have "... = (i. (iterate ih))" by (simp only: lfp_fusion3_aux[OF fgh strictg])
  also have "... = fixh" by (simp only: fix_def2)
  finally show ?thesis .
qed

text‹Stoy's second proof \citep[p214]{Stoy:1977} is similar to the
original proof using fixed-point induction.›

lemma lfp_fusion4:
  assumes fgh: "g oo f = h oo g"
      and strictg: "g = "
  shows "g(fixf) = fixh"
proof(rule below_antisym)
  show "fixh  g(fixf)"
  proof -
    have "h(g(fixf)) = (g oo f)(fixf)" using fgh by simp
    also have "... = g(fixf)" by (subst fix_eq, simp)
    finally show ?thesis by (rule fix_least)
  qed
  let ?P = "λx. gx  fixh"
  show "?P (fixf)"
  proof(induct rule: fix_ind[where P="?P"])
    case 2 with strictg show ?case by simp
  next
    case (3 x) hence indhyp: "gx  fixh" .
    have "g(fx) = (h oo g)x" using fgh[symmetric] by simp
    with indhyp show "g(fx)  fixh"
      by (subst fix_eq, simp add: monofun_cfun)
  qed simp
qed

text‹A wrinkly variant from \citet[p11]{barbed-wire:1991}.›

lemma lfp_fusion_barbed_variant:
  assumes ff': "f = f'"
      and fgh: "f oo g = h oo f"
      and f'g'h: "f' oo g' = h oo f'"
  shows "f(fixg) = f'(fixg')"
proof(induct rule: parallel_fix_ind)
  case 2 show "f = f'" by (rule ff')
  case (3 x y)
  from fx = f'y have "h(fx) = h(f'y)" by simp
  with fgh f'g'h have "f(gx) = f'(g'y)"
    using cfcomp2[where f="f" and g="g", symmetric]
          cfcomp2[where f="f'" and g="g'", symmetric]
    by simp
  thus ?case by simp
qed simp
(*>*)

(*<*)
end
(*>*)

Theory WorkerWrapper

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com, commenced December 2007.
 * License: BSD
 *)

theory WorkerWrapper
imports
  HOLCF
  FixedPointTheorems
begin

(*>*)
section‹The transformation according to Gill and Hutton›

text‹

\begin{figure}[tb]
 \begin{center}
  \fbox{\parbox{0.96\textwidth}{For a recursive definition @{haskell "comp =
      \\mathsf{fix}\\ body"} for some @{haskell "body :: A \\to A"} and a pair of
      functions @{haskell "wrap :: B \\to A"} and @{haskell "unwrap :: A \\to B"} where
      @{haskell "wrap \\circ unwrap = id_A"}, we have:

      \parbox{0.35\textwidth}{\begin{haskell}
        comp = wrap\ work\\
        \quad work :: B\\
        \quad work = \mathsf{fix}\ (unwrap \circ body \circ wrap)
      \end{haskell}}\hfill \textsf{(the worker/wrapper transformation)}

      Also:

      \parbox{0.35\textwidth}{\begin{haskell}
        (unwrap \circ wrap)\ work = work
      \end{haskell}}\hfill \textsf{(worker/wrapper fusion)}}}%
 \end{center}%
 \caption{The worker/wrapper transformation and fusion rule of \citet{GillHutton:2009}.}\label{fig:ww}
\end{figure}

The worker/wrapper transformation and associated fusion rule as
formalised by \citet*{GillHutton:2009} are reproduced in
Figure~\ref{fig:ww}, and the reader is referred to the original paper
for further motivation and background.

Armed with the rolling rule we can show that Gill and Hutton's
justification of the worker/wrapper transformation is sound. There is
a battery of these transformations with varying strengths of
hypothesis.

The first requires @{term "wrap oo unwrap"} to be the identity for all
values.

›

lemma worker_wrapper_id:
  fixes wrap :: "'b::pcpo  'a::pcpo"
  fixes unwrap :: "'a  'b"
  assumes wrap_unwrap: "wrap oo unwrap = ID"
  assumes comp_body: "computation = fixbody"
  shows "computation = wrap(fix(unwrap oo body oo wrap))"
proof -
  from comp_body have "computation = fix(ID oo body)"
    by simp
  also from wrap_unwrap have " = fix(wrap oo unwrap oo body)"
    by (simp add: assoc_oo)
  also have "... = wrap(fix(unwrap oo body oo wrap))"
    using rolling_rule[where f="unwrap oo body" and g="wrap"]
    by (simp add: assoc_oo)
  finally show ?thesis .
qed

text‹

The second weakens this assumption by requiring that @{term "wrap oo
unwrap"} only act as the identity on values in the image of @{term
"body"}.

›

lemma worker_wrapper_body:
  fixes wrap :: "'b::pcpo  'a::pcpo"
  fixes unwrap :: "'a  'b"
  assumes wrap_unwrap: "wrap oo unwrap oo body = body"
  assumes comp_body: "computation = fixbody"
  shows "computation = wrap(fix(unwrap oo body oo wrap))"
proof -
  from comp_body have "computation = fix(wrap oo unwrap oo body)"
    using wrap_unwrap by (simp add: assoc_oo wrap_unwrap)
  also have "... = wrap(fix(unwrap oo body oo wrap))"
    using rolling_rule[where f="unwrap oo body" and g="wrap"]
    by (simp add: assoc_oo)
  finally show ?thesis .
qed

text‹

This is particularly useful when the computation being transformed is
strict in its argument.

Finally we can allow the identity to take the full recursive context
into account. This rule was described by Gill and Hutton but not used.

›

lemma worker_wrapper_fix:
  fixes wrap :: "'b::pcpo  'a::pcpo"
  fixes unwrap :: "'a  'b"
  assumes wrap_unwrap: "fix(wrap oo unwrap oo body) = fixbody"
  assumes comp_body: "computation = fixbody"
  shows "computation = wrap(fix(unwrap oo body oo wrap))"
proof -
  from comp_body have "computation = fix(wrap oo unwrap oo body)"
    using wrap_unwrap by (simp add: assoc_oo wrap_unwrap)
  also have "... = wrap(fix(unwrap oo body oo wrap))"
    using rolling_rule[where f="unwrap oo body" and g="wrap"]
    by (simp add: assoc_oo)
  finally show ?thesis .
qed

text‹

Gill and Hutton's ‹worker_wrapper_fusion› rule is intended to
allow the transformation of @{term "(unwrap oo wrap)R"} to @{term
"R"} in recursive contexts, where @{term "R"} is meant to be a
self-call. Note that it assumes that the first worker/wrapper
hypothesis can be established.

›

lemma worker_wrapper_fusion:
  fixes wrap :: "'b::pcpo  'a::pcpo"
  fixes unwrap :: "'a  'b"
  assumes wrap_unwrap: "wrap oo unwrap = ID"
  assumes work: "work = fix(unwrap oo body oo wrap)"
  shows "(unwrap oo wrap)work = work"
proof -
  have "(unwrap oo wrap)work = (unwrap oo wrap)(fix(unwrap oo body oo wrap))"
    using work by simp
  also have " = (unwrap oo wrap)(fix(unwrap oo body oo wrap oo unwrap oo wrap))"
    using wrap_unwrap by (simp add: assoc_oo)
  also have " = fix(unwrap oo wrap oo unwrap oo body oo wrap)"
    using rolling_rule[where f="unwrap oo body oo wrap" and g="unwrap oo wrap"]
    by (simp add: assoc_oo)
  also have " = fix(unwrap oo body oo wrap)"
    using wrap_unwrap by (simp add: assoc_oo)
  finally show ?thesis using work by simp
qed

text‹

The following sections show that this rule only preserves partial
correctness. This is because Gill and Hutton apply it in the context
of the fold/unfold program transformation framework of
\citet*{BurstallDarlington:1977}, which need not preserve termination.
We show that the fusion rule does in fact require extra conditions to
be totally correct and propose one such sufficient condition.


›
(*<*)

end
(*>*)

Theory CounterExample

(*<*)
theory CounterExample
imports
  HOLCF
  WorkerWrapper
begin
(*>*)

subsection‹Worker/wrapper fusion is partially correct›

text‹

We now examine how Gill and Hutton apply their worker/wrapper fusion
rule in the context of the fold/unfold framework.

The key step among those left implicit in the original paper is the use
of the \textsf{fold} rule to justify replacing the worker with the
fused version. Schematically, the fold/unfold framework maintains a
history of all definitions that have appeared during transformation,
and the \textsf{fold} rule treats this as a set of rewrite rules
oriented right-to-left. (The \textsf{unfold} rule treats the current
working set of definitions as rewrite rules oriented left-to-right.)
Hence as each definition @{haskell "f = body"} yields a rule of the form
@{haskell "body\\ \\Longrightarrow\\ f"}, one can always derive @{haskell "f =\\
f"}. Clearly this has dire implications for the preservation of
termination behaviour.

\citet{Tullsen:PhDThesis} in his \S3.1.2 observes that the semantic
essence of the \textsf{fold} rule is Park induction:
\begin{center}
  @{thm[mode=Rule] "fix_least"[where F=f]} {\texttt{fix\_least}}
\end{center}
viz that @{haskell "f\\ x = x"} implies only the partially correct @{haskell "fix\\ f
\\sqsubseteq x"}, and not the totally correct @{haskell "fix\\ f = x"}. We use
this characterisation to show that if @{haskell "unwrap"} is non-strict
(i.e. @{haskell "unwrap \\bot \\neq \\bot"}) then there are programs where
worker/wrapper fusion as used by Gill and Hutton need only be
partially correct.

Consider the scenario described in Figure~\ref{fig:ww}. After applying
the worker/wrapper transformation, we attempt to apply fusion by
finding a residual expression @{term "body'"} such that the body of
the worker, i.e. the expression @{term "unwrap oo body oo wrap"}, can
be rewritten as @{term "body' oo unwrap oo wrap"}. Intuitively this is
the semantic form of workers where all self-calls are fusible. Our
goal is to justify redefining @{term "work"} to @{term "fixbody'"},
i.e. to establish:
\begin{center}
  @{term "fix(unwrap oo body oo wrap) = fixbody'"}
\end{center}
We show that worker/wrapper fusion as proposed by Gill and Hutton is
partially correct using Park induction:

›

lemma fusion_partially_correct:
  assumes wrap_unwrap: "wrap oo unwrap = ID"
  assumes work: "work = fix(unwrap oo body oo wrap)"
  assumes body': "unwrap oo body oo wrap = body' oo unwrap oo wrap"
  shows "fixbody'  work"
proof(rule fix_least)
  have "work = (unwrap oo body oo wrap)work"
    using work by (simp add: fix_eq[symmetric])
  also have "... = (body' oo unwrap oo wrap)work"
    using body' by simp
  also have "... = (body' oo unwrap oo wrap)((unwrap oo body oo wrap)work)"
    using work by (simp add: fix_eq[symmetric])
  also have "... = (body' oo unwrap oo wrap oo unwrap oo body oo wrap)work"
    by simp
  also have "... = (body' oo unwrap oo body oo wrap)work"
    using wrap_unwrap by (simp add: assoc_oo)
  also have "... = body'work"
    using work by (simp add: fix_eq[symmetric])
  finally show "body'work = work" by simp
qed

text‹

The next section shows the converse does not obtain.

›

subsection‹A non-strict @{term "unwrap"} may go awry›

text‹

\label{sec:ww-counterexample}

If @{term "unwrap"} is non-strict, then it is possible that the fusion
rule proposed by Gill and Hutton does not preserve termination. To
show this we take a small artificial example. The type @{term "A"} is
not important, but we need access to a non-bottom inhabitant. The
target type @{term "B"} is the non-strict lift of @{term "A"}.

›

domain A = A
domain B = B (lazy "A")

text‹

The functions @{term "wrap"} and @{term "unwrap"} that map between
these types are routine. Note that @{term "wrap"} is (necessarily)
strict due to the property @{thm "retraction_strict"}.

›

fixrec wrap :: "B  A"
where "wrap(Ba) = a"

(*<*)
lemma wrap_strict[simp]: "wrap = "
  by fixrec_simp
(*>*)

fixrec unwrap :: "A  B"
where "unwrap = B"

text‹

Discharging the worker/wrapper hypothesis is similarly routine.

›

lemma wrap_unwrap: "wrap oo unwrap = ID"
  by (simp add: cfun_eq_iff)

text‹

The candidate computation we transform can be any that uses the
recursion parameter @{term "r"} non-strictly. The following is
especially trivial.

›

fixrec body :: "A  A"
where "bodyr = A"

text‹

The wrinkle is that the transformed worker can be strict in the
recursion parameter @{term "r"}, as @{term "unwrap"} always lifts it.

›

fixrec body' :: "B  B"
where "body'(Ba) = BA"
(*<*)

lemma body'_strict[simp]: "body' = "
  by fixrec_simp

(*>*)
text‹

As explained above, we set up the fusion opportunity:

›

lemma body_body': "unwrap oo body oo wrap = body' oo unwrap oo wrap"
  by (simp add: cfun_eq_iff)

text‹

This result depends crucially on @{term "unwrap"} being non-strict.

Our earlier result shows that the proposed transformation is partially
correct:

›

lemma "fixbody'  fix(unwrap oo body oo wrap)"
  by (rule fusion_partially_correct[OF wrap_unwrap refl body_body'])

text‹

However it is easy to see that it is not totally correct:

›

lemma "¬ fix(unwrap oo body oo wrap)  fixbody'"
proof -
  have l: "fix(unwrap oo body oo wrap) = BA"
    by (subst fix_eq) simp
  have r: "fixbody' = "
    by (simp add: fix_strict)
  from l r show ?thesis by simp
qed

text‹

This trick works whenever @{term "unwrap"} is not strict. In the
following section we show that requiring @{term "unwrap"} to be strict
leads to a straightforward proof of total correctness.

Note that if we have already established that @{term "wrap oo unwrap =
ID"}, then making @{term "unwrap"} strict preserves this equation:

›

lemma
  assumes "wrap oo unwrap = ID"
  shows "wrap oo strictifyunwrap = ID"
proof(rule cfun_eqI)
  fix x
  from assms
  show "(wrap oo strictifyunwrap)x = IDx"
    by (cases "x = ") (simp_all add: cfun_eq_iff retraction_strict)
qed

text‹

From this we conclude that the worker/wrapper transformation itself
cannot exploit any laziness in @{term "unwrap"} under the
context-insensitive assumptions of @{thm [source]
"worker_wrapper_id"}. This is not to say that other program
transformations may not be able to.

›

(*<*)
lemma
  "¬ strictifyunwrap oo body oo wrap = body' oo strictifyunwrap oo wrap"
  by (simp add: cfun_eq_iff exI[where x=""])
(*>*)

(*<*)
end
(*>*)

Theory WorkerWrapperNew

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory WorkerWrapperNew
imports
  HOLCF
  FixedPointTheorems
  WorkerWrapper
begin

(*>*)
section‹A totally-correct fusion rule›

text‹
\label{sec:ww-fixed}

We now show that a termination-preserving worker/wrapper fusion rule
can be obtained by requiring @{term "unwrap"} to be strict. (As we
observed earlier, @{term "wrap"} must always be strict due to the
assumption that ‹wrap oo unwrap = ID›.)

Our first result shows that a combined worker/wrapper transformation
and fusion rule is sound, using the assumptions of ‹worker_wrapper_id› and the ubiquitous ‹lfp_fusion› rule.

›

lemma worker_wrapper_fusion_new:
  fixes wrap :: "'b::pcpo  'a::pcpo"
  fixes unwrap :: "'a  'b"
  fixes body' :: "'b  'b"
  assumes wrap_unwrap: "wrap oo unwrap = (ID :: 'a  'a)"
  assumes unwrap_strict: "unwrap = "
  assumes body_body': "unwrap oo body oo wrap = body' oo (unwrap oo wrap)"
  shows "fixbody = wrap(fixbody')"
proof -
  from body_body'
  have "unwrap oo body oo (wrap oo unwrap) = (body' oo unwrap oo (wrap oo unwrap))"
    by (simp add: assoc_oo)
  with wrap_unwrap have "unwrap oo body = body' oo unwrap"
    by simp
  with unwrap_strict have "unwrap(fixbody) = fixbody'"
    by (rule lfp_fusion)
  hence "(wrap oo unwrap)(fixbody) = wrap(fixbody')"
    by simp
  with wrap_unwrap show ?thesis by simp
qed

text‹

We can also show a more general result which allows fusion to be
optionally performed on a per-recursive-call basis using
\texttt{parallel\_fix\_ind}:

›

lemma worker_wrapper_fusion_new_general:
  fixes wrap :: "'b::pcpo  'a::pcpo"
  fixes unwrap :: "'a  'b"
  assumes wrap_unwrap: "wrap oo unwrap = (ID :: 'a  'a)"
  assumes unwrap_strict: "unwrap = "
  assumes body_body': "r. (unwrap oo wrap)r = r
                         (unwrap oo body oo wrap)r = body'r"
  shows "fixbody = wrap(fixbody')"
proof -
  let ?P = "λ(x, y). x = y  unwrap(wrapx) = x"
  have "?P (fix(unwrap oo body oo wrap), (fixbody'))"
  proof(induct rule: parallel_fix_ind)
    case 2 with retraction_strict unwrap_strict wrap_unwrap show "?P (, )"
      by (bestsimp simp add: cfun_eq_iff)
    case (3 x y)
    hence xy: "x = y" and unwrap_wrap: "unwrap(wrapx) = x" by auto
    from body_body' xy unwrap_wrap
    have "(unwrap oo body oo wrap)x = body'y"
      by simp
    moreover
    from wrap_unwrap
    have "unwrap(wrap((unwrap oo body oo wrap)x)) = (unwrap oo body oo wrap)x"
      by (simp add: cfun_eq_iff)
    ultimately show ?case by simp
  qed simp
  thus ?thesis
    using worker_wrapper_id[OF wrap_unwrap refl] by simp
qed

text‹

This justifies the syntactically-oriented rules shown in
Figure~\ref{fig:wwc2}; note the scoping of the fusion rule.

Those familiar with the ``bananas'' work of \citet*{barbed-wire:1991}
will not be surprised that adding a strictness assumption justifies an
equational fusion rule.

\begin{figure}[tb]
 \begin{center}
  \fbox{\parbox{0.96\textwidth}{For a recursive definition @{haskell "comp =
      body"} of type @{haskell "A"} and a pair of functions @{haskell "wrap :: B \\to A"}
      and @{haskell "unwrap :: A \\to B"} where @{haskell "wrap \\circ unwrap = id_A"} and
      @{haskell "unwrap\\ \\bot = \\bot"}, define:

      \parbox{0.35\textwidth}{\begin{haskell}
        comp & = wrap\ work\\
        work & = unwrap\ (body[wrap\ work / comp])
      \end{haskell}}\hfill \textsf{(the worker/wrapper transformation)}

    In the scope of @{haskell "work"}, the following rewrite is admissible:

    \parbox{0.35\textwidth}{\begin{haskell}
        unwrap\ (wrap\ work) \Longrightarrow work
      \end{haskell}}\hfill \textsf{(worker/wrapper fusion)}}}%
 \end{center}%
\caption{The syntactic worker/wrapper transformation and fusion rule.}\label{fig:wwc2}
\end{figure}

›

(*<*)
end
(*>*)

Theory Accumulator

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory Accumulator
imports
  HOLCF
  LList
  WorkerWrapperNew
begin
(*>*)

section‹Naive reverse becomes accumulator-reverse.›

text‹\label{sec:accum}›

subsection‹Hughes lists, naive reverse, worker-wrapper optimisation.›

text‹The ``Hughes'' list type.›

type_synonym 'a H = "'a llist  'a llist"

definition
  list2H :: "'a llist  'a H" where
  "list2H  lappend"

lemma acc_c2a_strict[simp]: "list2H = "
  by (rule cfun_eqI, simp add: list2H_def)

definition
  H2list :: "'a H  'a llist" where
  "H2list  Λ f . flnil"

text‹The paper only claims the homomorphism holds for finite lists,
but in fact it holds for all lazy lists in HOLCF. They are trying to
dodge an explicit appeal to the equation @{thm "inst_cfun_pcpo"},
which does not hold in Haskell.›

lemma H_llist_hom_append: "list2H(xs :++ ys) = list2Hxs oo list2Hys" (is "?lhs = ?rhs")
proof(rule cfun_eqI)
  fix zs
  have "?lhszs = (xs :++ ys) :++ zs" by (simp add: list2H_def)
  also have " = xs :++ (ys :++ zs)" by (rule lappend_assoc)
  also have " = list2Hxs(ys :++ zs)" by (simp add: list2H_def)
  also have " = list2Hxs(list2Hyszs)" by (simp add: list2H_def)
  also have " = (list2Hxs oo list2Hys)zs" by simp
  finally show "?lhszs = (list2Hxs oo list2Hys)zs" .
qed

lemma H_llist_hom_id: "list2Hlnil = ID" by (simp add: list2H_def)

lemma H2list_list2H_inv: "H2list oo list2H = ID"
  by (rule cfun_eqI, simp add: H2list_def list2H_def)

text‹\citet[\S4.2]{GillHutton:2009} define the naive reverse
function as follows.›

fixrec lrev :: "'a llist  'a llist"
where
  "lrevlnil = lnil"
| "lrev(x :@ xs) = lrevxs :++ (x :@ lnil)"

text‹Note ``body'' is the generator of @{term "lrev_def"}.›

lemma lrev_strict[simp]: "lrev = "
by fixrec_simp

fixrec lrev_body :: "('a llist  'a llist)  'a llist  'a llist"
where
  "lrev_bodyrlnil = lnil"
| "lrev_bodyr(x :@ xs) = rxs :++ (x :@ lnil)"

lemma lrev_body_strict[simp]: "lrev_bodyr = "
by fixrec_simp

text‹This is trivial but syntactically a bit touchy. Would be nicer
to define @{term "lrev_body"} as the generator of the fixpoint
definition of @{term "lrev"} directly.›

lemma lrev_lrev_body_eq: "lrev = fixlrev_body"
  by (rule cfun_eqI, subst lrev_def, subst lrev_body.unfold, simp)

text‹Wrap / unwrap functions.›

definition
  unwrapH :: "('a llist  'a llist)  'a llist  'a H" where
  "unwrapH  Λ f xs . list2H(fxs)"

lemma unwrapH_strict[simp]: "unwrapH = "
  unfolding unwrapH_def by (rule cfun_eqI, simp)

definition
  wrapH :: "('a llist  'a H)  'a llist  'a llist" where
  "wrapH  Λ f xs . H2list(fxs)"

lemma wrapH_unwrapH_id: "wrapH oo unwrapH = ID" (is "?lhs = ?rhs")
proof(rule cfun_eqI)+
  fix f xs
  have "?lhsfxs = H2list(list2H(fxs))" by (simp add: wrapH_def unwrapH_def)
  also have " = (H2list oo list2H)(fxs)" by simp
  also have " = ID(fxs)" by (simp only: H2list_list2H_inv)
  also have " = ?rhsfxs" by simp
  finally show "?lhsfxs = ?rhsfxs" .
qed

subsection‹Gill/Hutton-style worker/wrapper.›

definition
  lrev_work :: "'a llist  'a H" where
  "lrev_work  fix(unwrapH oo lrev_body oo wrapH)"

definition
  lrev_wrap :: "'a llist  'a llist" where
  "lrev_wrap  wrapHlrev_work"

lemma lrev_lrev_ww_eq: "lrev = lrev_wrap"
  using worker_wrapper_id[OF wrapH_unwrapH_id lrev_lrev_body_eq]
  by (simp add: lrev_wrap_def lrev_work_def)

subsection‹Optimise worker/wrapper.›

text‹Intermediate worker.›

fixrec lrev_body1 :: "('a llist  'a H)  'a llist  'a H"
where
  "lrev_body1rlnil = list2Hlnil"
| "lrev_body1r(x :@ xs) = list2H(wrapHrxs :++ (x :@ lnil))"

definition
  lrev_work1 :: "'a llist  'a H" where
  "lrev_work1  fixlrev_body1"

lemma lrev_body_lrev_body1_eq: "lrev_body1 = unwrapH oo lrev_body oo wrapH"
  apply (rule cfun_eqI)+
  apply (subst lrev_body.unfold)
  apply (subst lrev_body1.unfold)
  apply (case_tac xa)
  apply (simp_all add: list2H_def wrapH_def unwrapH_def)
  done

lemma lrev_work1_lrev_work_eq: "lrev_work1 = lrev_work"
  by (unfold lrev_work_def lrev_work1_def,
      rule cfun_arg_cong[OF lrev_body_lrev_body1_eq])

text‹Now use the homomorphism.›

fixrec lrev_body2 :: "('a llist  'a H)  'a llist  'a H"
where
  "lrev_body2rlnil = ID"
| "lrev_body2r(x :@ xs) = list2H(wrapHrxs) oo list2H(x :@ lnil)"

lemma lrev_body2_strict[simp]: "lrev_body2r = "
by fixrec_simp

definition
  lrev_work2 :: "'a llist  'a H" where
  "lrev_work2  fixlrev_body2"

lemma lrev_work2_strict[simp]: "lrev_work2 = "
  unfolding lrev_work2_def
  by (subst fix_eq) simp

lemma lrev_body2_lrev_body1_eq: "lrev_body2 = lrev_body1"
  by ((rule cfun_eqI)+
     , (subst lrev_body1.unfold, subst lrev_body2.unfold)
     , (simp add: H_llist_hom_append[symmetric] H_llist_hom_id))

lemma lrev_work2_lrev_work1_eq: "lrev_work2 = lrev_work1"
  by (unfold lrev_work2_def lrev_work1_def
     , rule cfun_arg_cong[OF lrev_body2_lrev_body1_eq])

text ‹Simplify.›

fixrec lrev_body3 :: "('a llist  'a H)  'a llist  'a H"
where
  "lrev_body3rlnil = ID"
| "lrev_body3r(x :@ xs) = rxs oo list2H(x :@ lnil)"

lemma lrev_body3_strict[simp]: "lrev_body3r = "
by fixrec_simp

definition
  lrev_work3 :: "'a llist  'a H" where
  "lrev_work3  fixlrev_body3"

lemma lrev_wwfusion: "list2H((wrapHlrev_work2)xs) = lrev_work2xs"
proof -
  {
    have "list2H oo wrapHlrev_work2 = unwrapH(wrapHlrev_work2)"
      by (rule cfun_eqI, simp add: unwrapH_def)
    also have " = (unwrapH oo wrapH)lrev_work2" by simp
    also have " = lrev_work2"
      apply -
      apply (rule worker_wrapper_fusion[OF wrapH_unwrapH_id, where body="lrev_body"])
      apply (auto iff: lrev_body2_lrev_body1_eq lrev_body_lrev_body1_eq lrev_work2_def lrev_work1_def)
      done
    finally have "list2H oo wrapHlrev_work2 = lrev_work2" .
  }
  thus ?thesis using cfun_eq_iff[where f="list2H oo wrapHlrev_work2" and g="lrev_work2"] by auto
qed

text‹If we use this result directly, we only get a partially-correct
program transformation, see \citet{Tullsen:PhDThesis} for details.›

lemma "lrev_work3  lrev_work2"
  unfolding lrev_work3_def
proof(rule fix_least)
  {
    fix xs have "lrev_body3lrev_work2xs = lrev_work2xs"
    proof(cases xs)
      case bottom thus ?thesis by simp
    next
      case lnil thus ?thesis
        unfolding lrev_work2_def
        by (subst fix_eq[where F="lrev_body2"], simp)
    next
      case (lcons y ys)
      hence "lrev_body3lrev_work2xs = lrev_work2ys oo list2H(y :@ lnil)" by simp
      also have " = list2H((wrapHlrev_work2)ys) oo list2H(y :@ lnil)"
        using lrev_wwfusion[where xs=ys] by simp
      also from lcons have " = lrev_body2lrev_work2xs" by simp
      also have " = lrev_work2xs"
        unfolding lrev_work2_def by (simp only: fix_eq[symmetric])
      finally show ?thesis by simp
    qed
  }
  thus "lrev_body3lrev_work2 = lrev_work2" by (rule cfun_eqI)
qed

text‹We can't show the reverse inclusion in the same way as the
fusion law doesn't hold for the optimised definition. (Intuitively we
haven't established that it is equal to the original @{term "lrev"}
definition.) We could show termination of the optimised definition
though, as it operates on finite lists. Alternatively we can use
induction (over the list argument) to show total equivalence.

The following lemma shows that the fusion Gill/Hutton want to do is
completely sound in this context, by appealing to the lazy list
induction principle.›

lemma lrev_work3_lrev_work2_eq: "lrev_work3 = lrev_work2" (is "?lhs = ?rhs")
proof(rule cfun_eqI)
  fix x
  show "?lhsx = ?rhsx"
  proof(induct x)
    show "lrev_work3 = lrev_work2"
      apply (unfold lrev_work3_def lrev_work2_def)
      apply (subst fix_eq[where F="lrev_body2"])
      apply (subst fix_eq[where F="lrev_body3"])
      by (simp add: lrev_body3.unfold lrev_body2.unfold)
  next
    show "lrev_work3lnil = lrev_work2lnil"
      apply (unfold lrev_work3_def lrev_work2_def)
      apply (subst fix_eq[where F="lrev_body2"])
      apply (subst fix_eq[where F="lrev_body3"])
      by (simp add: lrev_body3.unfold lrev_body2.unfold)
  next
    fix a l assume "lrev_work3l = lrev_work2l"
    thus "lrev_work3(a :@ l) = lrev_work2(a :@ l)"
      apply (unfold lrev_work3_def lrev_work2_def)
      apply (subst fix_eq[where F="lrev_body2"])
      apply (subst fix_eq[where F="lrev_body3"])
      apply (fold lrev_work3_def lrev_work2_def)
      apply (simp add: lrev_body3.unfold lrev_body2.unfold lrev_wwfusion)
      done
  qed simp_all
qed

text‹Use the combined worker/wrapper-fusion rule. Note we get a weaker lemma.›

lemma lrev3_2_syntactic: "lrev_body3 oo (unwrapH oo wrapH) = lrev_body2"
  apply (subst lrev_body2.unfold, subst lrev_body3.unfold)
  apply (rule cfun_eqI)+
  apply (case_tac xa)
    apply (simp_all add: unwrapH_def)
  done

lemma lrev_work3_lrev_work2_eq': "lrev = wrapHlrev_work3"
proof -
  from lrev_lrev_body_eq
  have "lrev = fixlrev_body" .
  also from wrapH_unwrapH_id unwrapH_strict
  have " = wrapH(fixlrev_body3)"
    by (rule worker_wrapper_fusion_new
       , simp add: lrev3_2_syntactic lrev_body2_lrev_body1_eq lrev_body_lrev_body1_eq)
  finally show ?thesis unfolding lrev_work3_def by simp
qed

text‹Final syntactic tidy-up.›

fixrec lrev_body_final :: "('a llist  'a H)  'a llist  'a H"
where
  "lrev_body_finalrlnilys = ys"
| "lrev_body_finalr(x :@ xs)ys = rxs(x :@ ys)"

definition
  lrev_work_final :: "'a llist  'a H" where
  "lrev_work_final  fixlrev_body_final"

definition
  lrev_final :: "'a llist  'a llist" where
  "lrev_final  Λ xs. lrev_work_finalxslnil"

lemma lrev_body_final_lrev_body3_eq': "lrev_body_finalrxs = lrev_body3rxs"
  apply (subst lrev_body_final.unfold)
  apply (subst lrev_body3.unfold)
  apply (cases xs)
  apply (simp_all add: list2H_def ID_def cfun_eqI)
  done

lemma lrev_body_final_lrev_body3_eq: "lrev_body_final = lrev_body3"
  by (simp only: lrev_body_final_lrev_body3_eq' cfun_eqI)

lemma lrev_final_lrev_eq: "lrev = lrev_final" (is "?lhs = ?rhs")
proof -
  have "?lhs = lrev_wrap" by (rule lrev_lrev_ww_eq)
  also have " = wrapHlrev_work" by (simp only: lrev_wrap_def)
  also have " = wrapHlrev_work1" by (simp only: lrev_work1_lrev_work_eq)
  also have " = wrapHlrev_work2" by (simp only: lrev_work2_lrev_work1_eq)
  also have " = wrapHlrev_work3" by (simp only: lrev_work3_lrev_work2_eq)
  also have " = wrapHlrev_work_final" by (simp only: lrev_work3_def lrev_work_final_def lrev_body_final_lrev_body3_eq)
  also have " = lrev_final" by (simp add: lrev_final_def cfun_eqI H2list_def wrapH_def)
  finally show ?thesis .
qed

(*<*)
end
(*>*)

Theory UnboxedNats

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory UnboxedNats
imports
  HOLCF
  Nats
  WorkerWrapperNew
begin
(*>*)

section‹Unboxing types.›

text‹The original application of the worker/wrapper transformation
was the unboxing of flat types by \citet{SPJ-JL:1991}. We can model
the boxed and unboxed types as (respectively) pointed and unpointed
domains in HOLCF. Concretely @{typ "UNat"} denotes the discrete domain
of naturals, @{typ "UNat⊥"} the lifted (flat and pointed) variant, and
@{typ "Nat"} the standard boxed domain, isomorphic to @{typ
"UNat⊥"}. This latter distinction helps us keep the boxed naturals and
lifted function codomains separated; applications of @{term "unbox"}
should be thought of in the same way as Haskell's @{term "newtype"}
constructors, i.e. operationally equivalent to @{term "ID"}.

The divergence monad is used to handle the unboxing, see below.›

subsection‹Factorial example.›

text‹Standard definition of factorial.›

fixrec fac :: "Nat  Nat"
where
  "facn = If n =B 0 then 1 else n * fac(n - 1)"

declare fac.simps[simp del]

lemma fac_strict[simp]: "fac = "
by fixrec_simp

definition
  fac_body :: "(Nat  Nat)  Nat  Nat" where
  "fac_body  Λ r n. If n =B 0 then 1 else n * r(n - 1)"

lemma fac_body_strict[simp]: "fac_bodyr = "
  unfolding fac_body_def by simp

lemma fac_fac_body_eq: "fac = fixfac_body"
  unfolding fac_body_def by (rule cfun_eqI, subst fac_def, simp)

text‹Wrap / unwrap functions. Note the explicit lifting of the
co-domain. For some reason the published version of
\citet{GillHutton:2009} does not discuss this point: if we're going to
handle recursive functions, we need a bottom.

@{term "unbox"} simply removes the tag, yielding a possibly-divergent
unboxed value, the result of the function.›

(* unwrap: turn a boxed function into an unboxed one by boxing the
   argument and unboxing the (lifted) result. *)
definition
  unwrapB :: "(Nat  Nat)  UNat  UNat" where
  "unwrapB  Λ f. unbox oo f oo box"

text‹Note that the monadic bind operator @{term "(>>=)"} here stands
in for the \textsf{case} construct in the paper.›

(* wrap: run an unboxed function on a boxed argument via monadic bind,
   re-boxing the result. *)
definition
  wrapB :: "(UNat  UNat)  Nat  Nat" where
  "wrapB  Λ f x . unboxx >>= f >>= box"

(* Worker/wrapper precondition: wrapB oo unwrapB is the identity on
   strict functions (strictness is needed for the bottom case). *)
lemma wrapB_unwrapB_body:
  assumes strictF: "f = "
  shows "(wrapB oo unwrapB)f = f" (is "?lhs = ?rhs")
proof(rule cfun_eqI)
  fix x :: Nat
  have "?lhsx = unboxx >>= (Λ x'. unwrapBfx' >>= box)"
    unfolding wrapB_def by simp
  also have " = unboxx >>= (Λ x'. unbox(f(boxx')) >>= box)"
    unfolding unwrapB_def by simp
  also from strictF have " = fx" by (cases x, simp_all)
  finally show "?lhsx = ?rhsx" .
qed

text‹Apply worker/wrapper.›

(* The unboxed worker obtained by conjugating the body with
   unwrapB/wrapB, and the corresponding wrapped function. *)
definition
  fac_work :: "UNat  UNat" where
  "fac_work  fix(unwrapB oo fac_body oo wrapB)"

definition
  fac_wrap :: "Nat  Nat" where
  "fac_wrap  wrapBfac_work"

(* The wrapped worker equals the original factorial, by the
   worker/wrapper theorem instantiated with the strict body. *)
lemma fac_fac_ww_eq: "fac = fac_wrap" (is "?lhs = ?rhs")
proof -
  have "wrapB oo unwrapB oo fac_body = fac_body"
    using wrapB_unwrapB_body[OF fac_body_strict]
    by - (rule cfun_eqI, simp)
  thus ?thesis
    using worker_wrapper_body[where computation=fac and body=fac_body and wrap=wrapB and unwrap=unwrapB]
    unfolding fac_work_def fac_wrap_def by (simp add: fac_fac_body_eq)
qed

text‹This is not entirely faithful to the paper, as they don't
explicitly handle the lifting of the codomain.›

(* Intermediate body: the conjugated body with boxing/unboxing pushed
   inwards through the conditional. *)
definition
  fac_body' :: "(UNat  UNat)  UNat  UNat" where
  "fac_body'  Λ r n.
     unbox(If boxn =B 0
              then 1
              else unbox(boxn - 1) >>= r >>= (Λ b. boxn * boxb))"

(* fac_body' coincides with the conjugated body; the distribution
   lemmas move the strict multiplication through the binds. *)
lemma fac_body'_fac_body: "fac_body' = unwrapB oo fac_body oo wrapB" (is "?lhs = ?rhs")
proof(rule cfun_eqI)+
  fix r x
  show "?lhsrx = ?rhsrx"
    using bbind_case_distr_strict[where f="Λ y. boxx * y" and g="unbox(boxx - 1)"]
          bbind_case_distr_strict[where f="Λ y. boxx * y" and h="box"]
    unfolding fac_body'_def fac_body_def unwrapB_def wrapB_def by simp
qed

text‹The @{term "up"} constructors here again mediate the
isomorphism, operationally doing nothing. Note the switch to the
machine-oriented \emph{if} construct: the test @{term "n = 0"} cannot
diverge.›

(* The final worker body: all arithmetic is on unboxed naturals
   (-#, *#) and the equality test uses the non-divergent machine-level
   if/then/else. *)
definition
  fac_body_final :: "(UNat  UNat)  UNat  UNat" where
  "fac_body_final  Λ r n.
     if n = 0 then up1 else r(n -# 1) >>= (Λ b. up(n *# b))"

(* The optimised body is equal to the derived body fac_body'. *)
lemma fac_body_final_fac_body': "fac_body_final = fac_body'" (is "?lhs = ?rhs")
proof(rule cfun_eqI)+
  fix r x
  show "?lhsrx = ?rhsrx"
    using bbind_case_distr_strict[where f="unbox" and g="r(x -# 1)" and h="(Λ b. box(x *# b))"]
    unfolding fac_body_final_def fac_body'_def uMinus_def uMult_def zero_Nat_def one_Nat_def
    by simp
qed

definition
  fac_work_final :: "UNat  UNat" where
  "fac_work_final  fixfac_body_final"

(* Final wrapper: unbox the argument, run the worker, box the result. *)
definition
  fac_final :: "Nat  Nat" where
  "fac_final  Λ n. unboxn >>= fac_work_final >>= box"

(* End-to-end correctness: the original boxed factorial equals the
   final unboxed version, by chaining the previous equalities. *)
lemma fac_fac_final: "fac = fac_final" (is "?lhs=?rhs")
proof -
  have "?lhs = fac_wrap" by (rule fac_fac_ww_eq)
  also have " = wrapBfac_work" by (simp only: fac_wrap_def)
  also have " = wrapB(fix(unwrapB oo fac_body oo wrapB))" by (simp only: fac_work_def)
  also have " = wrapB(fixfac_body')" by (simp only: fac_body'_fac_body)
  also have " = wrapBfac_work_final" by (simp only: fac_body_final_fac_body' fac_work_final_def)
  also have " = fac_final" by (simp add: fac_final_def wrapB_def)
  finally show ?thesis .
qed

(* **************************************** *)

subsection‹Introducing an accumulator.›

text‹

The final version of factorial uses unboxed naturals but is not
tail-recursive. We can apply worker/wrapper once more to introduce an
accumulator, similar to \S\ref{sec:accum}.

The monadic machinery complicates things slightly here. We use
\emph{Kleisli composition}, denoted @{term "(>=>)"}, in the
homomorphism.

Firstly we introduce an ``accumulator'' monoid and show the
homomorphism.

›

(* Accumulator representation: a natural as the function that
   multiplies by it (a monoid under Kleisli composition). *)
type_synonym UNatAcc = "UNat  UNat"

definition
  n2a :: "UNat  UNatAcc" where
  "n2a  Λ m n. up(m *# n)"

(* Recover the number by applying the accumulator to 1. *)
definition
  a2n :: "UNatAcc  UNat" where
  "a2n  Λ a. a1"

lemma a2n_strict[simp]: "a2n = "
  unfolding a2n_def by simp

(* a2n is a left inverse of n2a (up to lifting). *)
lemma a2n_n2a: "a2n(n2au) = upu"
  unfolding a2n_def n2a_def by (simp add: uMult_arithmetic)

(* The homomorphism: multiplication maps to Kleisli composition. *)
lemma A_hom_mult: "n2a(x *# y) = (n2ax >=> n2ay)"
  unfolding n2a_def bKleisli_def by (simp add: uMult_arithmetic)

(* unwrap into the accumulator representation: bind the (lifted) result
   through n2a. *)
definition
  unwrapA :: "(UNat  UNat)  UNat  UNatAcc" where
  "unwrapA  Λ f n. fn >>= n2a"

lemma unwrapA_strict[simp]: "unwrapA = "
  unfolding unwrapA_def by (rule cfun_eqI) simp

(* wrap back out of the accumulator representation via a2n. *)
definition
  wrapA :: "(UNat  UNatAcc)  UNat  UNat" where
  "wrapA  Λ f. a2n oo f"

(* Worker/wrapper precondition: here the composition is the full
   identity (no strictness side condition needed). *)
lemma wrapA_unwrapA_id: "wrapA oo unwrapA = ID"
  unfolding wrapA_def unwrapA_def
  apply (rule cfun_eqI)+
  apply (case_tac "xxa")
  apply (simp_all add: a2n_n2a)
  done

text‹Some steps along the way.›

(* Step 1: fac_body_final conjugated with wrapA/unwrapA. *)
definition
  fac_acc_body1 :: "(UNat  UNatAcc)  UNat  UNatAcc" where
  "fac_acc_body1  Λ r n.
     if n = 0 then n2a1 else wrapAr(n -# 1) >>= (Λ res. n2a(n *# res))"

lemma fac_acc_body1_fac_body_final_eq: "fac_acc_body1 = unwrapA oo fac_body_final oo wrapA"
  unfolding fac_acc_body1_def fac_body_final_def wrapA_def unwrapA_def
  by (rule cfun_eqI)+ simp

text‹Use the homomorphism.›

(* Step 2: rewrite the multiplication as Kleisli composition of n2a,
   justified by the homomorphism lemma A_hom_mult. *)
definition
  fac_acc_body2 :: "(UNat  UNatAcc)  UNat  UNatAcc" where
  "fac_acc_body2  Λ r n.
     if n = 0 then n2a1 else wrapAr(n -# 1) >>= (Λ res. n2an >=> n2ares)"

lemma fac_acc_body2_body1_eq: "fac_acc_body2 = fac_acc_body1"
  unfolding fac_acc_body1_def fac_acc_body2_def
  by (rule cfun_eqI)+ (simp add: A_hom_mult)

text‹Apply worker/wrapper.›

(* Step 3: the accumulator-passing body, with the residual
   unwrapA oo wrapA pair isolated on the recursive call. *)
definition
  fac_acc_body3 :: "(UNat  UNatAcc)  UNat  UNatAcc" where
  "fac_acc_body3  Λ r n.
     if n = 0 then n2a1 else n2an >=> r(n -# 1)"

lemma fac_acc_body3_body2: "fac_acc_body3 oo (unwrapA oo wrapA) = fac_acc_body2" (is "?lhs=?rhs")
proof(rule cfun_eqI)+
  fix r n acc
  show "((fac_acc_body3 oo (unwrapA oo wrapA))rnacc) = fac_acc_body2rnacc"
    unfolding fac_acc_body2_def fac_acc_body3_def unwrapA_def
    using bbind_case_distr_strict[where f="Λ y. n2an >=> y" and h="n2a", symmetric]
    by simp
qed

(* Worker/wrapper fusion removes the intermediate unwrapA oo wrapA. *)
lemma fac_work_final_body3_eq: "fac_work_final = wrapA(fixfac_acc_body3)"
  unfolding fac_work_final_def
  by (rule worker_wrapper_fusion_new[OF wrapA_unwrapA_id unwrapA_strict])
     (simp add: fac_acc_body3_body2 fac_acc_body2_body1_eq fac_acc_body1_fac_body_final_eq)

(* The tail-recursive body: the accumulator acc is threaded through the
   recursive call directly. *)
definition
  fac_acc_body_final :: "(UNat  UNatAcc)  UNat  UNatAcc" where
  "fac_acc_body_final  Λ r n acc.
     if n = 0 then upacc else r(n -# 1)(n *# acc)"

(* Run the accumulator worker with initial accumulator 1. *)
definition
  fac_acc_work_final :: "UNat  UNat" where
  "fac_acc_work_final  Λ x. fixfac_acc_body_finalx1"

lemma fac_acc_work_final_fac_acc_work3_eq: "fac_acc_body_final = fac_acc_body3" (is "?lhs=?rhs")
  unfolding fac_acc_body3_def fac_acc_body_final_def n2a_def bKleisli_def
  by (rule cfun_eqI)+
     (simp add: uMult_arithmetic)

(* The accumulator version computes the same function as the earlier
   unboxed worker. *)
lemma fac_acc_work_final_fac_work: "fac_acc_work_final = fac_work_final" (is "?lhs=?rhs")
proof -
  have "?rhs = wrapA(fixfac_acc_body3)" by (rule fac_work_final_body3_eq)
  also have " = wrapA(fixfac_acc_body_final)"
    using fac_acc_work_final_fac_acc_work3_eq by simp
  also have " = ?lhs"
    unfolding fac_acc_work_final_def wrapA_def a2n_def
    by (simp add: cfcomp1)
  finally show ?thesis by simp
qed

(*<*)
end
(*>*)

Theory Streams

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory Streams
imports
  HOLCF
  Nats
  WorkerWrapper
begin
(*>*)

section‹Memoisation using streams.›

text ‹
\label{sec:memoisation-example}›

subsection‹Streams.›

text‹The type of infinite streams.›

(* Infinite streams: both head and tail are lazy fields. *)
domain 'a Stream = stcons (lazy sthead :: "'a") (lazy sttail :: "'a Stream") (infixr "&&" 65)

(*<*)
(* Introduction rule for stream bisimulations, used by the coinductive
   proofs below. *)
lemma Stream_bisimI[intro]:
  "(xs ys. R xs ys
      (xs =   ys = )
        (h t t'. R t t'  xs = h && t  ys = h && t'))
   Stream_bisim R"
  unfolding Stream.bisim_def by auto
(*>*)

(* Pointwise map over streams. *)
fixrec smap :: "('a  'b)  'a Stream  'b Stream"
where
  "smapf(x && xs) = fx && smapfxs"

(*<*)
lemma smap_strict[simp]: "smapf = "
by fixrec_simp
(*>*)

(* Map fusion for streams. *)
lemma smap_smap: "smapf(smapgxs) = smap(f oo g)xs"
(*<*) by (induct xs, simp_all) (*>*)

(* Index into a stream with a boxed natural. *)
fixrec i_th :: "'a Stream  Nat  'a"
where
  "i_th(x && xs) = Nat_casex(i_thxs)"

abbreviation
  i_th_syn :: "'a Stream  Nat  'a" (infixl "!!" 100) where
  "s !! i  i_thsi"

(*<*)
(* Strictness and unfolding facts for indexing. *)
lemma i_th_strict1[simp]: " !! i = "
by fixrec_simp

lemma i_th_strict2[simp]: "s !!  = " by (subst i_th.unfold, cases s, simp_all)
lemma i_th_0[simp]: "(s !! 0) = stheads" by (subst i_th.unfold, cases s, simp_all)
lemma i_th_suc[simp]: "i    (x && xs) !! (i + 1) = xs !! i" by (subst i_th.unfold, simp)
(*>*)

text‹The infinite stream of natural numbers.›

(* nats = 0 && 1 && 2 && ..., defined corecursively via smap. *)
fixrec nats :: "Nat Stream"
where
  "nats = 0 && smap(Λ x. 1 + x)nats"

(*<*)
declare nats.simps[simp del]
(*>*)

subsection‹The wrapper/unwrapper functions.›

(* unwrapS': tabulate a function over the stream of all naturals. *)
definition
  unwrapS' :: "(Nat  'a)  'a Stream" where
  "unwrapS'  Λ f . smapfnats"

lemma unwrapS'_unfold: "unwrapS'f = f0 && smap(f oo (Λ x. 1 + x))nats"
(*<*)by (unfold unwrapS'_def, subst nats.unfold, simp add: smap_smap)(*>*)

(* Recursive formulation of the same tabulation; this is the "unwrap"
   used in the worker/wrapper setup. *)
fixrec unwrapS :: "(Nat  'a)  'a Stream"
where
  "unwrapSf = f0 && unwrapS(f oo (Λ x. 1 + x))"

(*<*)
declare unwrapS.simps[simp del]
(*>*)

text‹The two versions of @{term "unwrapS"} are equivalent. We could
try to fold some definitions here but it's easier if the stream
constructor is manifest.›

(* The recursive and smap-based tabulations coincide; proved by stream
   coinduction with the evident bisimulation relation. *)
lemma unwrapS_unwrapS'_eq: "unwrapS = unwrapS'" (is "?lhs = ?rhs")
proof(rule cfun_eqI)
  fix f show "?lhsf = ?rhsf"
  proof(coinduct rule: Stream.coinduct)
    let ?R = "λs s'. (f. s = f0 && unwrapS(f oo (Λ x. 1 + x))
                         s' = f0 && smap(f oo (Λ x. 1 + x))nats)"
    show "Stream_bisim ?R"
    proof
      fix s s' assume "?R s s'"
      then obtain f where fs:  "s = f0 && unwrapS(f oo (Λ x. 1 + x))"
                      and fs': "s' = f0 && smap(f oo (Λ x. 1 + x))nats"
        by blast
      have "?R (unwrapS(f oo (Λ x. 1 + x))) (smap(f oo (Λ x. 1 + x))nats)"
        by ( rule exI[where x="f oo (Λ x. 1 + x)"]
           , subst unwrapS.unfold, subst nats.unfold, simp add: smap_smap)
      with fs fs'
      show "(s =   s' = )
           (h t t'.
              (f. t = f0 && unwrapS(f oo (Λ x. 1 + x))
                  t' = f0 && smap(f oo (Λ x. 1 + x))nats)
                  s = h && t  s' = h && t')" by best
    qed
    show "?R (?lhsf) (?rhsf)"
    proof -
      have lhs: "?lhsf = f0 && unwrapS(f oo (Λ x. 1 + x))" by (subst unwrapS.unfold, simp)
      have rhs: "?rhsf = f0 && smap(f oo (Λ x. 1 + x))nats" by (rule unwrapS'_unfold)
      from lhs rhs show ?thesis by best
    qed
  qed
qed

(* wrapS: index into the memo stream. *)
definition
  wrapS :: "'a Stream  Nat  'a" where
  "wrapS  Λ s i . s !! i"

text‹Note the identity requires that @{term "f"} be
strict. \citet[\S6.1]{GillHutton:2009} do not make this requirement,
an oversight on their part.

In practice all functions worth memoising are strict in the memoised
argument.›

(* Pointwise worker/wrapper identity, by induction on the index; the
   strictness assumption handles the bottom case. *)
lemma wrapS_unwrapS_id':
  assumes strictF: "(f::Nat  'a) = "
  shows "unwrapSf !! n = fn"
using strictF
proof(induct n arbitrary: f rule: Nat_induct)
  case bottom with strictF show ?case by simp
next
  case zero thus ?case by (subst unwrapS.unfold, simp)
next
  case (Suc i f)
  have "unwrapSf !! (i + 1) = (f0 && unwrapS(f oo (Λ x. 1 + x))) !! (i + 1)"
    by (subst unwrapS.unfold, simp)
  also from Suc have " = unwrapS(f oo (Λ x. 1 + x)) !! i" by simp
  also from Suc have " = (f oo (Λ x. 1 + x))i" by simp
  also have " = f(i + 1)" by (simp add: plus_commute)
  finally show ?case .
qed

(* Worker/wrapper precondition for memoisation, for strict f. *)
lemma wrapS_unwrapS_id: "f =   (wrapS oo unwrapS)f = f"
  by (rule cfun_eqI, simp add: wrapS_unwrapS_id' wrapS_def)

subsection‹Fibonacci example.›

(* Fibonacci body: case analysis on 0, 1, and successor-of-successor. *)
definition
  fib_body :: "(Nat  Nat)  Nat  Nat" where
  "fib_body  Λ r. Nat_case1(Nat_case1(Λ n. rn + r(n + 1)))"

(*<*)
lemma fib_body_strict[simp]: "fib_bodyr = " by (simp add: fib_body_def)
(*>*)

definition
  fib :: "Nat  Nat" where
  "fib  fixfib_body"

(*<*)
lemma fib_strict[simp]: "fib = "
  by (unfold fib_def, subst fix_eq, fold fib_def, simp add: fib_body_def)
(*>*)

text‹Apply worker/wrapper.›

(* The memoising worker: a stream of Fibonacci numbers. *)
definition
  fib_work :: "Nat Stream" where
  "fib_work  fix(unwrapS oo fib_body oo wrapS)"

definition
  fib_wrap :: "Nat  Nat" where
  "fib_wrap  wrapSfib_work"

(* The precondition holds because fib_body always yields a strict
   function, so wrapS_unwrapS_id applies. *)
lemma wrapS_unwrapS_fib_body: "wrapS oo unwrapS oo fib_body = fib_body"
proof(rule cfun_eqI)
  fix r show "(wrapS oo unwrapS oo fib_body)r = fib_bodyr"
    using wrapS_unwrapS_id[where f="fib_bodyr"] by simp
qed

lemma fib_ww_eq: "fib = fib_wrap"
  using worker_wrapper_body[OF wrapS_unwrapS_fib_body]
  by (simp add: fib_def fib_wrap_def fib_work_def)

text‹Optimise.›

(* Mutually-recursive final form: the memo stream and the function that
   reads back into it.  Simp rules removed to keep unfolding manual. *)
fixrec
  fib_work_final :: "Nat Stream"
and
  fib_f_final :: "Nat  Nat"
where
  "fib_work_final = smapfib_f_finalnats"
| "fib_f_final = Nat_case1(Nat_case1(Λ n'. fib_work_final !! n' + fib_work_final !! (n' + 1)))"

declare fib_f_final.simps[simp del] fib_work_final.simps[simp del]

definition
  fib_final :: "Nat  Nat" where
  "fib_final  Λ n. fib_work_final !! n"

text‹

This proof is only fiddly due to the way mutual recursion is encoded:
we need to use Beki\'{c}'s Theorem \citep{Bekic:1969}\footnote{The
interested reader can find some historical commentary in
\citet{Harel:1980, DBLP:journals/toplas/Sangiorgi09}.} to massage the
definitions into their final form.

›

(* Disentangle the mutual recursion with Bekić's theorem (fix_cprod):
   the paired fixed point is massaged until it matches the worker
   produced by the worker/wrapper transformation. *)
lemma fib_work_final_fib_work_eq: "fib_work_final = fib_work" (is "?lhs = ?rhs")
proof -
  let ?wb = "Λ r. Nat_case1(Nat_case1(Λ n'. r !! n' + r !! (n' + 1)))"
  let ?mr = "Λ (fwf :: Nat Stream, fff). (smapfffnats, ?wbfwf)"
  have "?lhs = fst (fix?mr)"
    by (simp add: fib_work_final_def split_def csplit_def)
  also have " = (μ fwf. fst (?mr(fwf, μ fff. snd (?mr(fwf, fff)))))"
    using fix_cprod[where F="?mr"] by simp
  also have " = (μ fwf. smap(μ fff. ?wbfwf)nats)" by simp
  also have " = (μ fwf. smap(?wbfwf)nats)" by (simp add: fix_const)
  also have " = ?rhs"
    unfolding fib_body_def fib_work_def unwrapS_unwrapS'_eq unwrapS'_def wrapS_def
    by (simp add: cfcomp1)
  finally show ?thesis .
qed

(* The memoised version equals the original fib. *)
lemma fib_final_fib_eq: "fib_final = fib" (is "?lhs = ?rhs")
proof -
  have "?lhs = (Λ n. fib_work_final !! n)" by (simp add: fib_final_def)
  also have " = (Λ n. fib_work !! n)" by (simp only: fib_work_final_fib_work_eq)
  also have " = fib_wrap" by (simp add: fib_wrap_def wrapS_def)
  also have " = ?rhs" by (simp only: fib_ww_eq)
  finally show ?thesis .
qed

(*<*)
end
(*>*)

Theory Continuations

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory Continuations
imports
  HOLCF
  Maybe
  Nats
  WorkerWrapperNew
begin
(*>*)

section‹Tagless interpreter via double-barreled continuations›

text‹\label{sec:continuations}›

(* Single-barrelled continuation type with answer type 'a. *)
type_synonym 'a Cont = "('a  'a)  'a"

(* Inject a value into the continuation type. *)
definition
  val2cont :: "'a  'a Cont" where
  "val2cont  (Λ a c. ca)"

(* Run a continuation by supplying the identity continuation. *)
definition
  cont2val :: "'a Cont  'a" where
  "cont2val  (Λ f. fID)"

lemma cont2val_val2cont_id: "cont2val oo val2cont = ID"
  by (rule cfun_eqI, simp add: val2cont_def cont2val_def)

(* Object language: values, addition, exception throwing and catching. *)
domain Expr =
    Val (lazy val::"Nat")
  | Add (lazy addl::"Expr") (lazy addr::"Expr")
  | Throw
  | Catch (lazy cbody::"Expr") (lazy chandler::"Expr")

(* Direct-style evaluator into the Maybe monad (Nothing = exception). *)
fixrec eval :: "Expr  Nat Maybe"
where
  "eval(Valn) = Justn"
| "eval(Addxy) = mliftM2 (Λ a b. a + b)(evalx)(evaly)"
| "evalThrow = mfail"
| "eval(Catchxy) = mcatch(evalx)(evaly)"

(* Its body functional, for the worker/wrapper transformation. *)
fixrec eval_body :: "(Expr  Nat Maybe)  Expr  Nat Maybe"
where
  "eval_bodyr(Valn) = Justn"
| "eval_bodyr(Addxy) = mliftM2 (Λ a b. a + b)(rx)(ry)"
| "eval_bodyrThrow = mfail"
| "eval_bodyr(Catchxy) = mcatch(rx)(ry)"

lemma eval_body_strictExpr[simp]: "eval_bodyr = "
  by (subst eval_body.unfold, simp)

lemma eval_eval_body_eq: "eval = fixeval_body"
  by (rule cfun_eqI, subst eval_def, subst eval_body.unfold, simp)

subsection‹Worker/wrapper›

(* unwrap: convert a Maybe-evaluator into one taking a success
   continuation s and a failure computation f. *)
definition
  unwrapC :: "(Expr  Nat Maybe)  (Expr  (Nat  Nat Maybe)  Nat Maybe  Nat Maybe)" where
  "unwrapC  Λ g e s f. case ge of Nothing  f | Justn  sn"

lemma unwrapC_strict[simp]: "unwrapC = "
  unfolding unwrapC_def by (rule cfun_eqI)+ simp

(* wrap: recover a Maybe-evaluator by supplying Just/Nothing as the
   two continuations. *)
definition
  wrapC :: "(Expr  (Nat  Nat Maybe)  Nat Maybe  Nat Maybe)  (Expr  Nat Maybe)" where
  "wrapC  Λ g e. geJustNothing"

(* Worker/wrapper precondition, by case analysis on the Maybe result. *)
lemma wrapC_unwrapC_id: "wrapC oo unwrapC = ID"
proof(intro cfun_eqI)
  fix g e
  show "(wrapC oo unwrapC)ge = IDge"
    by (cases "ge", simp_all add: wrapC_def unwrapC_def)
qed

definition
  eval_work :: "Expr  (Nat  Nat Maybe)  Nat Maybe  Nat Maybe" where
  "eval_work  fix(unwrapC oo eval_body oo wrapC)"

definition
  eval_wrap :: "Expr  Nat Maybe" where
  "eval_wrap  wrapCeval_work"

(* Intermediate continuation body: the conjugated body with the Maybe
   case analyses still explicit (via wrapC on the recursive calls). *)
fixrec eval_body' :: "(Expr  (Nat  Nat Maybe)  Nat Maybe  Nat Maybe)
                     Expr  (Nat  Nat Maybe)  Nat Maybe  Nat Maybe"
where
  "eval_body'r(Valn)sf = sn"
| "eval_body'r(Addxy)sf = (case wrapCrx of
                                     Nothing  f
                                   | Justn  (case wrapCry of
                                                    Nothing  f
                                                  | Justm  s(n + m)))"
| "eval_body'rThrowsf = f"
| "eval_body'r(Catchxy)sf = (case wrapCrx of
                                       Nothing  (case wrapCry of
                                                      Nothing  f
                                                    | Justn  sn)
                                     | Justn  sn)"

lemma eval_body'_strictExpr[simp]: "eval_body'rsf = "
  by (subst eval_body'.unfold, simp)

definition
  eval_work' :: "Expr  (Nat  Nat Maybe)  Nat Maybe  Nat Maybe" where
  "eval_work'  fixeval_body'"

text‹This proof is unfortunately quite messy, due to the
simplifier's inability to cope with HOLCF's case distinctions.›

(* eval_body' coincides with the conjugated body, by exhaustive case
   analysis on the expression and the Maybe results of the subcalls. *)
lemma eval_body'_eval_body_eq: "eval_body' = unwrapC oo eval_body oo wrapC"
  apply (intro cfun_eqI)
  apply (unfold unwrapC_def wrapC_def)
  apply (case_tac xa)
      apply simp_all
    apply (simp add: wrapC_def)
    apply (case_tac "xExpr1JustNothing")
     apply simp_all
    apply (case_tac "xExpr2JustNothing")
     apply simp_all
   apply (simp add: mfail_def)
  apply (simp add: mcatch_def wrapC_def)
  apply (case_tac "xExpr1JustNothing")
   apply simp_all
  done

(* Final tagless body: the Maybe case analyses are replaced by passing
   the success/failure continuations directly to the recursive calls. *)
fixrec eval_body_final :: "(Expr  (Nat  Nat Maybe)  Nat Maybe  Nat Maybe)
                          Expr  (Nat  Nat Maybe)  Nat Maybe  Nat Maybe"
where
  "eval_body_finalr(Valn)sf = sn"
| "eval_body_finalr(Addxy)sf = rx(Λ n. ry(Λ m. s(n + m))f)f"
| "eval_body_finalrThrowsf = f"
| "eval_body_finalr(Catchxy)sf = rxs(rysf)"

lemma eval_body_final_strictExpr[simp]: "eval_body_finalrsf = "
  by (subst eval_body_final.unfold, simp)

(* Fusion precondition relating the final body to eval_body'. *)
lemma eval_body'_eval_body_final_eq: "eval_body_final oo unwrapC oo wrapC = eval_body'"
  apply (rule cfun_eqI)+
  apply (case_tac xa)
     apply (simp_all add: unwrapC_def)
  done

definition
  eval_work_final :: "Expr  (Nat  Nat Maybe)  Nat Maybe  Nat Maybe" where
  "eval_work_final  fixeval_body_final"

(* The final evaluator: run the worker with Just/Nothing. *)
definition
  eval_final :: "Expr  Nat Maybe" where
  "eval_final  (Λ e. eval_work_finaleJustNothing)"

(* Main theorem: the direct evaluator equals the double-barrelled
   continuation evaluator, via the all-in-one fusion rule. *)
lemma "eval = eval_final"
proof -
  have "eval = fixeval_body" by (rule eval_eval_body_eq)
  also from wrapC_unwrapC_id unwrapC_strict have " = wrapC(fixeval_body_final)"
    apply (rule worker_wrapper_fusion_new)
    using eval_body'_eval_body_final_eq eval_body'_eval_body_eq by simp
  also have " = eval_final"
    unfolding eval_final_def eval_work_final_def wrapC_def
    by simp
  finally show ?thesis .
qed

(*<*)
end
(*>*)

Theory Backtracking

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory Backtracking
imports
  HOLCF
  Nats
  WorkerWrapperNew
begin

(*>*)
section‹Backtracking using lazy lists and continuations›

text‹
\label{sec:ww-backtracking}

To illustrate the utility of worker/wrapper fusion to programming
language semantics, we consider here the first-order part of a
higher-order backtracking language by \citet{DBLP:conf/icfp/WandV04};
see also \citet{DBLP:journals/ngc/DanvyGR01}. We refer the reader to
these papers for a broader motivation for these languages.

As syntax is typically considered to be inductively generated, with
each syntactic object taken to be finite and completely defined, we
define the syntax for our language using a HOL datatype:

›

(* First-order backtracking language: constants, addition, disjunctive
   choice and failure, as an inductive HOL datatype. *)
datatype expr = const nat | add expr expr | disj expr expr | fail
(*<*)

(* Continuity of case analysis over the (HOL-level) expr datatype. *)
lemma case_expr_cont[cont2cont]:
  assumes f1: "y. cont (λx. f1 x y)"
  assumes f2: "y z. cont (λx. f2 x y z)"
  assumes f3: "y z. cont (λx. f3 x y z)"
  assumes f4: "cont (λx. f4 x)"
  shows "cont (λx. case_expr (f1 x) (f2 x) (f3 x) (f4 x) expr)"
  using assms by (cases expr) simp_all

(* Presumably obsolete in the HG version, not so in Isabelle2011. *)

(* Injective encoding of expr into nat, used to establish countability
   (required to form discrete domains over expr). *)
fun
  expr_encode :: "expr  nat"
where
  "expr_encode (const n) = prod_encode (0, n)"
| "expr_encode (add e1 e2) = prod_encode (1, (prod_encode (expr_encode e1, expr_encode e2)))"
| "expr_encode (disj e1 e2) = prod_encode (2, (prod_encode (expr_encode e1, expr_encode e2)))"
| "expr_encode fail = prod_encode (3, 0)"

lemma expr_encode_inj:
  "expr_encode x = expr_encode y  x = y"
  by (induct x arbitrary: y) ((case_tac y, auto dest!: inj_onD[OF inj_prod_encode, where A=UNIV])[1])+

instance expr :: countable
  by (rule countable_classI[OF expr_encode_inj])

(*>*)
text‹

The language consists of constants, an addition function, a
disjunctive choice between expressions, and failure. We give it a
direct semantics using the monad of lazy lists of natural numbers,
with the goal of deriving an extensionally-equivalent evaluator
that uses double-barrelled continuations.

Our theory of lazy lists is entirely standard.

›

default_sort predomain

(* Lazy lists: head and tail are both lazy, so this covers finite,
   partial and infinite lists. *)
domain 'a llist =
    lnil
  | lcons (lazy "'a") (lazy "'a llist")

text‹

By relaxing the default sort of type variables to predomain›,
our polymorphic definitions can be used at concrete types that do not
contain @{term ""}. These include those constructed from HOL types
using the discrete ordering type constructor @{typ "'a discr"}, and in
particular our interpretation @{typ "nat discr"} of the natural
numbers.

The following standard list functions underpin the monadic
infrastructure:

›

(* Standard lazy-list combinators: append, concatenation, map. *)
fixrec lappend :: "'a llist  'a llist  'a llist" where
  "lappendlnilys = ys"
| "lappend(lconsxxs)ys = lconsx(lappendxsys)"

fixrec lconcat :: "'a llist llist  'a llist" where
  "lconcatlnil = lnil"
| "lconcat(lconsxxs) = lappendx(lconcatxs)"

fixrec lmap :: "('a  'b)  'a llist  'b llist" where
  "lmapflnil = lnil"
| "lmapf(lconsxxs) = lcons(fx)(lmapfxs)"
(*<*)

(* Strictness facts for the list combinators. *)
lemma lappend_strict'[simp]: "lappend = (Λ a. )"
  by fixrec_simp

lemma lconcat_strict[simp]: "lconcat = "
  by fixrec_simp

lemma lmap_strict[simp]: "lmapf = "
  by fixrec_simp

(*>*)
text‹

We define the lazy list monad S› in the traditional fashion:

›

(* The lazy-list monad S at the fixed element type nat discr. *)
type_synonym S = "nat discr llist"

definition returnS :: "nat discr  S" where
  "returnS = (Λ x. lconsxlnil)"

definition bindS :: "S  (nat discr  S)  S" where
  "bindS = (Λ x g. lconcat(lmapgx))"

text‹

Unfortunately the lack of higher-order polymorphism in HOL prevents us
from providing the general typing one would expect a monad to have in
Haskell.

The evaluator uses the following extra constants:

›

(* Pointwise addition of all results, choice as append, failure as the
   empty list. *)
definition addS :: "S  S  S" where
  "addS  (Λ x y. bindSx(Λ xv. bindSy(Λ yv. returnS(xv + yv))))"

definition disjS :: "S  S  S" where
  "disjS  lappend"

definition failS :: "S" where
  "failS  lnil"

text‹

We interpret our language using these combinators in the obvious
way. The only complication is that, even though our evaluator is
primitive recursive, we must explicitly use the fixed point operator
as the worker/wrapper technique requires us to talk about the body of
the recursive definition.

›

(* Direct semantics into the lazy-list monad; the body is exposed so
   the worker/wrapper machinery can talk about the fixed point. *)
definition
  evalS_body :: "(expr discr  nat discr llist)
               (expr discr  nat discr llist)"
where
  "evalS_body  Λ r e. case undiscr e of
     const n  returnS(Discr n)
   | add e1 e2  addS(r(Discr e1))(r(Discr e2))
   | disj e1 e2  disjS(r(Discr e1))(r(Discr e2))
   | fail  failS"

abbreviation evalS :: "expr discr  nat discr llist" where
  "evalS  fixevalS_body"

text‹

We aim to transform this evaluator into one using double-barrelled
continuations; one will serve as a "success" context, taking a natural
number into "the rest of the computation", and the other outright
failure.

In general we could work with an arbitrary observation type à la
\citet{DBLP:conf/icalp/Reynolds74}, but for convenience we use the
clearly adequate concrete type @{typ "nat discr llist"}.

›

(* Double-barrelled continuation types: a success continuation takes a
   result and a failure computation; K takes both barrels. *)
type_synonym Obs = "nat discr llist"
type_synonym Failure = "Obs"
type_synonym Success = "nat discr  Failure  Obs"
type_synonym K = "Success  Failure  Obs"

text‹

To ease our development we adopt what
\citet[\S5]{DBLP:conf/icfp/WandV04} call a "failure computation"
instead of a failure continuation, which would have the type @{typ
"unit  Obs"}.

The monad over the continuation type @{typ "K"} is as follows:

›

(* The continuation monad over K. *)
definition returnK :: "nat discr  K" where
  "returnK  (Λ x. Λ s f. sxf)"

definition bindK :: "K  (nat discr  K)  K" where
  "bindK  Λ x g. Λ s f. x(Λ xv f'. gxvsf')f"

text‹

Our extra constants are defined as follows:

›

(* addK mirrors addS; disjK tries g and falls back to h on failure;
   failK invokes the failure barrel. *)
definition addK :: "K  K  K" where
  "addK  (Λ x y. bindKx(Λ xv. bindKy(Λ yv. returnK(xv + yv))))"

definition disjK :: "K  K  K" where
  "disjK  (Λ g h. Λ s f. gs(hsf))"

definition failK :: "K" where
  "failK  Λ s f. f"

text‹

The continuation semantics is again straightforward:

›

(* Continuation-passing semantics, with the same body shape as evalS. *)
definition
  evalK_body :: "(expr discr  K)  (expr discr  K)"
where
  "evalK_body  Λ r e. case undiscr e of
     const n  returnK(Discr n)
   | add e1 e2  addK(r(Discr e1))(r(Discr e2))
   | disj e1 e2  disjK(r(Discr e1))(r(Discr e2))
   | fail  failK"

abbreviation evalK :: "expr discr  K" where
  "evalK  fixevalK_body"

text‹

We now set up a worker/wrapper relation between these two semantics.

The kernel of @{term "unwrap"} is the following function that converts
a lazy list into an equivalent continuation representation.

›

(* SK: convert a lazy list into its continuation representation. *)
fixrec SK :: "S  K" where
  "SKlnil = failK"
| "SK(lconsxxs) = (Λ s f. sx(SKxssf))"

definition
  unwrap :: "(expr discr  nat discr llist)  (expr discr  K)"
where
  "unwrap  Λ r e. SK(re)"
(*<*)

lemma SK_strict[simp]: "SK = "
  by fixrec_simp

lemma unwrap_strict[simp]: "unwrap = "
  unfolding unwrap_def by simp

(*>*)
text‹

Symmetrically @{term "wrap"} converts an evaluator using continuations
into one generating lazy lists by passing it the right continuations.

›

(* KS: run a continuation with lcons/lnil as the two barrels. *)
definition KS :: "K  S" where
  "KS  (Λ k. klconslnil)"

definition wrap :: "(expr discr  K)  (expr discr  nat discr llist)" where
  "wrap  Λ r e. KS(re)"
(*<*)

lemma KS_strict[simp]: "KS = "
  unfolding KS_def by simp

lemma wrap_strict[simp]: "wrap = "
  unfolding wrap_def by simp

(*>*)
(*>*)
text‹

The worker/wrapper condition follows directly from these definitions.

›

(* KS inverts SK on lazy lists, giving the worker/wrapper condition. *)
lemma KS_SK_id:
  "KS(SKxs) = xs"
  by (induct xs) (simp_all add: KS_def failK_def)

lemma wrap_unwrap_id:
  "wrap oo unwrap = ID"
  unfolding wrap_def unwrap_def
  by (simp add: KS_SK_id cfun_eq_iff)

text‹

The worker/wrapper transformation is only non-trivial if @{term
"wrap"} and @{term "unwrap"} do not witness an isomorphism. In this
case we can show that we do not even have a Galois connection.

›

lemma cfun_not_below:
  "fx \<notsqsubseteq> gx  f \<notsqsubseteq> g"
  by (auto simp: cfun_below_iff)

(* The other composition is not below the identity: exhibited by a
   continuation that ignores both barrels. *)
lemma unwrap_wrap_not_under_id:
  "unwrap oo wrap \<notsqsubseteq> ID"
proof -
  let ?witness = "Λ e. (Λ s f. lnil :: K)"
  have "(unwrap oo wrap)?witness(Discr fail)(lcons0lnil)
       \<notsqsubseteq> ?witness(Discr fail)(lcons0lnil)"
    by (simp add: failK_def wrap_def unwrap_def KS_def)
  hence "(unwrap oo wrap)?witness \<notsqsubseteq> ?witness"
    by (fastforce intro!: cfun_not_below)
  thus ?thesis by (simp add: cfun_not_below)
qed

text‹

We now apply \texttt{worker\_wrapper\_id}:

›

(* Plain worker/wrapper (no fusion yet): the conjugated worker and its
   wrapped form equal the direct semantics. *)
definition eval_work :: "expr discr  K" where
  "eval_work  fix(unwrap oo evalS_body oo wrap)"

definition eval_ww :: "expr discr  nat discr llist" where
  "eval_ww  wrapeval_work"

lemma "evalS = eval_ww"
  unfolding eval_ww_def eval_work_def
  using worker_wrapper_id[OF wrap_unwrap_id]
  by simp

text‹

We now show how the monadic operations correspond by showing that
@{term "SK"} witnesses a \emph{monad morphism}
\citep[\S6]{wadler92:_comprehending_monads}. As required by
\citet[Definition~2.1]{DBLP:journals/ngc/DanvyGR01}, the mapping needs
to hold for our specific operations in addition to the common monadic
scaffolding.

›

(* SK maps the list-monad return to the continuation-monad return. *)
lemma SK_returnS_returnK:
  "SK(returnSx) = returnKx"
  by (simp add: returnS_def returnK_def failK_def)

(* SK turns lappend into continuation composition: the second list's
   continuation becomes the failure continuation of the first. *)
lemma SK_lappend_distrib:
 "SK(lappendxsys)sf = SKxss(SKyssf)"
  by (induct xs) (simp_all add: failK_def)

(* SK maps the list-monad bind to the continuation-monad bind. *)
lemma SK_bindS_bindK:
  "SK(bindSxg) = bindK(SKx)(SK oo g)"
  by (induct x)
     (simp_all add: cfun_eq_iff
                    bindS_def bindK_def failK_def
                    SK_lappend_distrib)

(* SK maps addS to addK, using the bind and return morphism lemmas. *)
lemma SK_addS_distrib:
  "SK(addSxy) = addK(SKx)(SKy)"
  by (clarsimp simp: cfcomp1
                     addS_def addK_def failK_def
                     SK_bindS_bindK SK_returnS_returnK)

(* SK maps disjS to disjK, via the lappend distribution lemma. *)
lemma SK_disjS_disjK:
 "SK(disjSxsys) = disjK(SKxs)(SKys)"
  by (simp add: cfun_eq_iff disjS_def disjK_def SK_lappend_distrib)

(* SK maps the failing computation to the failing continuation. *)
lemma SK_failS_failK:
  "SKfailS = failK"
  unfolding failS_def by simp

text‹

These lemmas directly establish the precondition for our all-in-one
worker/wrapper and fusion rule:

›

(* The two evaluator bodies are conjugate under unwrap/wrap.  This is
   the precondition of the combined worker/wrapper-and-fusion rule.
   Proved pointwise by case analysis on the expression, discharging
   each case with the monad-morphism lemmas above. *)
lemma evalS_body_evalK_body:
  "unwrap oo evalS_body oo wrap = evalK_body oo unwrap oo wrap"
proof(intro cfun_eqI)
  fix r e' s f
  (* Strip the discrete-domain constructor so we can case on the syntax. *)
  obtain e :: "expr"
    where ee': "e' = Discr e" by (cases e')
  have "(unwrap oo evalS_body oo wrap)r(Discr e)sf
      = (evalK_body oo unwrap oo wrap)r(Discr e)sf"
    by (cases e)
       (simp_all add: evalS_body_def evalK_body_def unwrap_def
                      SK_returnS_returnK SK_addS_distrib
                      SK_disjS_disjK SK_failS_failK)
  with ee' show "(unwrap oo evalS_body oo wrap)re'sf
                = (evalK_body oo unwrap oo wrap)re'sf"
    by simp
qed

(* Main result: the list-based evaluator equals the wrapped
   continuation-based evaluator.  Follows from the all-in-one
   worker/wrapper fusion rule, the worker/wrapper condition,
   strictness of unwrap, and the conjugacy of the two bodies. *)
theorem evalS_evalK:
  "evalS = wrapevalK"
  using worker_wrapper_fusion_new[OF wrap_unwrap_id unwrap_strict]
        evalS_body_evalK_body
  by simp

text‹

This proof can be considered an instance of the approach of
\citet{DBLP:journals/jfp/HuttonJG10}, which uses the worker/wrapper
machinery to relate two algebras.

This result could be obtained by a structural induction over the
syntax of the language. However our goal here is to show how such a
transformation can be achieved by purely equational means; this has
the advantage that our proof can be locally extended, e.g. to the
full language of \citet{DBLP:journals/ngc/DanvyGR01} simply by proving
extra equations. In contrast the higher-order language of
\citet{DBLP:conf/icfp/WandV04} is beyond the reach of this approach.

›
(*<*)

end
(*>*)

Theory Nub

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2009-2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory Nub
imports
  HOLCF
  LList
  Maybe
  Nats
  WorkerWrapperNew
begin
(*>*)

section‹Transforming $O(n^2)$ \emph{nub} into an $O(n\lg n)$ one›

text‹Andy Gill's solution, mechanised.›

subsection‹The @{term "nub"} function.›

(* The quadratic nub: keep the head, then filter all later occurrences
   of it out of the tail before recursing. *)
fixrec nub :: "Nat llist  Nat llist"
where
  "nublnil = lnil"
| "nub(x :@ xs) = x :@ nub(lfilter(neg oo (Λ y. x =B y))xs)"

lemma nub_strict[simp]: "nub = "
by fixrec_simp

(* The body functional of nub, with the recursive call abstracted as f. *)
fixrec nub_body :: "(Nat llist  Nat llist)  Nat llist  Nat llist"
where
  "nub_bodyflnil = lnil"
| "nub_bodyf(x :@ xs) = x :@ f(lfilter(neg oo (Λ y. x =B y))xs)"

(* nub is the least fixed point of its body functional. *)
lemma nub_nub_body_eq: "nub = fixnub_body"
  by (rule cfun_eqI, subst nub_def, subst nub_body.unfold, simp)

(* **************************************** *)

subsection‹Optimised data type.›

text ‹Implement sets using lazy lists for now. Lifting up HOL's @{typ "'a
set"} type causes continuity grief.›

(* Sets of naturals represented as lazy lists (see the text above:
   lifting HOL's set type causes continuity problems). *)
type_synonym NatSet = "Nat llist"

(* The empty set is the empty list. *)
definition
  SetEmpty :: "NatSet" where
  "SetEmpty  lnil"

(* Insertion is cons; no attempt is made to avoid duplicates. *)
definition
  SetInsert :: "Nat  NatSet  NatSet" where
  "SetInsert  lcons"

(* Membership via list traversal with the lifted equality predicate. *)
definition
  SetMem :: "Nat  NatSet  tr" where
  "SetMem  lmember(bpred (=))"

(* Membership is strict in the set argument. *)
lemma SetMem_strict[simp]: "SetMemx = " by (simp add: SetMem_def)
lemma SetMem_SetEmpty[simp]: "SetMemxSetEmpty = FF"
  by (simp add: SetMem_def SetEmpty_def)
(* Membership after insertion: either already present, or equal to the
   newly inserted element. *)
lemma SetMem_SetInsert: "SetMemv(SetInsertxs) = (SetMemvs orelse x =B v)"
  by (simp add: SetMem_def SetInsert_def)

text ‹AndyG's new type.›

(* Gill's optimised representation: a (lazy) result list paired with a
   (lazy) set of elements to be excluded from it. *)
domain R = R (lazy resultR :: "Nat llist") (lazy exceptR :: "NatSet")

(* nextR drops excluded elements from the front of the result list and
   returns the first surviving element together with the remainder
   (the exception set is unchanged); Nothing if none survives. *)
definition
  nextR :: "R  (Nat * R) Maybe" where
  "nextR = (Λ r. case ldropWhile(Λ x. SetMemx(exceptRr))(resultRr) of
                     lnil  Nothing
                   | x :@ xs  Just(x, Rxs(exceptRr)))"

lemma nextR_strict1[simp]: "nextR = " by (simp add: nextR_def)
lemma nextR_strict2[simp]: "nextR(RS) = " by (simp add: nextR_def)

lemma nextR_lnil[simp]: "nextR(RlnilS) = Nothing" by (simp add: nextR_def)

(* filterR records v in the exception set instead of eagerly filtering
   the result list — the key optimisation. *)
definition
  filterR :: "Nat  R  R" where
  "filterR  (Λ v r. R(resultRr)(SetInsertv(exceptRr)))"

(* c2a: concrete to abstract — pair the list with an empty exception set. *)
definition
  c2a :: "Nat llist  R" where
  "c2a  Λ xs. RxsSetEmpty"

(* a2c: abstract to concrete — filter out all excluded elements. *)
definition
  a2c :: "R  Nat llist" where
  "a2c  Λ r. lfilter(Λ v. neg(SetMemv(exceptRr)))(resultRr)"

(* a2c is strict in its R argument. *)
lemma a2c_strict[simp]: "a2c = " unfolding a2c_def by simp

(* Round trip through the abstract type is the identity: filtering
   against the empty exception set keeps everything. *)
lemma a2c_c2a_id: "a2c oo c2a = ID"
  by (rule cfun_eqI, simp add: a2c_def c2a_def lfilter_const_true)

(* wrap converts a worker on the abstract type R into a function on
   plain lists, via c2a. *)
definition
  wrap :: "(R  Nat llist)  Nat llist  Nat llist" where
  "wrap  Λ f xs. f(c2axs)"

(* unwrap is the converse conversion, via a2c. *)
definition
  unwrap :: "(Nat llist  Nat llist)  R  Nat llist" where
  "unwrap  Λ f r. f(a2cr)"

(* Strictness of unwrap: side condition for the fusion rule below. *)
lemma unwrap_strict[simp]: "unwrap = "
  unfolding unwrap_def by (rule cfun_eqI, simp)

(* The worker/wrapper condition, lifted pointwise from a2c_c2a_id. *)
lemma wrap_unwrap_id: "wrap oo unwrap = ID"
  using cfun_fun_cong[OF a2c_c2a_id]
  by - ((rule cfun_eqI)+, simp add: wrap_def unwrap_def)

text ‹Equivalences needed for later.›

(* De Morgan for the lifted truth values, by case analysis on x. *)
lemma TR_deMorgan: "neg(x orelse y) = (negx andalso negy)"
  by (rule trE[where p=x], simp_all)

(* Collapsing a Maybe-case wrapped around a list-case into a single
   list-case; used to rewrite nextR-based cases below. *)
lemma case_maybe_case:
  "(case (case L of lnil  Nothing | x :@ xs  Just(hxxs)) of
     Nothing  f | Just(a, b)  gab)
   =
   (case L of lnil  f | x :@ xs  g(fst (hxxs))(snd (hxxs)))"
  apply (cases L, simp_all)
  apply (case_tac "hallist")
  apply simp
  done

(* Case analysis on a filtered (a2c) list can instead be done with
   nextR on the abstract value: both find the first non-excluded
   element.  Proved by unfolding nextR, collapsing the nested cases
   with case_maybe_case, and then inducting over the result list. *)
lemma case_a2c_case_caseR:
    "(case a2cw of lnil  f | x :@ xs  gxxs)
   = (case nextRw of Nothing  f | Just(x, r)  gx(a2cr))" (is "?lhs = ?rhs")
proof -
  (* Step 1: unfold nextR. *)
  have "?rhs = (case (case ldropWhile(Λ x. SetMemx(exceptRw))(resultRw) of
                     lnil  Nothing
                   | x :@ xs  Just(x, Rxs(exceptRw))) of Nothing  f | Just(x, r)  gx(a2cr))"
    by (simp add: nextR_def)
  (* Step 2: collapse the Maybe-case over the list-case. *)
  also have " = (case ldropWhile(Λ x. SetMemx(exceptRw))(resultRw) of
                     lnil  f | x :@ xs  gx(a2c(Rxs(exceptRw))))"
    using case_maybe_case[where L="ldropWhile(Λ x. SetMemx(exceptRw))(resultRw)"
                            and f=f and g="Λ x r. gx(a2cr)" and h="Λ x xs. (x, Rxs(exceptRw))"]
    by simp
  (* Step 3: relate ldropWhile on the raw list to lfilter in a2c, by
     cases on the list and on membership of each element. *)
  also have " = ?lhs"
    apply (simp add: a2c_def)
    apply (cases "resultRw")
      apply simp_all
    apply (rule_tac p="SetMema(exceptRw)" in trE)
      apply simp_all
    apply (induct_tac llist)
       apply simp_all
    apply (rule_tac p="SetMemaa(exceptRw)" in trE)
      apply simp_all
    done
  finally show "?lhs = ?rhs" by simp
qed

(* Filtering the concretised list is the same as recording the element
   in the exception set (filterR) and concretising afterwards. *)
lemma filter_filterR: "lfilter(neg oo (Λ y. x =B y))(a2cr) = a2c(filterRxr)"
  using filter_filter[where p="Tr.neg oo (Λ y. x =B y)" and q="Λ v. Tr.neg(SetMemv(exceptRr))"]
  unfolding a2c_def filterR_def
  by (cases r, simp_all add: SetMem_SetInsert TR_deMorgan)

text‹Apply worker/wrapper. Unlike Gill/Hutton, we manipulate the body of
the worker into the right form then apply the lemma.›

(* First step of massaging the worker's body: the conjugated body,
   with the case analysis expressed via a2c. *)
definition
  nub_body' :: "(R  Nat llist)  R  Nat llist" where
  "nub_body'  Λ f r. case a2cr of lnil  lnil
                                   | x :@ xs  x :@ f(c2a(lfilter(neg oo (Λ y. x =B y))xs))"

(* The conjugated original body equals nub_body'. *)
lemma nub_body_nub_body'_eq: "unwrap oo nub_body oo wrap = nub_body'"
  unfolding nub_body_def nub_body'_def unwrap_def wrap_def a2c_def c2a_def
  by ((rule cfun_eqI)+
     , case_tac "lfilter(Λ v. Tr.neg(SetMemv(exceptRxa)))(resultRxa)"
     , simp_all add: fix_const)

(* Second step: replace the case on a2c with a case on nextR, using
   case_a2c_case_caseR. *)
definition
  nub_body'' :: "(R  Nat llist)  R  Nat llist" where
  "nub_body''  Λ f r. case nextRr of Nothing  lnil
                                      | Just(x, xs)  x :@ f(c2a(lfilter(neg oo (Λ y. x =B y))(a2cxs)))"

lemma nub_body'_nub_body''_eq: "nub_body' = nub_body''"
proof(rule cfun_eqI)+
  fix f r show "nub_body'fr = nub_body''fr"
    unfolding nub_body'_def nub_body''_def
    using case_a2c_case_caseR[where f="lnil" and g="Λ x xs. x :@ f(c2a(lfilter(Tr.neg oo (Λ y. x =B y))xs))" and w="r"]
    by simp
qed

(* Final form of the worker body: the per-step lfilter is replaced by
   the constant-time filterR (recording in the exception set). *)
definition
  nub_body''' :: "(R  Nat llist)  R  Nat llist" where
  "nub_body'''  (Λ f r. case nextRr of Nothing  lnil
                                      | Just(x, xs)  x :@ f(filterRxxs))"

(* nub_body'' factors through unwrap oo wrap, via filter_filterR; this
   is the shape the fusion rule requires. *)
lemma nub_body''_nub_body'''_eq: "nub_body'' = nub_body''' oo (unwrap oo wrap)"
  unfolding nub_body''_def nub_body'''_def wrap_def unwrap_def
  by ((rule cfun_eqI)+, simp add: filter_filterR)

text‹Finally glue it all together.›

(* Glue all the steps together with the all-in-one worker/wrapper
   fusion rule: nub is the wrapped fixpoint of the optimised body. *)
lemma nub_wrap_nub_body''': "nub = wrap(fixnub_body''')"
  using worker_wrapper_fusion_new[OF wrap_unwrap_id unwrap_strict, where body=nub_body]
        nub_nub_body_eq
        nub_body_nub_body'_eq
        nub_body'_nub_body''_eq
        nub_body''_nub_body'''_eq
  by simp

end

Theory Last

(*<*)
(*
 * The worker/wrapper transformation, following Gill and Hutton.
 * (C)opyright 2011, Peter Gammie, peteg42 at gmail.com.
 * License: BSD
 *)

theory Last
imports
  HOLCF
  LList
  WorkerWrapper
begin

(*>*)
section‹Optimise ``last''.›

text‹Andy Gill's solution, mechanised. No fusion, works fine using their rule.›

subsection‹The @{term "last"} function.›

(* last on lazy lists: repeatedly inspect the tail; no equation for
   lnil, so llast of the empty list is underdefined. *)
fixrec llast :: "'a llist  'a"
where
  "llast(x :@ yys) = (case yys of lnil  x | y :@ ys  llastyys)"

lemma llast_strict[simp]: "llast = "
by fixrec_simp

(* The body functional of llast, with the recursive call abstracted. *)
fixrec llast_body :: "('a llist  'a)  'a llist  'a"
where
  "llast_bodyf(x :@ yys) = (case yys of lnil  x | y :@ ys  fyys)"

(* llast is the least fixed point of its body functional. *)
lemma llast_llast_body: "llast = fixllast_body"
  by (rule cfun_eqI, subst llast_def, subst llast_body.unfold, simp)

(* wrap uncurries a head/tail worker into a list consumer; the pattern
   match makes it defined only on non-bottom cons cells. *)
definition wrap :: "('a  'a llist  'a)  ('a llist  'a)" where
  "wrap  Λ f (x :@ xs). fxxs"

(* unwrap splits a list consumer into head and tail arguments. *)
definition unwrap :: "('a llist  'a)  ('a  'a llist  'a)" where
  "unwrap  Λ f x xs. f(x :@ xs)"

(* Strictness of unwrap. *)
lemma unwrap_strict[simp]: "unwrap = "
  unfolding unwrap_def by ((rule cfun_eqI)+, simp)

(* The (weaker, body-relative) worker/wrapper condition of Gill/Hutton:
   wrap oo unwrap is the identity on the body functional.  Proved by
   case analysis on the list argument. *)
lemma wrap_unwrap_ID: "wrap oo unwrap oo llast_body = llast_body"
  unfolding llast_body_def wrap_def unwrap_def
  apply (rule cfun_eqI)+
  apply (case_tac xa)
  apply (simp_all add: fix_const)
  done

(* The worker: operates on an explicit head and tail, avoiding the
   repeated deconstruction of a cons cell. *)
definition llast_worker :: "('a  'a llist  'a)  'a  'a llist  'a" where
  "llast_worker  Λ r x yys. case yys of lnil  x | y :@ ys  ryys"

(* The optimised llast: the wrapped fixpoint of the worker. *)
definition llast' :: "'a llist  'a" where
  "llast'  wrap(fixllast_worker)"

(* The worker is exactly the conjugated body functional. *)
lemma llast_worker_llast_body: "llast_worker = unwrap oo llast_body oo wrap"
  unfolding llast_worker_def llast_body_def wrap_def unwrap_def
  apply (rule cfun_eqI)+
  apply (case_tac xb)
  apply (simp_all add: fix_const)
  done

(* Correctness of the optimisation: llast' equals llast, by a short
   calculation through the worker_wrapper_body rule. *)
lemma llast'_llast: "llast' = llast" (is "?lhs = ?rhs")
proof -
  have "?rhs = fixllast_body" by (simp only: llast_llast_body)
  also have " = wrap(fix(unwrap oo llast_body oo wrap))"
    by (simp only: worker_wrapper_body[OF wrap_unwrap_ID])
  also have " = wrap(fix(llast_worker))"
    by (simp only: llast_worker_llast_body)
  also have "... = ?lhs" unfolding llast'_def by simp
  finally show ?thesis by simp
qed

end