
1
\documentclass[a4paper,UKenglish]{lipics-v2016}
2
%This is a template for producing LIPIcs articles. 
3
%See lipics-manual.pdf for further information.
4
%for A4 paper format use option "a4paper", for US-letter use option "letterpaper"
5
%for british hyphenation rules use option "UKenglish", for american hyphenation rules use option "USenglish"
6
% for section-numbered lemmas etc., use "numberwithinsect"
7
 
8
\usepackage{microtype}%if unwanted, comment out or use option "draft"
9

    
10
%\graphicspath{{./graphics/}}%helpful if your graphic files are in another directory
11

    
12
\bibliographystyle{plainurl}% the recommended bibstyle
13

    
14
% Author macros::begin %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
15
\title{Free-cut elimination in linear logic: an application to implicit complexity}
16
%\titlerunning{A Sample LIPIcs Article} %optional, in case that the title is too long; the running title should fit into the top page column
17

    
18
%% Please provide for each author the \author and \affil macro, even when authors have the same affiliation, i.e. for each author there needs to be the  \author and \affil macros
19
\author[1]{Patrick Baillot}
20
\author[2]{Anupam Das}
21
\affil[1]{Dummy University Computing Laboratory, Address/City, Country\\
22
  \texttt{open@dummyuniversity.org}}
23
\affil[2]{Department of Informatics, Dummy College, Address/City, Country\\
24
  \texttt{access@dummycollege.org}}
25
\authorrunning{P. Baillot and A. Das} %mandatory. First: Use abbreviated first/middle names. Second (only in severe cases): Use first author plus 'et. al.'
26

    
27
\Copyright{Patrick Baillot and Anupam Das}%mandatory, please use full first names. LIPIcs license is "CC-BY";  http://creativecommons.org/licenses/by/3.0/
28

    
29
\subjclass{Dummy classification -- please refer to \url{http://www.acm.org/about/class/ccs98-html}}% mandatory: Please choose ACM 1998 classifications from http://www.acm.org/about/class/ccs98-html . E.g., cite as "F.1.1 Models of Computation". 
30
\keywords{Dummy keyword -- please provide 1--5 keywords}% mandatory: Please provide 1-5 keywords
31
% Author macros::end %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
32

    
33
%Editor-only macros:: begin (do not touch as author)%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
34
\EventEditors{John Q. Open and Joan R. Acces}
35
\EventNoEds{2}
36
\EventLongTitle{42nd Conference on Very Important Topics (CVIT 2016)}
37
\EventShortTitle{CVIT 2016}
38
\EventAcronym{CVIT}
39
\EventYear{2016}
40
\EventDate{December 24--27, 2016}
41
\EventLocation{Little Whinging, United Kingdom}
42
\EventLogo{}
43
\SeriesVolume{42}
44
\ArticleNo{23}
45
% Editor-only macros::end %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
46

    
47

    
48

    
49
\usepackage[lutzsyntax]{virginialake}
50
\usepackage{amsmath}
51
\usepackage{amssymb}
52
\usepackage{amsthm}
53
\usepackage{times}
54
%\usepackage{sans}
55
\usepackage{cmll}
56
\usepackage{bm}
57

    
58
%\newtheorem{theorem}{Theorem}
59
%\newtheorem{maintheorem}[theorem]{Main Theorem}
60
%\newtheorem{observation}[theorem]{Observation}
61
%\newtheorem{corollary}[theorem]{Corollary}
62
%\newtheorem{lemma}[theorem]{Lemma}
63
\newtheorem{proposition}[theorem]{Proposition}
64
%\newtheorem{conjecture}[theorem]{Conjecture}
65
%
66
%\theoremstyle{definition}
67
%\newtheorem{definition}[theorem]{Definition}
68
%\newtheorem{example}[theorem]{Example}
69
%\newtheorem{notation}[theorem]{Notation}
70
%\newtheorem{convention}[theorem]{Convention}
71
%\newtheorem{remark}[theorem]{Remark}
72
%\newtheorem{discussion}[theorem]{Discussion}
73

    
74
\newcommand{\todo}[1]{{\color{red}{\textbf{Todo:} #1}}}
75
\newcommand{\anupam}[1]{{\color{orange}{\textbf{Anupam:} #1}}}
76
\newcommand{\patrick}[1]{{\color{blue}{\textbf{Patrick:} #1}}}
77

    
78
\newcommand{\defined}{:=}
79
\begin{document}
80
	
81
	\newcommand{\LL}{\mathit{LL}}
82
	\vllineartrue
83
	
84
	
85
	\newcommand{\FV}{\mathit{FV}}
86
	
87
	
88
	%terms
89
	\renewcommand{\succ}{s}
90
	\renewcommand{\epsilon}{\varepsilon}
91
	
92
	% linear connectives
93
	
94
	\newcommand{\limp}{\multimap}
95
	\renewcommand{\land}{\otimes}
96
	\newcommand{\laand}{\&}
97
	\newcommand{\laor}{\oplus}
98
	\renewcommand{\lor}{\vlpa}
99
	\renewcommand{\lnot}[1]{{#1^{\perp}}}
100
	\newcommand{\lnotnot}[1]{#1^{\perp \perp}}
101
	
102
	% classical connectives
103
	
104
	\newcommand{\cimp}{\supset}
105
	\newcommand{\cand}{\wedge}
106
	\newcommand{\cor}{\vee}
107
	\newcommand{\cnot}{\neg}
108
	
109
	
110
	\newcommand{\Ax}{\mathit{(Ax)}}
111
	\newcommand{\Rl}{\mathit{(Rl)}}
112
	
113
	\newcommand{\MELL}{\mathit{MELL}}
114
	\newcommand{\MEAL}{\mathit{MELLW}}
115
	\newcommand{\MELLW}{\mathit{MELL(W)}}
116
	
117
	\newcommand{\Aonetwo}{\mathcal{A}^1_2}
118
	\newcommand{\logic}{\mathit{L}_{\mathcal A} }
119
	
120
	% predicates
121
	\newcommand{\nat}{N}
122
	
123
	\newcommand{\Nat}{\mathbb{N}}
124
	
125
	%axioms
126
	\newcommand{\wk}{\mathit{wk}}
127
	\newcommand{\impl}{\cimp\text{-}\mathit{l}}
128
	\newcommand{\impcomm}{\mathit{com}}
129
	\newcommand{\conint}{\cand\text{-}\mathit{i}}
130
	\newcommand{\conel}{\cand\text{-}\mathit{e}}
131
	\newcommand{\negclass}{\cnot}
132
	
133
	%rules
134
	\renewcommand{\mp}{\mathit{mp}}
135
	\newcommand{\gen}{\mathit{gen}}
136
	\newcommand{\inst}{\mathit{ins}}
137
	\newcommand{\id}{\mathit{id}}
	\newcommand{\cut}{\mathit{cut}}
	\newcommand{\multicut}{\mathit{mcut}}
140
	\newcommand{\indr}{\mathit{IND}}
141
	\newcommand{\nec}{\mathit{nec}}
142
	\newcommand{\tax}{\mathit{T}}
143
	\newcommand{\four}{\mathit{4}}
144
	\newcommand{\kax}{\mathit{K}}
145
	\newcommand{\cntr}{\mathit{cntr}}
146
	
147
	\newcommand{\lefrul}[1]{#1\text{-}\mathit{l}}
148
	\newcommand{\rigrul}[1]{#1\text{-}\mathit{r}}
149
	
150
	%consequence relations
151
	\newcommand{\admits}{\vDash}
152
	\newcommand{\seqar}{\vdash}
153
	\newcommand{\proves}{\vdash_e}
154
	
155
	%induction
156
	\newcommand{\ind}{\mathit{IND}}
157
	\newcommand{\pind}{\mathit{PIND}}
158
	\newcommand{\cax}[2]{#1\text{-}#2}
159

    
160
\maketitle
161

    
162
\begin{abstract}
163
We prove a general form of free-cut elimination for first-order theories in linear logic. We then consider a version of Peano Arithmetic in linear logic and prove a witnessing theorem via the \emph{witness function method} of Buss and Mints, characterising its provably total functions as precisely the polynomial-time functions.
164
 \end{abstract}
165

    
166
\section{Introduction}
167

    
168
\todo{words or integers?}
169

    
170

    
171

    
172
\section{Preliminaries}
173

    
174
\todo{consider removing and just have a section on linear logic, including free-cut elimination.}
175

    
176

    
177

    
178

    
179
\paragraph*{Notation}
180
We fix the following conventions for use throughout:
181
\begin{itemize}
182
\item Eigenvariables: $a, b , c$.
183
\item (Normal) variables: $u,v,w$ (only when the distinction is important, e.g.\ $u^{!\nat}$).
\item (Safe) variables: $x,y,z$ (as above, e.g.\ $x^\nat$).
185
\item Terms: $r,s,t$.
186
\item Formulae: $A,B,C$.
187
\item Atomic formulae: $p,q$.
188
\item Sequents: $\Gamma, \Delta, \Sigma, \Pi$.
189
\item Proofs: $\pi, \rho, \sigma$.
190
\item Theories: $\mathcal T$. Sequent systems: $\mathcal S$.
191
\end{itemize}
192

    
193
\subsection{Linear logic}
194

    
195
\anupam{use a system that is already in De Morgan form, for simplicity.}
196
\anupam{Have skipped units, can reconsider this when in arithmetic. Also in affine setting can be recovered by any contradiction/tautology.}
197

    
198
\begin{definition}
199
[Sequent calculus for linear logic]
200
We define the following calculus in De Morgan normal form:
201
\[
202
\begin{array}{l}
203
\begin{array}{cccc}
204
\vlinf{\lefrul{\bot}}{}{p, \lnot{p} \seqar }{}
205
& \vlinf{\id}{}{p \seqar p}{}
206
& \vlinf{\rigrul{\bot}}{}{\seqar p, \lnot{p}}{}
207
& \vliinf{\cut}{}{\Gamma, \Sigma \seqar \Delta , \Pi}{ \Gamma \seqar \Delta, A }{\Sigma, A \seqar \Pi}
208
\\
209
\noalign{\bigskip}
210
%\text{Multiplicatives:} & & & \\
211
%\noalign{\bigskip}
212
\vliinf{\lefrul{\lor}}{}{\Gamma,\Sigma, A \lor B \seqar \Delta, \Pi}{\Gamma, A \seqar \Delta}{\Sigma , B \seqar \Pi}
213
&
214
\vlinf{\lefrul{\land}}{}{\Gamma, A\land B \seqar \Delta}{\Gamma, A , B \seqar \Delta}
215
&
216
\vlinf{\rigrul{\lor}}{}{\Gamma \seqar \Delta, A \lor B}{\Gamma \seqar \Delta, A, B}
217
&
218
\vliinf{\rigrul{\land}}{}{\Gamma, \Sigma \seqar \Delta , \Pi , A \land B}{\Gamma \seqar \Delta , A}{\Sigma \seqar \Pi , B}
219
\\
220
\noalign{\bigskip}
221
%\text{Additives:} & & & \\
222
%\noalign{\bigskip}
223
\vliinf{\lefrul{\laor}}{}{\Gamma, A \laor B \seqar \Delta}{\Gamma , A \seqar \Delta}{\Gamma, B \seqar \Delta}
224
&
225
\vlinf{\lefrul{\laand}}{}{\Gamma, A_1\laand A_2 \seqar \Delta}{\Gamma, A_i \seqar \Delta}
226
&
227
%\vlinf{\lefrul{\laand}}{}{\Gamma, A\laand B \seqar \Delta}{\Gamma, B \seqar \Delta}
228
%\quad
229
\vlinf{\rigrul{\laor}}{}{\Gamma \seqar \Delta, A_1\laor A_2}{\Gamma \seqar \Delta, A_i}
230
&
231
%\vlinf{\rigrul{\laor}}{}{\Gamma \seqar \Delta, A\laor B}{\Gamma \seqar \Delta, B}
232
%\quad
233
\vliinf{\rigrul{\laand}}{}{\Gamma \seqar \Delta, A \laand B }{\Gamma \seqar \Delta, A}{\Gamma \seqar \Delta, B}
234
\\
235
\noalign{\bigskip}
236
%\text{Exponentials:} & & & \\
237
%\noalign{\bigskip}
238
\vlinf{\lefrul{?}}{}{!\Gamma, ?A \seqar ?\Delta}{!\Gamma , A \seqar ?\Delta}
239
&
240
\vlinf{\lefrul{!}}{}{\Gamma, !A \seqar \Delta}{\Gamma, A \seqar \Delta}
241
&
242
\vlinf{\rigrul{?}}{}{\Gamma \seqar \Delta, ?A}{\Gamma \seqar \Delta, A}
243
&
244
\vlinf{\rigrul{!}}{}{!\Gamma \seqar ?\Delta, !A}{!\Gamma \seqar ?\Delta, A}
245
\\
246
\noalign{\bigskip}
247
%\text{Structural:} & & & \\
248
%\noalign{\bigskip}
249
\vlinf{\lefrul{\wk}}{}{\Gamma, !A \seqar \Delta}{\Gamma \seqar \Delta}
250
&
251
\vlinf{\lefrul{\cntr}}{}{\Gamma, !A \seqar \Delta}{\Gamma, !A, !A \seqar \Delta}
252
&
253
\vlinf{\rigrul{\wk}}{}{\Gamma \seqar \Delta, ?A }{\Gamma \seqar \Delta}
254
&
255
\vlinf{\rigrul{\cntr}}{}{\Gamma \seqar \Delta, ?A}{\Gamma \seqar \Delta, ?A, ?A}
256
\\
257
\noalign{\bigskip}
258
\vlinf{\lefrul{\exists}}{}{\Gamma, \exists x . A(x) \seqar \Delta}{\Gamma, A(a) \seqar \Delta}
259
&
260
\vlinf{\lefrul{\forall}}{}{\Gamma, \forall x. A(x) \seqar \Delta}{\Gamma, A(t) \seqar \Delta}
261
&
262
\vlinf{\rigrul{\exists}}{}{\Gamma \seqar \Delta, \exists x . A(x)}{ \Gamma \seqar \Delta, A(t)}
263
&
264
\vlinf{\rigrul{\forall}}{}{\Gamma \seqar \Delta, \forall x . A(x)}{ \Gamma \seqar \Delta, A(a) }
265
\end{array}
266
\end{array}
267
\]
268
where $p$ is atomic, $i \in \{ 1,2 \}$, and the eigenvariable $a$ does not occur free in $\Gamma$ or $\Delta$.
269
\end{definition}
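
As a simple example of the calculus in action (included here purely for illustration), the commutativity of $\land$ on atoms is derived as follows:
\[
\vlderivation{
	\vlin{\lefrul{\land}}{}{ p \land q \seqar q \land p }{
		\vliin{\rigrul{\land}}{}{ p , q \seqar q \land p }{
			\vlin{\id}{}{ q \seqar q }{\vlhy{}}
		}{
			\vlin{\id}{}{ p \seqar p }{\vlhy{}}
		}
	}
}
\]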
270

    
271

    
272
\subsection{Deduction theorem}
273

    
274
\todo{Cite Avron, `Semantics and proof theory of linear logic'.}
275

    
276
We will make much use of the deduction theorem, which allows us to argue informally within a theory under hypotheses that have been promoted.
277

    
278
\begin{theorem}
279
	[Deduction]
280
	If $\mathcal T , A \proves B$ then $\mathcal{T} \proves !A \limp B$.
281
\end{theorem}
282

    
283
\todo{need that $A$ above is closed! Check for rules to axioms.}
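
To see why the hypothesis must be $!$-ed in the conclusion, consider the following standard example (included here only for illustration): for atomic $p$ the sequent $p \seqar p \land p$ is not derivable, whereas $!p \seqar p \land p$ is:
\[
\vlderivation{
	\vlin{\lefrul{\cntr}}{}{ !p \seqar p \land p }{
		\vliin{\rigrul{\land}}{}{ !p , !p \seqar p \land p }{
			\vlin{\lefrul{!}}{}{ !p \seqar p }{
				\vlin{\id}{}{ p \seqar p }{\vlhy{}}
			}
		}{
			\vlin{\lefrul{!}}{}{ !p \seqar p }{
				\vlin{\id}{}{ p \seqar p }{\vlhy{}}
			}
		}
	}
}
\]
Accordingly, from $\mathcal T , p \proves p \land p$ (a hypothesis in a theory may be used several times) the deduction theorem can only yield $\mathcal T \proves\ !p \limp p \land p$.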
284

    
285

    
286

    
287
\subsection{Converting axioms to rules in $\MELLW$}
288

    
289
\begin{proposition}
290
	An axiom $\Ax$ of the form,
291
	\[
292
	A_1 \limp \vldots \limp A_m \limp !B_1 \limp \vldots \limp !B_n \limp C
293
	\]
294
	is equivalent (over propositional $\LL$) to the rule $\Rl$:
295
	\[
296
	\vliiinf{\Rl}{}{ !\Gamma , A_1 , \dots , A_m \seqar C , ? \Delta  }{ !\Gamma \seqar B_1 , ?\Delta }{\vldots }{ !\Gamma \seqar B_n , ?\Delta}
297
	\]
298
\end{proposition}
299
\begin{proof}
300
	Let us first assume $\Ax$ and derive $\Rl$. From the axiom and Currying, we have a proof of:
301
	\begin{equation}\label{eqn:curried-axiom}
302
	A_1 , \dots , A_m , !B_1 , \dots , !B_n \seqar C
303
	\end{equation}
304
	
305
	This can simply be cut against each of the premisses of $\Rl$, applying appropriate contractions and necessitations, to derive its conclusion:
306
	\[
307
	\vlderivation{
308
		\vliq{c}{}{!\Gamma , A_1 , \dots , A_m \seqar C , ?\Delta }{
309
			\vliin{\cut}{}{!\Gamma, \dots , !\Gamma , A_1 , \dots , A_m \seqar C , ?\Delta, \dots , ?\Delta }{
310
				\vlin{!}{}{!\Gamma \seqar !B_n, ?\Delta }{\vlhy{!\Gamma \seqar B_n , ?\Delta }}
311
			}{
312
			\vliin{\cut}{}{\qquad \qquad \qquad \qquad  \vlvdots \qquad \qquad \qquad \qquad }{
313
				\vlin{!}{}{!\Gamma \seqar !B_1 , ?\Delta}{\vlhy{!\Gamma \seqar B_1, ?\Delta }}
314
			}{\vlhy{ A_1 , \dots , A_m , !B_1 , \dots , !B_n \seqar C } }
315
		}
316
	}
317
}
318
\]
319

    
320
Now let us prove $\Ax$ (again in the form of \eqref{eqn:curried-axiom}) by using $\Rl$ as follows:
321
\[
322
\vliiinf{\Rl}{}{ A_1 , \dots , A_m , !B_1 , \dots , !B_n \seqar C }{  \vlderivation{
323
		\vlin{w}{}{ !B_1 , \dots , !B_n \seqar B_1 }{
324
			\vlin{!}{}{!B_1 \seqar B_1 }{
325
				\vlin{\id}{}{B_1 \seqar B_1 }{\vlhy{}}
326
			}
327
		}
328
	}  }{\vldots}{
329
	\vlderivation{
330
		\vlin{w}{}{ !B_1 , \dots , !B_n \seqar B_n }{
331
			\vlin{!}{}{!B_n \seqar B_n }{
332
				\vlin{\id}{}{B_n \seqar B_n }{\vlhy{}}
333
			}
334
		}
335
	}
336
}
337
\]
338
\end{proof}
339

    
340

    
341
\textbf{NB:} The proof does not strictly require the side formulae $?\Delta$ on the right of the sequent arrow $\seqar$; it would also work without them, e.g.\ in the intuitionistic case. In a one-sided setting there is no difference.
342

    
343

    
344

    
345
\begin{corollary}
346
	The induction axiom of $A^1_2$ is equivalent to the rule:
347
	\[
348
	\vliinf{}{(x \notin \FV(\Gamma, \Delta))}{ !\Gamma , A(\epsilon) \seqar A(t) , ?\Delta }{ !\Gamma , A(x) \seqar A(s_0 x ), ?\Delta }{ !\Gamma, A(x) \seqar A( s_1 x ) , ?\Delta}
349
	\]
350
\end{corollary}
351
\begin{proof}
352
	By the proposition above, generalisation and Currying.
353
\end{proof}
354

    
355

    
356
\subsection{Prenexing}
357
%In the presence of weakening we have a prenex normal form due to the following:
358
%
359
%\[
360
%\vlderivation{
361
%	\vlin{}{}{\exists x . A \lor B \seqar \exists x . (A(x) \lor B) }{
362
%		
363
%		}
364
%	}
365
%\]
366

    
367
We cannot derive prenexing operations; e.g.\ there is a problem with $\exists x . A(x) \lor B \seqar \exists x . (A(x) \lor B)$. Can we safely add prenexing rules? Or is this not a problem, due to the witness predicate?
368

    
369
\section{Free-cut elimination in linear logic}
370

    
371

    
372
\todo{patrick}
373

    
374
\anupam{Possibly need to include the cases for affine logic too, e.g.\ if weakening is native, since it does not play nice as an axiom.}
375

    
376
\section{Some variants of arithmetic in linear logic}
377

    
378
For almost all of the variants discussed we will in fact work in the \emph{affine} variant of linear logic, which validates weakening: $(A \land B) \limp A$. There are several reasons for this; essentially, it has little effect on complexity while yielding a more robust proof theory. For example, it induces the following equivalence:
379
\[
380
!(A\land B) \equiv (!A \land !B)
381
\]
382
(The right-to-left direction is already valid in usual linear logic, but the left-to-right direction requires weakening.)
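
For the record, a sketch of the left-to-right direction (it is exactly here that weakening is used): weakening gives $A \land B \seqar A$ and $A \land B \seqar B$, so by dereliction and promotion we obtain
\[
!(A \land B) \seqar\ !A
\qquad \text{and} \qquad
!(A \land B) \seqar\ !B ,
\]
and a $\rigrul{\land}$ step followed by a contraction on $!(A \land B)$ yields $!(A \land B) \seqar\ !A \land\ !B$.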
383

    
384
We define a variant of arithmetic inspired by Bellantoni and Hofmann's $A^1_2$. We describe later some connections to bounded arithmetic.
385

    
386

    
387
\subsection{Based on Bellantoni-Hofmann, Leivant, etc.}
388

    
389
\newcommand{\natcntr}{\nat\mathit{cntr}}
390
\newcommand{\geneps}{\nat_{\epsilon}}
391
\newcommand{\genzer}{\nat_{0}}
392
\newcommand{\genone}{\nat_{1}}
393

    
394

    
395
\newcommand{\sepeps}{\epsilon}
396
\newcommand{\sepzer}{\succ_{0}}
397
\newcommand{\sepone}{\succ_{1}}
398

    
399
\newcommand{\inj}{\mathit{inj}}
400
\newcommand{\surj}{\mathit{surj}}
401

    
402
\begin{definition}
403
[Arithmetic]
404
We have the following axioms:
405
\[
406
\begin{array}{rl}
407
\wk & (A \land B )\limp A \\
408
\natcntr & \forall x^N . (N(x) \land N(x)) \\
409
\geneps & \nat(\epsilon) \\
410
\genzer & \forall x^N . N(\succ_0 x) \\
411
\genone & \forall x^N . N(\succ_1 x) \\
412
\sepeps & \forall x^N . (\epsilon \neq \succ_0 x \land \epsilon \neq \succ_1 x) \\
413
\sepzer & \forall x^N , y^N. ( \succ_0 x = \succ_0 y \limp x=y ) \\
414
\sepone & \forall x^N , y^N. ( \succ_1 x = \succ_1 y \limp x=y ) \\
415
\inj & \forall x^N . \succ_0 x \neq \succ_1 x \\
416
\surj & \forall x^N . (x = \epsilon \lor \exists y^N . x = \succ_0 y \lor \exists y^N . x = \succ_1 y ) \\
417
\noalign{\bigskip}
418
\ind & A(\epsilon) \limp !( \forall x^{!\nat} . ( A(x) \limp A(\succ_0 x) ) ) \limp !( \forall x^{!\nat} . ( A(x) \limp A(\succ_1 x) ) ) \limp \forall x^{!\nat} . A(x)
419
\end{array}
420
\]
421

    
422
Should the induction rule instead be the following, to take account of the relativised quantifiers?
423
\[
424
\vliinf{\ind}{}{ !N(t) , !\Gamma , A(\epsilon) \seqar A(t), ?\Delta }{ !N(a) , !\Gamma, A(a) \seqar A(\succ_0 a) , ?\Delta }{ !N(a) , !\Gamma, A(a) \seqar A(\succ_1 a) , ?\Delta  }
425
\]
426

    
427
\todo{in existential above, is there a prenexing problem?}
428

    
429
\todo{what about $\leq$? Introducing this could solve prenexing problem, e.g.\ by induction?}
430
\end{definition}
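
For orientation (a trivial consequence of the axioms, recorded only as a sanity check), each numeral is provably in $\nat$: e.g.\ $\seqar \nat(\succ_1 \succ_0 \epsilon)$ follows by cutting $\geneps$ against the instances of $\genzer$ at $\epsilon$ and of $\genone$ at $\succ_0 \epsilon$.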
431

    
432
\subsection{$\nat$-hierarchy}
433

    
434
\begin{definition}
435

    
436
\todo{replace N with nat.}
437

    
438
	Let us define the quantifiers $\exists x^N . A$ and $\forall x^N . A$ as $\exists x . (N(x) \land A)$ and $\forall x . (N(x) \limp A) $ respectively.
439
	
440
	We define $\Sigma^N_0 = \Pi^N_0 $ as the class of formulae that are free of quantifiers. For $i\geq 0$ we define:
441
	\begin{itemize}
442
		\item $\Sigma^N_{i+1}$ is the class of formulae $\exists x^N . A$ where $A \in \Pi^N_i$.
443
		\item $\Pi^N_{i+1}$ is the class of formulae $\forall x^N . A$ where $A \in \Sigma^N_i$.
444
		\item ($\Delta^N_i$ is the class $\Sigma^N_i \cap \Pi^N_i$.)
445
	\end{itemize}
446
\end{definition}
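
For example (just to fix intuitions), $\exists y^N . s = t$ is $\Sigma^N_1$, while $\forall x^N . \exists y^N .\, \succ_0 x = y$ is $\Pi^N_2$.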
447

    
448
\todo{allow closure under positive Boolean operations?}
449

    
450
\todo{What about prenexing rules? Should not change provably total functions!}
451

    
452
\subsection{Equivalent rule systems}
453

    
454
For the quantifiers $\exists x^N $ and $\forall x^N$ we introduce the following rules, which are compatible with free-cut elimination:
455

    
456
\[
457
\begin{array}{cc}
458
\vlinf{}{}{\Gamma \seqar \Delta, \forall x^N . A(x)}{\Gamma, N(a) \seqar \Delta , A(a)}
459
\quad & \quad
460
\vlinf{}{}{\Gamma, N(t),\forall x^N A(x) \seqar \Delta}{\Gamma,  A(t) \seqar \Delta}
461
\\
462
\noalign{\bigskip}
463
\vlinf{}{}{\Gamma , \exists x^N A(x) \seqar \Delta}{\Gamma, N(a), A(a) \seqar \Delta}
464
\quad &\quad 
465
\vlinf{}{}{\Gamma, N(t) \seqar \Delta , \exists x^N . A(x)}{\Gamma  \seqar \Delta, A(t)}
466
\end{array}
467
\]
468

    
469
\todo{Check these!}
470

    
471
\todo{Also include rules for contraction and weakening.}
472

    
473
\section{Convergence of Bellantoni-Cook programs}
474

    
475
\subsection{Bellantoni-Cook programs}
476
We first recall the notion of safe recursion on notation from BC. \todo{Complete this subsection.}
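
For reference, here is a sketch of the standard scheme (to be adapted to the present notation): a BC-function $f(\vec x ; \vec y)$ has \emph{normal} arguments $\vec x$ and \emph{safe} arguments $\vec y$, and the class is generated from basic functions (constants, projections, the binary successors $\succ_0 , \succ_1$, predecessor and conditional, acting on safe arguments) by safe composition and predicative recursion on notation:
\[
\begin{array}{rcl}
f(\vec x ; \vec y) & = & h( \vec r (\vec x ; ) ; \vec t (\vec x ; \vec y) ) \\
\noalign{\smallskip}
f(\epsilon , \vec x ; \vec y) & = & g(\vec x ; \vec y) \\
f(\succ_i z , \vec x ; \vec y) & = & h_i ( z , \vec x ; \vec y , f(z , \vec x ; \vec y) ) \qquad i \in \{ 0 , 1 \}
\end{array}
\]
The crucial point is that the recursive call $f(z , \vec x ; \vec y)$ only ever occurs in a safe position of $h_i$.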
477

    
478
\subsection{Convergence in arithmetic}
479
\begin{theorem}
480
	[Convergence]
481
	If $\Phi(f)$ is an equational specification corresponding to a BC-program defining $f$, then $\cax{\Sigma^N_1}{\pind} \proves  \ !\Phi(f) \limp \forall \vec{x}^{!N} . N(f(\vec x))$.
482
\end{theorem}
483
\begin{proof}
484
	We show by induction on the structure of a BC-program for $f(\vec x ; \vec y)$ that:
485
	\begin{equation}
486
	\Phi(f) , \cax{\Sigma^N_1}{\pind} \proves \ \forall \vec{x}^{!N} . \forall \vec{y}^N . N(f(\vec x ; \vec y))
487
	\end{equation}
488
	We give some key cases in what follows.
489
	
490
	Suppose $f$ is defined by PRN by:
491
	\[
492
	\begin{array}{rcl}
493
	f(0,\vec x ; \vec y) & = & g(\vec x ; \vec y) \\
494
	f(\succ_i x , \vec x ; \vec y) & = & h_i (x, \vec x ; \vec y , f(x , \vec x ; \vec y))
495
	\end{array}
496
	\]
497
	By the inductive hypothesis we already have that $g, h_0, h_1$ are provably convergent, so let us suppose that $N(\vec x)$ and prove,
498
	\begin{equation}
499
	\forall x^{!N} . ( N(\vec y) \limp N(f(x , \vec x ; \vec y) ) )
500
	\end{equation}
501
	by $\cax{\Sigma^N_1}{\pind}$ on $x$.
502
	
503
	In the base case we have the following proof:
504
	\[
505
	\vlderivation{
506
		\vliin{}{}{N(\vec y) \limp N(f(0, \vec x ; \vec y))}{
507
			\vltr{IH}{N(\vec y) \limp N(g(\vec x ; \vec y))}{\vlhy{\quad}}{\vlhy{}}{\vlhy{\quad}}
508
		}{
509
		\vlin{}{}{N(g (\vec x ; \vec y) ) \limp N(f(0, \vec x ; \vec y)) }{
510
			\vlin{}{}{g(\vec x ; \vec y) = f(0 , \vec x ; \vec y)}{\vlhy{}}
511
		}
512
	}
513
}
514
\]
515

    
516
For the inductive step, we need to show that,
517
\[
518
\forall x^{!N} . (( N(\vec y) \limp N( f(x, \vec x ; \vec y) ) ) \limp N(\vec y) \limp N(f(\succ_i x , \vec x ; \vec y) )   )
519
\]
520
so let us suppose $N(x)$ and give the following proof:
521
\[
522
\vlderivation{
523
	\vlin{N\cntr}{}{N(y) \limp ( N(y ) \limp N(f( x , \vec x ; \vec y ) ) ) \limp N(f (\succ_i x , \vec x ; \vec y)  ) }{
524
		\vliin{}{}{N(y) \limp N(y) \limp ( N(y ) \limp N(f( x , \vec x ; \vec y ) ) ) \limp N(f (\succ_i x , \vec x ; \vec y)  ) }{
525
			\vliin{}{}{ N(y) \limp N(y) \limp ( N(y) \limp N( f(x, \vec x ; \vec y) ) ) \limp N( h_i (x , \vec x ; \vec y , f(x , \vec x ; \vec y) ) ) }{
526
				\vltr{MLL}{ N(\vec y) \limp (N(\vec y) \limp N(f(x, \vec x ; \vec y) )) \limp N(f(x, \vec x ; \vec y)) }{\vlhy{\quad }}{\vlhy{\ }}{\vlhy{\quad }}
527
			}{
528
			\vlhy{2}
529
		}
530
	}{
531
	\vlhy{3}
532
}
533
}
534
}
535
\]
536
\todo{To finish.}
537
\end{proof}
538

    
539
\section{Witness function method}
540

    
541
\todo{negation normal form}
542

    
543
\newcommand{\type}{\mathtt{t}}
544
\newcommand{\norm}{\nu}
545
\newcommand{\safe}{\sigma}
546

    
547
\subsection{Typing}
548

    
549
\begin{definition}
550
	[Types]
551
	To each formula we associate a tuple type as follows:
552
	\[
553
	\begin{array}{rcll}
554
	\type (\nat (t)) & := & \Nat & \\
555
	\type(s = t) & := & \Nat & \\
556
	\type (A \star B)  & := & \type (A) \times \type (B) & \text{for $\star \in \{ \lor, \land, \laor, \laand \} $.} \\
	\type (! A) & := & \type_\norm (A) & \\
558
	\type (\exists x^N . A) & := & \Nat \times \type (A) & \\
559
%	\type (\forall x^N . A) & := & \Nat \to \type(A)
560
	\end{array}
561
	\]
562
	where $\nu$ designates that the inputs are normal.
563
	\end{definition}
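	For example (as a sanity check on the clauses above), $\type (\exists x^N . (\nat(x) \land s = t)) = \Nat \times (\Nat \times \Nat)$.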
564
	
565
	\todo{compatibility with safety. To consider.}
566
	
567
	
568
	
569
	
570
	\newcommand{\witness}[2]{\mathit{Witness}^{#1}_{#2}}
571
	\begin{definition}
572
		[Witness predicate]
573
		We define the following formula:
574
		\[
575
		\begin{array}{rcll}
576
		\witness{\vec a}{A} (w, \vec a) & := & A(\vec a) & \text{for $A$ quantifier-free.} \\
577
		\witness{\vec a}{\exists x . A} (w, \vec w , \vec a) & := & \witness{\vec{a}, a}{A(a)}(\vec w , \vec a, w ) & \\
578
		\witness{\vec a}{A \star B} ( \vec w^1 , \vec w^2 ) & := &  \witness{\vec a}{A} (\vec w^1) \star \witness{\vec a}{B} (\vec w^2) & \text{for $\star \in \{ \land, \lor, \laand , \laor\}$.} \\
579
		\end{array}
580
		\]
581
	\end{definition}
582
	\todo{problem: what about complexity of checking equality? }
583
	
584
	\todo{What about $\nat (t)$? What does it mean? Replace with true?}
585
	
586
	\todo{either use the witness predicate above, or use realizability predicate at meta-level and a model theory, like BH.}
587
	
588
	\anupam{Need to check above properly. $N(t)$ should be witnessed by the value of $t $ in the model. For equality of terms the witness should not do anything.}
589
	
590
	\subsection{The translation}
591
	Need witness predicate so that we know what is being preserved.
592

    
593
\begin{definition}
594
	[Translation]
595
	We give a translation of proofs $\pi$ of sequents $\Gamma(\vec a) \seqar \Delta(\vec a)$ to BC-programs for a vector of functions $\vec f^\pi : \type(\bigotimes\Gamma) \times \Nat^{|\vec a|} \to \type(\bigparr\Delta)$ such that:
596
	\[
597
	\witness{\vec a}{\bigotimes \Gamma} (\vec w , \vec a) \implies \witness{\vec a}{\bigparr \Delta } (\vec f(\vec w , \vec a), \vec a)
598
	\]
599
	We will be explicit about normal and safe inputs when necessary; for the most part we will simply rearrange inputs into lists $(\vec x; \vec a)$ as in the BC framework.
600
	
601
	We often suppress the parameters $\vec a$ when they are not important.
602
	
603
	\anupam{Is this implication provable in Bellantoni's version of PV based on BC?}
604
	
605
	\anupam{for moment try ignore decoration on right? what about negation?}
606
	
607

    
608
	
609
	\[
610
	\vlderivation{
611
		\vliin{\land}{}{ \Gamma, \Sigma \seqar \Delta, \Pi, A\land B }{
612
			\vltr{\pi_1}{ \Gamma \seqar \Delta, A }{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
613
			}{
614
			\vltr{\pi_2}{ \Sigma \seqar \Pi, B }{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
615
			}
616
		}
617
	\]
618
	
619
	So we have $\vec f^{1} (\vec x ; \vec a)$ and $\vec f^2 (\vec y ; \vec b)$ for $\pi_1 $ and $\pi_2$ respectively, by the inductive hypothesis. We simply construct $\vec f^\pi$ by rearranging these functions:
620
	\[
621
	\vec f^\pi (\vec x , \vec y ; \vec a , \vec b) \quad := \quad ( f^1_1 (\vec x ; \vec a) , \dots , f^1_{|\Delta|} (\vec x ; \vec a) , f^2_1 (\vec y; \vec b) , \dots , f^2_{|\Pi|} (\vec y ; \vec b) , f^1_{|\Delta|+1} (\vec x ; \vec a ) , f^2_{|\Pi|+1} (\vec y ; \vec b) )
622
	\] 
623
	
624
	\anupam{bad notation since need to include free variables of sequent too. (also of theory.) Reconsider.}
625
	
626
	\[
627
	\vlderivation{
628
		\vlin{\lefrul{\cntr}}{}{ !A , \Gamma \seqar \Delta }{
629
			\vltr{\pi'}{  !A, !A , \Gamma \seqar \Delta}{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
630
			}
631
		}
632
	\]
633
	We have $\vec f' (x_1 , x_2 , \vec y ; \vec a)$ for $\pi'$, and define:
634
	\[
635
	\vec f (x_1 , \vec y ; \vec a) \quad := \quad \vec f'(x_1 , x_1 , \vec y ; \vec a)
636
	\]
637
	\[
638
	\vlderivation{
639
	\vlin{\rigrul{\cntr}}{}{\Gamma \seqar \Delta, ?A}{
640
	\vltr{\pi'}{  \Gamma \seqar \Delta, ?A, ?A}{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
641
	}
642
	}
643
	\]
644
	(need test function against witness predicate?)
645
	
646
	\anupam{problem: how to test even equality of two terms? Maybe treat $?$ as $!$ on left?}
647
	
648
	\anupam{Not a problem by free-cut elimination! Can assume there are no $?$ in the proof! Is this equivalent to treating $?$ as $!$ on left?}
649
	
650
	\anupam{Yes I think so. We can work in a left-sided calculus. The only problem is for induction. But this can never be a problem for modalities since induction formulae are modality-free.}
651
	
652
	\anupam{this is actually the main point.}
653
	
654
	
655
	\[
656
	\vlderivation{
657
	\vliin{\rigrul{\laand}}{}{\Gamma \seqar \Delta, A \laand B}{
658
	\vltr{\pi_1}{  \Gamma \seqar \Delta, A}{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
659
	}{
660
	\vltr{\pi_2}{  \Gamma \seqar \Delta, B}{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
661
	}
662
	}
663
	\]
664
	
665
	Suppose we have $\vec f^i (\vec x , \vec a ; \vec y , \vec b)$ from $\pi_i$.
666
	
667
	\anupam{problem: does this require many tests, just like for contraction? Can we isolate a different complexity class? e.g.\ PH or Grzegorczyk?}
668
	\anupam{skip this case and consider later.}
669
\[
670
\vlderivation{
671
\vliin{\cut}{}{\Gamma , \Sigma \seqar \Delta , \Pi}{
672
\vltr{\pi_1}{ \Gamma \seqar \Delta, A }{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
673
}{
674
\vltr{\pi_2}{\Sigma , A \seqar \Pi}{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
675
}
676
}
677
	\]
678
	
679
We can assume that $A$ in the conclusion of $\pi_2$ is free of modalities, since one of the $A$s must be directly descended from an induction formula, and so consists of only safe inputs. Therefore we have functions $\vec f^1 (\vec w; \vec x)$ and $\vec f^2 (\vec y ; \vec z , \vec u)$ for $\pi_1$ and $\pi_2 $ respectively (free variable parameters are suppressed), with $\vec u$ corresponding to the (safe) inputs for $A$. 
680

    
681
Let us write:
682
\[
683
\begin{array}{rcl}
684
\vec f^1 & = & ( \vec f^1_i )^{|\Delta|+1}_{i=1} \\
685
\vec f^2 & = & ( \vec f^2_i )^{|\Pi|}_{i=1}
686
\end{array}
687
\]
688
to denote the parts of $\vec f^i$ corresponding to formulae in their sequents.
689

    
690

    
691
We define:
692
\[
693
\vec f (\vec w , \vec y ; \vec x , \vec z)
694
\quad := \quad
695
(
696
( \vec f^1_i (\vec w ; \vec x) )_{i \leq |\Delta|}
697
,
698
\vec f^2 ( \vec y ; \vec z , \vec f^1_{|\Delta| +1 } (\vec w ; \vec x) )
699
)
700
\]
701

    
702
Notice that all safe inputs are hereditarily to the right of a $;$, and so these terms are definable by safe composition and projection.
703

    
704
\todo{another case for $!\nat(x)$ cut formula.}
705

    
706
\anupam{should work for arbitrary $!$-cuts due to principal case for $!$}
707

    
708

    
709
Right existential:
710
\[
711
\vlderivation{
712
\vlin{\rigrul{\exists}}{}{ \Gamma, \nat(t) \seqar \Delta , \exists x^\nat . A(x) }{
713
\vltr{\pi'}{ \Gamma \seqar \Delta , A(t) }{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
714
}
715
}
716
\]
717
Suppose we have functions $\vec f^X (\vec x ; \vec y)$ from $\pi'$ for $X = \Delta$ or $ X=A$. 
718
\[
719
\vec f(\vec x ; \vec y , z) \quad := \quad ( \vec f^\Delta (\vec x ; \vec y)   ,  z , \vec f^A ( \vec x ; \vec y ) )
720
\]
721

    
722

    
723
Left existential:
724
\[
725
\vlderivation{
726
\vlin{\lefrul{\exists}}{}{ \Gamma, \exists x^N . A(x) \seqar \Delta }{
727
\vltr{\pi'(a)}{ \Gamma,  N(a) , A(a) \seqar \Delta  }{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
728
}
729
}
730
\]
731

    
732
Suppose we have functions $\vec f' (\vec u , \vec v ; x , \vec y , \vec z)$ from $\pi'$ with $(\vec u ; \vec y )$ corresponding to $\Gamma$, $(;x)$ corresponding to $N(a)$ and $(\vec v ; \vec z)$ corresponding to $A(a)$. We define:
733
\[
734
\vec f ( \vec u , \vec v ;  )
735
\]
736

    
737
\anupam{should just be same, no? To consider.}
738

    
739
%Suppose we have functions $\vec g (\vec v  , \vec w ; \vec x , y , \vec z)$ where $(\vec v ; \vec x) $ corresponds to $\Gamma$, $y$ corresponds to $\nat(a)$ and $(\vec w ; \vec z) $ corresponds to $A(a)$. Then we define:
740
%\[
741
%\vec f ( \vec v , \vec w ; \vec x , y , \vec z )
742
%\]
743

    
744
\todo{finish}
745

    
746
!:
747
\[
748
\vlderivation{
749
\vlin{!}{}{\Gamma, !A \seqar \Delta}{
750
\vltr{\pi'}{ \Gamma , A \seqar \Delta  }{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
751
}
752
}
753
\]
754
\anupam{Again, use free-cut elimination to get rid of this case. We know $!$-ed formulae in advance, so just fix those as normal inputs in advance.}
755

    
756

    
757
Induction (we can assume there are no side-formulae on the right, for the same reason that there is no contraction case):
758
\[
759
\vlderivation{
760
\vliin{\ind}{}{ !\Gamma , !\nat (t) ,  A(\epsilon) \seqar A(t) }{
761
\vltr{\pi_0}{ !\Gamma , !\nat(a) , A(a) \seqar A(\succ_0 a)}{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
762
}{
763
\vltr{\pi_1}{ !\Gamma , !\nat(a), A(a)\seqar A(\succ_1 a) }{\vlhy{\quad }}{\vlhy{ }}{\vlhy{\quad }}
764
}
765
}
766
\]
767

    
768
Suppose we have functions $\vec f^i (\vec x , y ; \vec u)$ from proofs $\pi_i$ where $\vec x $ corresponds to $!\Gamma$, $y$ corresponds to $!N(a)$ and $\vec u$ corresponds to $A(a)$.
769

    
770

    
771
\anupam{role of $!N(a)$ versus having $a$ as a parameter to function. To consider!}
772

    
773
Define the function $f_k$ (for $k = 1 , \dots , |\vec u|$) by PRN:
774
\[
775
\begin{array}{rcl}
776
f_k(\epsilon, \vec x , y ; \vec u) & := & u_k \\
777
	f_k (\succ_i w , \vec x , y ; \vec u) & := & f^i_k ( \vec x ,  y ; \vec f ( w, \vec x , y ; \vec u ) )
778
\end{array}
779
\]
780

    
781
\anupam{check this, esp use of parameters, cf.\ above.}
782

    
783
\anupam{Is this not using a simultaneous version of PRN? Might need concatenation of words/basic coding to deal with this.}
784

    
785

    
786
\end{definition}
787

    
788
\newcommand{\concat}{\mathit{concat}}
789
\paragraph{Some points on concatenation \anupam{if necessary}}
790
We can define the concatenation operation by PRN:
791
\[
792
\begin{array}{rcl}
793
\concat (\epsilon ; y) & := & y \\
794
\concat (\succ_i x ; y) & := & \succ_i \concat (x ; y)
795
\end{array}
796
\]
797

    
798
From here we can define iterated concatenation:
799
\[
800
\concat (x_1 , \dots , x_n ; y) \quad := \quad \concat (x_n ; \concat (x_{n-1} ; \vldots \concat (x_1 ; y) \vldots ) )
801
\]
802

    
803
(Notice that all safe inputs are hereditarily to the right of $;$, so this is definable by safe composition and projection.)
804

    
805
\section{Further work}
806

    
807
We should say something about minimisation and the polynomial hierarchy. This could be achieved by controlling contraction/additives, since it requires a notion of tests.
808

    
809
\section{Conclusions}
810

    
811
\appendix
812

    
813
\section{Some cut-elimination cases}
814
We work in a one-sided calculus for full linear logic. When working in the affine setting we simply drop the condition that weakened formulae are $?$-ed.
815

    
816
As well as all the usual rules, we have the induction rule:
817
\[
818
\vlinf{\indr}{(x \notin \FV (\Gamma) )}{? \Gamma, A^\perp (0) , A(t)}{? \Gamma , A^\perp (x) , A(s x) }
819
\]
820

    
821
We give all the various cut-reduction cases in the following subsections, but first we give a simple cut-elimination argument.
822

    
823
\subsection{A basic (free-)cut elimination argument}
824
We give an argument that easily scales to free-cut elimination, i.e.\ in the presence of non-logical rules.
825

    
826
\newcommand{\anarrow}[2]{\overset{#1}{\underset{#2}{\longrightarrow}}}
827
\newcommand{\cutredlin}{\mathit{ma}}
828
\newcommand{\cutredexp}{\mathit{exp}}
829

    
830
\begin{definition}
831
	Let $\anarrow{}{\cutredexp}$ denote the set of reduction steps concerning exponential cuts, and let $\anarrow{}{\cutredlin}$ denote the set of cut-reduction steps concerning multiplicative and additive cuts.
832
\end{definition}
833

    
834
\newcommand{\rk}{\mathsf{rk}}
835
	\renewcommand{\deg}{\mathsf{deg}}
836
\begin{definition}
837
	[Measures]
838
	For a $\anarrow{}{\cutredlin}$-redex $r$, let its \emph{degree} $\deg( r)$ be the number of logical connectives or quantifiers in its cut-formula. For a proof $\pi$, its \emph{degree} $\deg( \pi)$ is the multiset of the degrees of its redexes.
839
\end{definition}
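
For instance (purely to illustrate the measure), a redex whose cut-formula is $(a \vlte b) \vlpa c$, with $a , b , c$ atomic, has degree $2$; if the only other redex of the proof has cut-formula $\exists x . a(x)$, then the degree of the proof is the multiset $\{ 2 , 1 \}$.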
840

    
841
\begin{proposition}
842
If $\pi$ is not $\anarrow{}{\cutredlin}$-normal then there is a derivation $L:\pi \anarrow{*}{\cutredlin} \pi'$ such that $\deg(\pi')< \deg(\pi)$.
843
\end{proposition}
844
\begin{proof}
845
	First let us show the statement in the special case that $\pi$ contains precisely one $\cut$-inference, as its final step, by induction on $|\pi|$. Each commutative $\anarrow{}{\cutredlin}$-step allows us to apply the inductive hypothesis to a smaller proof, or in the case of $\vlan$ to two smaller proofs. Each key $\anarrow{}{\cutredlin}$-case strictly reduces the degree of the proof. Consequently we can eventually transform $\pi$ into a proof $\pi'$ which has possibly several cuts, each of smaller degree, as required.
846
	
847
	Now, for the general case, we simply apply the above procedure to the subproof above a topmost cut in $\pi$.
848
\end{proof}
849
\begin{corollary}
850
	$\anarrow{}{\cutredlin}$ is weakly normalising.
851
\end{corollary}
852

    
853

    
854
\newcommand{\cntdown}{\mathit{cn}}
855
\begin{definition}
856
	[Lazy contraction]
857
	Let $\anarrow{}{\cntdown}$ consist of the following reduction steps:
858
	\begin{enumerate}
859
		\item The commuting steps,
860
			\[
861
			\vlderivation{
862
				\vlin{\rho}{}{\Gamma', ?A }{
863
					\vlin{\cntr}{}{\Gamma, ?A}{\vlhy{\Gamma, ? A, ? A}}
864
				}
865
			}
866
			\quad\to \quad
867
			\vlderivation{
868
				\vlin{\cntr}{}{\Gamma', ?A}{
869
					\vlin{\rho}{}{\Gamma', ?A, ?A}{\vlhy{\Gamma, ? A, ? A}}
870
				}
871
			}	
872
			\]
873
			for each unary rule $\rho$.
874
			\item 
875
			The key steps:
876
			\[ 	
877
			\begin{array}{rcl}
878
			\vlderivation{
879
				\vlin{\cntr}{}{\Gamma, ?A}{
880
					\vlin{\wk}{}{\Gamma, ?A, ?A}{\vlhy{\Gamma, ?A}}
881
				}
882
			}
883
			\quad &\to & \quad
884
			\Gamma, ?A 
885
%			\]
886
%			and
887
%			\[
888
\\
889
\noalign{\smallskip}
890
				\vlderivation{
891
					\vlin{\wk}{}{\Gamma, ?A, ?A}{
892
						\vlin{\cntr}{}{\Gamma, ?A}{\vlhy{\Gamma, ?A, ?A}}
893
					}
894
				}
895
				\quad &\to& \quad
896
				\Gamma, ?A, ?A
897
							\end{array}
898
			\]
899
\item The commuting steps,
900
\[
901
	\vlderivation{
902
		\vliin{\rho}{}{\Gamma ,?A}{
903
			\vlin{\cntr}{}{\Gamma_1, ?A}{\vlhy{\Gamma_1, ?A, ?A}}
904
		}{\vlhy{\Gamma_2}}
905
	}
906
	\quad \to \quad
907
	\vlderivation{
908
		\vlin{\cntr}{}{\Gamma, ?A}{
909
			\vliin{\rho}{}{\Gamma,?A, ?A}{\vlhy{\Gamma_1, ?A, ?A}}{ \vlhy{\Gamma_2} }
910
		}
911
	}
912
\]
913
%\[
914
%\begin{array}{rcl}
915
%	\vlderivation{
916
%		\vliin{\land}{}{\Gamma, \Delta, ?A, B\land C}{
917
%			\vlin{\cntr}{}{\Gamma, ?A, B}{\vlhy{\Gamma, ?A, ?A, B}}
918
%		}{\vlhy{\Delta, C}}
919
%	}
920
%	\quad &\to &\quad
921
%	\vlderivation{
922
%		\vlin{\cntr}{}{\Gamma, \Delta, ?A, B\land C}{
923
%			\vliin{\land}{}{\Gamma, \Delta, ?A, ?A, B\land C}{\vlhy{\Gamma, ?A, ?A, B}}{ \vlhy{\Delta, C} }
924
%		}
925
%	}
926
%%	\]
927
%%	
928
%%	\[
929
%\\
930
%\noalign{\smallskip}
931
%	\vlderivation{
932
%		\vliin{\cut}{}{\Gamma, \Delta, ?A}{
933
%			\vlin{\cntr}{}{\Gamma, ?A, B}{\vlhy{\Gamma, ?A, ?A, B}}
934
%		}{\vlhy{\Delta, \lnot{B}}}
935
%	}
936
%	\quad &\to& \quad
937
%	\vlderivation{
938
%		\vlin{\cntr}{}{\Gamma, \Delta, ?A}{
939
%			\vliin{\cut}{}{\Gamma, \Delta, ?A, ?A}{\vlhy{\Gamma, ?A, ?A, B}}{ \vlhy{\Delta, \lnot{B}} }
940
%		}
941
%	}
942
%\end{array}
943
%\]
944
for each binary rule $\rho \in \{ \land, \cut \}$.
945
\item The commuting step:
946
	\[
947
	\vlderivation{
948
		\vliin{\vlan}{}{\Gamma, ?A, B\vlan C}{
949
			\vlin{\cntr}{}{\Gamma, ?A, B}{\vlhy{\Gamma, ?A, ?A, B}}
950
		}{\vlhy{\Gamma, ?A, C}}
951
	}
952
	\quad \to \quad
953
	\vlderivation{
954
		\vlin{\cntr}{}{\Gamma, ?A, B\vlan C}{
955
			\vliin{\vlan}{}{\Gamma, ?A, ?A, B\vlan C}{\vlhy{\Gamma, ?A, ?A, B}}{
956
				\vlin{\wk}{}{\Gamma, ?A, ?A, C}{\vlhy{\Gamma, ?A, C}}
957
			}
958
		}
959
	}
960
	\]
961
	\end{enumerate}
962
	\todo{For binary rules?}
963

    
964
\end{definition}
965
\begin{proposition}
966
	$\anarrow{}{\cntdown}$ is weakly normalising, modulo $\cntr$-$\cntr$ redexes, and degree-preserving.
	If $\pi$ is $\anarrow{}{\cutredlin}$-normal and $\pi \downarrow_\cntdown \pi'$, then $\pi'$ is also $\anarrow{}{\cutredlin}$-normal.
970
\end{proposition}
971
\begin{proof}
972
	By induction on the number of redexes in the proof. We start with a bottommost redex, proceeding by induction on the number of inference steps beneath it.
973
\end{proof}
974

    
975
\begin{remark}
976
	The reduction is only weakly normalising since $\cntr$ can commute with $\cntr$, allowing cycles.
977
\end{remark}
978

    
979
\begin{proposition}
980
	If $\pi$ is $\anarrow{}{\cutredexp}$-reducible but $\anarrow{}{\cutredlin}$- and $\anarrow{}{\cntdown}$-normal then there is a derivation $H:\pi \anarrow{*}{\cutredexp} \pi'$ such that $\deg (\pi')<\deg(\pi)$.
981
\end{proposition}
982

    
983

    
984

    
985
\subsection{Base cases}
986

    
987
\[
988
\vlderivation{
989
\vliin{\cut}{}{ \Gamma,a }{ \vlhy{\Gamma,a} }{ \vlin{\id}{}{a,a^\bot}{ \vlhy{} } }
990
}
991
\quad\to\quad
992
\vlderivation{
993
\Gamma,a
994
}
995
\]
996

    
997
\[
998
\vlderivation{
999
\vliin{\cut}{}{\Gamma}{ \vlin{\bot}{}{ \Gamma,\bot }{ \vlhy{\Gamma} } }{ \vlin{1}{}{1}{ \vlhy{} } }
1000
}
1001
\quad\to\quad
1002
\vlderivation{
1003
\Gamma
1004
}
1005
\]
1006

    
1007
\[
1008
\vlderivation{
1009
\vliin{\cut}{}{ \Gamma,\Delta,\top }{ \vlin{\top}{}{ \Gamma,\top,0 }{ \vlhy{} } }{ \vlhy{\Delta , \top } }
1010
}
1011
\quad\to\quad
1012
\vlinf{\top}{}{ \Gamma,\Delta,\top }{}
1013
\]
1014

    
1015
\subsection{Commutative cases}
1016
These are the cases when the principal formula of a step is not active for the cut-step below.
1017

    
1018
\begin{enumerate}
1019
\item The cut-step is immediately preceded by an initial step. Note that only the case for $\top$ applies:
1020
\[
1021
\vlderivation{
1022
\vliin{\cut}{}{\Gamma,\Delta,\top}{ 
1023
\vlin{\top}{  }{ \Gamma,\top,A }{ \vlhy{} }
1024
 }{ \vlhy{\Delta,A^\bot } }
1025
}
1026
\quad\to\quad
1027
\vlinf{\top}{}{ \Gamma,\Delta,\top }{}
1028
\]
1029
%Notice that this commutation eliminates the cut entirely and preserves the height of all cuts below.
1030
\item\label{CaseCommOneStep} A cut-step is immediately preceded by a step $\rho$ with exactly one premiss:
1031
\[
1032
\vlderivation{
1033
\vliin{\cut}{}{\Gamma',\Delta}{\vlin{\rho}{}{\Gamma',A}{\vlhy{\Gamma,A}}}{\vlhy{A^\bot , \Delta}}
1034
}
1035
\]
1036
We have three subcases:
1037
\begin{enumerate}
1038
\item\label{subcase:unary-commutation-vanilla} 
1039
%$\Delta$ consists of only $?$-ed formulae or 
1040
If $\rho \notin \{! , \indr \}$ then we can simply permute as follows:
1041
\begin{equation}
1042
\label{eqn:com-case-unary-simple}
1043
\vlderivation{
1044
\vliin{\cut}{}{\Gamma',\Delta}{\vlin{\rho}{}{\Gamma',A}{\vlhy{\Gamma,A}}}{\vlhy{A^\bot , \Delta}}
1045
}
1046
\quad \to \quad
1047
\vlderivation{
1048
\vlin{\rho}{}{\Gamma',\Delta}{
1049
\vliin{\cut}{}{\Gamma,\Delta}{\vlhy{\Gamma,A}}{\vlhy{A^\bot , \Delta}}
1050
}
1051
}
1052
\end{equation}
1053
(If $\rho$ was $\forall$ then we might need to rename some free variables in $\Delta$.)
1054
\item $\rho $ is a $!$-step. Therefore $\Gamma$ has the form $? \Sigma , B$ and $A$ has the form $?C$, yielding the following situation:
1055
\[
1056
\vlderivation{
1057
\vliin{\cut}{}{? \Sigma, !B, \Delta }{
1058
\vlin{!}{}{? \Sigma, !B , ?C}{\vlhy{?\Sigma , B , ?C}  }
1059
}{
1060
\vlin{\rho'}{}{!C^\perp , \Delta}{\vlhy{\vlvdots}}
1061
}
1062
}
1063
\]
1064
Now, if $!C^\perp$ is principal for $\rho'$ then $\rho'\in \{! , \indr\}$ and so $\Delta $ must be $?$-ed; in this case we are able to apply the transformation \eqref{eqn:com-case-unary-simple}. Otherwise $!C^\perp$ is not principal for $\rho'$, and so $\rho' \notin \{! , \indr \}$, which means we can apply \ref{subcase:unary-commutation-vanilla} along this side of the subproof.
1065
\item $\rho$ is a $\indr$-step. Therefore $\Gamma$ has the form $? \Sigma , B^\perp(x), B(s x) $ and $A$ has the form $?C$, and the argument is similar to the case above.
1066
% yielding the following situation:
1067
%\[
1068
%\vlderivation{
1069
%	\vliin{\cut}{}{? \Sigma, B^\perp (0) , B(t), \Delta }{
1070
%		\vlin{\indr}{}{? \Sigma, B^\perp (0) , B(t) , ?C}{\vlhy{?\Sigma , B^\perp(x) , B(s x) , ?C}  }
1071
%	}{
1072
%	\vlin{\rho'}{}{!C^\perp , \Delta}{\vlhy{\vlvdots}}
1073
%}
1074
%}
1075
%\]
1076
\end{enumerate}
1077

    
1078

    
1079

    
1080
\item A cut-step is immediately preceded by a $\vlte $-step:
1081
\[
1082
\vlderivation{
1083
\vliin{\cut}{}{\Gamma,\Delta,\Sigma,A\vlte B }{
1084
\vliin{\vlte}{}{\Gamma,\Delta,A\vlte B,C }{\vlhy{\Gamma,A,C}}{\vlhy{\Delta,B}}
1085
}{\vlhy{ \Sigma , C^\bot  }}
1086
}
1087
\quad\to\quad
1088
\vlderivation{
1089
\vliin{\vlte}{}{\Gamma,\Delta,\Sigma,A\vlte B}{
1090
\vliin{\cut}{}{\Gamma,\Sigma,A}{\vlhy{\Gamma,A,C}}{\vlhy{\Sigma,C^\bot}}
1091
}{\vlhy{\Delta,B}}
1092
}
1093
\]
1094
%Notice that this commutation might increase the height of any cuts below along the right branch of the indicated cut, but decreases the height along the left branch.
1095
\item\label{CommCaseAmpersand} A cut-step is immediately preceded by a $\vlan$-step.
1096
\[
1097
\vlderivation{
1098
\vliin{\cut}{}{ \Gamma,\Delta,A\vlan B }{ \vliin{\vlan}{}{\Gamma,A\vlan B,C}{ \vlhy{\Gamma,A,C} }{\vlhy{\Gamma,B,C}}}{ \vlhy{\Delta, C^\bot} }
1099
}
1100
\quad\to\quad
1101
\vlderivation{
1102
\vliin{\vlan}{}{\Gamma,\Delta,A\vlan B }{
1103
\vliin{cut}{}{\Gamma,\Delta,A }{ \vlhy{\Gamma,A,C} }{ \vlhy{ \Delta,C^\bot } }
1104
}{
1105
\vliin{ \cut }{}{\Gamma,\Delta,B}{ \vlhy{\Gamma,B,C } }{ \vlhy{\Delta,C^\bot} }
1106
}
1107
}
1108
\]
1109
\item (We do not include a step for cuts commuting with cuts; such a step would have the following form.)
1110
%A cut-step is immediately preceded by another cut-step:
1111
\[
1112
\vlderivation{
1113
	\vliin{\cut}{}{\Gamma, \Delta, \Sigma}{
1114
		\vliin{\cut}{}{\Gamma, \Delta, B}{\vlhy{\Gamma, A}}{\vlhy{\Delta, \lnot{A}, B}}
1115
		}{\vlhy{\Sigma, \lnot{B}}}
1116
	}
1117
	\quad\to\quad
1118
\vlderivation{
1119
	\vliin{\cut}{}{\Gamma, \Delta, \Sigma}{\vlhy{\Gamma, A}}{
1120
		\vliin{\cut}{}{\Delta, \Sigma, \lnot{A}}{\vlhy{\Delta, \lnot{A}, B}}{\vlhy{\Sigma, \lnot{B}}}
1121
		}
1122
	}
1123
\]
1124
\end{enumerate}
1125

    
1126

    
1127

    
1128
\subsection{Key cases: structural steps}
1129
%[THIS SECTION MIGHT NOT BE NECESSARY]
1130
We attempt to permute cuts on exponential formulae over structural steps whose principal formula is active for the cut. 
1131

    
1132
We use the commutative case \ref{CaseCommOneStep} to assume that the topmost cut-step in a sequence has already been permuted up to its origin on the $!$-side. 
1133

    
1134
We have two cases.
1135
\begin{enumerate}
1136
\item\label{StructStepCont} The structural step preceding the cut is a weakening:
1137
\[
1138
\vlderivation{
1139
\vliin{\cut}{}{?\Gamma,\Delta}{ \vlin{!}{}{ ?\Gamma,!A }{ \vlhy{?\Gamma , A} } }{ \vlin{w}{}{ ?A^\bot , \Delta }{ \vlhy{\Delta} } }
1140
}
1141
\quad\to\quad
1142
\vlinf{w}{}{?\Gamma,\Delta}{ \Delta }
1143
\]
1144
%\item The structural step preceding the cut is a contraction:
1145
%\[
1146
%\vlderivation{
1147
%\vliin{\cut}{}{ ?\Gamma,\Delta }{
1148
% \vlin{!}{}{ ?\Gamma,!A }{ \vlhy{?\Gamma , A} }
1149
%}{ \vlin{c}{}{ ?A^\bot , \Delta }{ \vlhy{ ?A^\bot , ?A^\bot ,  \Delta} } }
1150
%}
1151
%\quad\to\quad
1152
%\vlderivation{
1153
%\vliq{c}{}{?\Gamma, \Delta}{
1154
%\vliin{\cut}{}{?\Gamma, ?\Gamma,\Delta }{
1155
% \vlin{!}{}{ ?\Gamma,!A }{ \vlhy{?\Gamma , A} }
1156
%}{
1157
%\vliin{\cut}{}{?\Gamma,?A^\bot , \Delta }{
1158
% \vlin{!}{}{ ?\Gamma,!A }{ \vlhy{?\Gamma , A} }
1159
%}{ \vlhy{?A^\bot , ?A^\bot , \Delta } }
1160
%}
1161
%}
1162
%}
1163
%\]
1164
%OR use a multicut:
1165
%\[
1166
%\vlderivation{
1167
%\vliin{\cut}{}{ ?\Gamma,\Delta }{
1168
% \vlin{!}{}{ ?\Gamma,!A }{ \vlhy{?\Gamma , A} }
1169
%}{ \vlin{c}{}{ ?A^\bot , \Delta }{ \vlhy{ ?A^\bot , ?A^\bot ,  \Delta} } }
1170
%}
1171
%\quad\to\quad
1172
%\vlderivation{
1173
%\vliin{\multicut}{}{?\Gamma,?A^\bot , \Delta }{
1174
% \vlin{!}{}{ ?\Gamma,!A }{ \vlhy{?\Gamma , A} }
1175
%}{ \vlhy{?A^\bot , ?A^\bot , \Delta } }
1176
%}
1177
%\]
1178
\item The structural step preceding a cut is a contraction:
1179
\[
1180
\vlderivation{
1181
	\vliin{\cut}{}{?\Gamma, \Sigma }{ \vlhy{?\Gamma,!A^\bot} }{
1182
		\vlin{\cntr}{}{ ?A,\Sigma }{ \vlhy{ ?A,?A,\Sigma } }
1183
	}
1184
}
1185
\quad\to\quad
1186
\vlderivation{
1187
	\vliq{\cntr}{}{ ?\Gamma,\Sigma }{
1188
		\vliin{\cut}{}{ ?\Gamma,?\Gamma,\Sigma }{ \vlhy{?\Gamma,!A^\bot } }{
1189
			\vliin{\cut}{}{ ?A,?\Gamma,\Sigma }{ \vlhy{?\Gamma,!A^\bot} }{ \vlhy{ ?A,?A,\Sigma } }
1190
		}
1191
	}
1192
}
1193
\]
1194
%\item The structural step preceding a cut is a parallel contraction:
1195
%\[
1196
%\vlderivation{
1197
%\vliin{\cut}{}{?\Gamma,?\Delta, \Sigma }{ \vlhy{?\Gamma,!A^\bot} }{
1198
%\vlin{}{}{ ?A,?\Delta,\Sigma }{ \vlhy{ ?A,?A,?\Delta,?\Delta,\Sigma } }
1199
%}
1200
%}
1201
%\quad\to\quad
1202
%\vlderivation{
1203
%\vlin{}{}{ ?\Gamma,?\Delta,\Sigma }{
1204
%\vliin{\cut}{}{ ?\Gamma,?\Gamma,?\Delta,?\Delta,\Sigma }{ \vlhy{?\Gamma,!A^\bot } }{
1205
%\vliin{\cut}{}{ ?A,?\Gamma,?\Delta,?\Delta,\Sigma }{ \vlhy{?\Gamma,!A^\bot} }{ \vlhy{ ?A,?A,?\Delta,?\Delta,\Sigma } }
1206
%}
1207
%}
1208
%}
1209
%\]
1210
\end{enumerate}
1211

    
1212
\subsection{Key cases: logical steps}
1213
Finally, once all cuts have been permuted maximally on both sides, we have the cases where the cut-formula is principal for a preceding logical step on both sides. We have four cases, one for each pair of dual connectives or quantifiers.
1214
\begin{enumerate}
1215
\item The cut-formula is multiplicative:
1216
\[
1217
\vlderivation{
1218
\vliin{\cut}{}{ \Gamma,\Delta,\Sigma }{
1219
\vliin{\vlte}{}{ \Gamma,\Delta,A\vlte B }{ \vlhy{\Gamma,A} }{ \vlhy{\Delta,B} }
1220
}{ \vlin{\vlpa}{}{ \Sigma, A^\bot \vlpa B^\bot }{ \vlhy{\Sigma , A^\bot , B^\bot} } }
1221
}
1222
\quad\to\quad
1223
\vlderivation{
1224
\vliin{\cut}{}{ \Gamma,\Delta,\Sigma }{ \vlhy{\Gamma,A} }{
1225
\vliin{\cut}{}{ \Delta,\Sigma,A^\bot }{ \vlhy{\Delta,B} }{ \vlhy{\Sigma , A^\bot,B^\bot } }
1226
}
1227
}
1228
\]
1229
\item The cut-formula is additive:
1230
\[
1231
\vlderivation{
1232
\vliin{\cut}{}{ \Gamma,\Delta }{ 
1233
\vliin{\vlan}{}{ \Gamma,A\vlan B }{ \vlhy{\Gamma,A} }{ \vlhy{\Gamma,B} }
1234
 }{ \vlin{\vlor}{}{ \Delta,A^\bot\vlor B^\bot }{ \vlhy{\Delta, A^\bot } } }
1235
}
1236
\quad\to\quad
1237
\vliinf{\cut}{}{ \Gamma,\Delta }{ \Gamma,A }{ \Delta,A^\bot }
1238
\]
1239
where the case for the other $\vlor$-rule is symmetric.
1240
\item The cut-formula is exponential.
1241
\[
1242
\vlderivation{
1243
\vliin{\cut}{}{ ?\Gamma,\Delta }{
1244
 \vlin{!}{}{ ?\Gamma,!A }{ \vlhy{?\Gamma , A} }
1245
}{
1246
\vlin{?}{}{ \Delta,?A^\bot }{ \vlhy{\Delta, A^\bot } }
1247
}
1248
}
1249
\quad\to\quad
1250
\vliinf{\cut}{}{ ?\Gamma,\Delta }{ ?\Gamma,A }{ \Delta,A^\bot }
1251
\]
1252
\item The cut-formula is quantified.
1253
\[
1254
\vlderivation{
1255
	\vliin{\cut}{}{\Gamma, \Delta}{
1256
		\vlin{\forall}{}{\Gamma, \forall x. A(x)}{\vltr{\pi(a)}{\Gamma, A(a)}{\vlhy{\quad}}{\vlhy{}}{\vlhy{\quad}}}
1257
		}{
1258
		\vlin{\exists}{}{\Delta, \exists x . \lnot{A}(x)}{\vlhy{\Delta, \lnot{A}(t)}}
1259
		}
1260
	}
1261
	\quad \to \quad
1262
	\vlderivation{
1263
		\vliin{\cut}{}{\Gamma, \Delta}{
1264
			\vltr{\pi(t)}{\Gamma, A(t)}{\vlhy{\quad}}{\vlhy{}}{\vlhy{\quad}}
1265
			}{\vlhy{\Delta, \lnot{A}(t)}}
1266
		}
1267
\]
1268
\end{enumerate}
1269

    
1270

    
1271
\end{document}