\documentclass[leqno]{amsart}
\usepackage{amsmath,amsfonts,amsthm,amssymb,indentfirst,epic,url}
\setlength{\textwidth}{6.5in}
\setlength{\textheight}{9in}
\setlength{\evensidemargin}{0in}
\setlength{\oddsidemargin}{0in}
\setlength{\topmargin}{-.5in}
\sloppy
\setlength{\mathsurround}{1.67pt}
\newcommand{\<}{\kern.0833em}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{definition}[theorem]{Definition}
\newcommand{\U}{\mathcal{U}}
\newcommand{\card}{\mathrm{card}}
\newcommand{\supp}{\mathrm{supp}}
\newcommand{\fl}[1]{{\lfloor #1\rfloor}}
\newcommand{\strt}[1][1.7]{\vrule width0pt height0pt depth#1pt}% ^ strut
\raggedbottom
\begin{document}
\begin{center}
\texttt{
This is the final preprint version of a paper which
appeared at\\[.3em]J. Alg.~356 (2012) 257-274.
The published version is accessible to\\[.3em]subscribers
at \ \url{http://dx.doi.org/10.1016/j.jalgebra.2012.01.004} .}
\vspace{2em}
\end{center}
\title[Maps on $k^I$ and product algebras]{Linear maps
on $k^I,$ and homomorphic images of infinite~direct~product~algebras}%
\thanks{After publication of this note, updates, errata, related
references etc., if found, will be recorded at
\url{http://math.berkeley.edu/~gbergman/papers/}
}
\subjclass[2010]{Primary: 15A04, 17A01, 17B05.
% lin_maps nonass L:strc
Secondary: 03C20, 03E55, 08B25, 17B20, 17B30.} % 17B65
% ultrapr large-card prod&& ssmpl slv/np inf--dim
\keywords{linear maps of $k^I$ to vector spaces of small dimension;
measurable cardinals;
homomorphisms on infinite direct products of nonassociative algebras;
simple, solvable, and nilpotent Lie algebras%
}
\author{George M. Bergman}
\address[G. Bergman]{University of California\\
Berkeley, CA 94720-3840, USA}
\email{gbergman@math.berkeley.edu}
\author{Nazih Nahlus}
\address[N. Nahlus]{American University of Beirut\\ %
Beirut, Lebanon}
\email{nahlus@aub.edu.lb}
\begin{abstract}
Let $k$ be an infinite field, $I$ an infinite set,
$V$ a $\!k\!$-vector-space, and $g:k^I\to V$ a $\!k\!$-linear map.
It is shown that if $\dim_k(V)$ is not too large
(under various hypotheses on $\card(k)$ and $\card(I),$
if it is finite, respectively less than $\card(k),$
respectively less than the continuum),
then $\ker(g)$ must contain elements $(u_i)_{i\in I}$
with all but finitely many components $u_i$ nonzero.
These results are used to prove that
every homomorphism from a direct product $\prod_I A_i$
of not-necessarily-associative algebras $A_i$ onto an algebra $B,$
where $\dim_k(B)$ is not too large (in the same senses) is
the sum of a map factoring
through the projection of $\prod_I A_i$ onto the product of
finitely many of the $A_i,$ and a map into
the ideal $\{b\in B\mid bB=Bb=\{0\}\}\subseteq B.$
Detailed consequences
are noted in the case where the $A_i$ are Lie algebras.
A version of the above result is also obtained with
the field $k$ replaced by a commutative valuation ring.
\end{abstract}
\maketitle
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
This note resembles \cite{prod_Lie1} in that the two papers obtain
similar results on homomorphisms on infinite product algebras;
but the methods are different, and the hypotheses under which the
methods of one note work
are in some ways stronger, in others weaker, than those of the other.
Also, in~\cite{prod_Lie1} we obtain many consequences from
our results, while here we aim for brevity, and
after one main result about general algebras, restrict
ourselves to a couple of quick consequences for Lie algebras.
The authors are grateful to Leo Harrington and Tom Scanlon for
helpful pointers to the literature, and to Jason Bell for
the strengthened version of Lemma~\ref{L.JPBell} used below.
\section{Definitions, and first results}\label{S.first}
Let us fix some terminology and notation.
\begin{definition}\label{D.alg&}
Throughout this note, $k$ will be a field.
By an {\em algebra} over $k$ we shall mean a
$\!k\!$-vector-space $A$ given with a $\!k\!$-bilinear
multiplication $A\times A\to A,$ which we do not
assume associative or unital.
If $A$ is an algebra, we define its {\em total annihilator ideal} to be
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.Z(A)}
$Z(A)~\,=~\,\{x\in A\mid xA=Ax=\{0\}\}.$
\end{minipage}\end{equation}
If $a=(a_i)_{i\in I}$ is an element of a direct product algebra
$A=\prod_I A_i,$ then we define its {\em support} as
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.supp}
$\supp(a)\ =\ \{i\in I\mid a_i\neq 0\}.$
\end{minipage}\end{equation}
%
For $J$ any subset of $I,$ we shall identify $\prod_{i\in J} A_i$
with the subalgebra of $\prod_{i\in I} A_i$ consisting of elements
whose support is contained in $J.$
We also define the subalgebra
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.Afin}
$A_\mathrm{fin}\ =\ \{a\in A\mid\supp(a)$ {\rm is finite}$\}.$
\end{minipage}\end{equation}
%
\end{definition}
The importance of $\!k\!$-linear functions on spaces $k^I$
to the study of homomorphisms on direct product algebras arises
from the following curious observation:
\begin{lemma}\label{L.supp}
Suppose $(A_i)_{i\in I}$ is a family of $\!k\!$-algebras,
$B$ a $\!k\!$-algebra,
$f:A=\prod_{i\in I}A_i\to B$ a surjective algebra homomorphism,
and $a=(a_i)_{i\in I}$ an element of $A,$ and consider the linear map
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.g_a}
$g_a:k^I\to B$\quad defined by\quad $g_a((u_i))=f((u_i a_i))$\quad
for all\quad $(u_i)\in k^I.$
\end{minipage}\end{equation}
Then\\[.2em]
%
\textup{(i)}~ If $\ker(g_a)$ contains an element $u=(u_i)_{i\in I}$
whose support is all of $I,$ then $f(a)\in Z(B).$\\[.2em]
%
\textup{(ii)}~ More generally, for any $u\in\ker(g_a),$ if we write
$a=a'+a'',$ where $\supp(a')\subseteq\supp(u)$ and
$\supp(a'')\subseteq I-\supp(u),$ then $f(a')\in Z(B).$\\[.2em]
%
\textup{(iii)}~ Hence, if $\ker(g_a)$ contains an element
whose support is cofinite in $I,$ then $a$ is the sum of an
element $a'\in f^{-1}(Z(B))$ and an element $a''\in A_\mathrm{fin}.$
\end{lemma}
\begin{proof}
(i): Given $u$ as in (i), and any $b\in B,$ let us write $b=f(x),$ where
$x=(x_i)\in A,$ and compute
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.fxa}
$f(a)\,b\ =\ f(a)f(x)\ =\ f(a\,x)\ =\ f((a_i\,x_i))\ =
\ f((u_i\,a_i\,u_i^{-1}\,x_i))\\[.2em]
\strt\quad=\ f((u_i\,a_i))\,f((u_i^{-1}\,x_i))\ =
\ 0\,f((u_i^{-1}\,x_i))\ =\ 0.$
\end{minipage}\end{equation}
%
So $f(a)$ left-annihilates all elements of $B;$ and by the
same argument with the order of factors reversed, it
right-annihilates all elements of $B.$
Thus, $f(a)\in Z(B),$ as claimed.
(ii): Let $u'\in k^I$ be defined by taking $u'_i=u_i$ for
$i\in\supp(u),$ and $u'_i=1$ for $i\notin\supp(u).$
Thus, $\supp(u')=I;$ moreover, $u'a'=ua,$
whence $f(u'a')=f(ua)=0.$
Hence, $\ker(g_{a'})$
contains the element $u'$ whose support is $I;$
so by~(i), $f(a')\in Z(B).$
(iii) clearly follows from~(ii).
\end{proof}
Remark: In the context of the above lemma, if the element
of $k^I$ having all entries equal to $1$ lies in $\ker(g_a),$ this
says that $f(a)=0.$
Part~(i) of the lemma says that, more generally, if an element with all
entries invertible lies in $\ker(g_a),$ then $f(a)$ is ``very
close to'' being zero.
Motivated by statement~(iii) of the lemma, let us look
for conditions under which the kernel
of a homomorphism on $k^I$ must contain elements of cofinite support.
Here is an easy one.
\begin{lemma}\label{L.poly}
Let $I$ be a set with $\card(I)\leq\card(k),$
and $g: k^I\to V$ a $\!k\!$-linear map, for some finite-dimensional
$\!k\!$-vector-space $V.$
Then there exists $u\in\ker(g)$ such that $I-\supp(u)$ is finite.
\end{lemma}
\begin{proof}
By the assumption on $\card(I),$ we can choose $x=(x_i)\in k^I$
whose entries $x_i$ are distinct.
Regarding $k^I$ as a $\!k\!$-algebra under componentwise operations,
let us map the polynomial algebra $k[t]$ into it by
the homomorphism sending $t$ to this~$x.$
Composing with $g:k^I\to V,$ we get a $\!k\!$-linear
map $k[t]\to V.$
Since $V$ is finite-dimensional, this map has nonzero kernel, so we
may choose $0\neq p(t)\in k[t]$ such that $p(x)\in\ker(g).$
Since the polynomial $p$ has only finitely many
roots, $p(x_i)$ is zero for only finitely many $i,$
so $p(x)$ gives the desired~$u.$
\end{proof}
Applying Lemma~\ref{L.poly} to maps $g_a$ as in Lemmas~\ref{L.supp},
and calling on statement~(iii) of the latter, we get
\begin{proposition}\label{P.first}
Let $k$ be an infinite field, let $(A_i)_{i\in I}$ be a family
of $\!k\!$-algebras such that the index set $I$
has cardinality $\leq\card(k),$ let
$A=\prod_I A_i,$ and let $f:A\to B$ be any surjective algebra
homomorphism to a finite-dimensional $\!k\!$-algebra $B.$
Then $B=f(A_{\mathrm{fin}})+Z(B).$
\textup{(}Equivalently, $A= A_{\mathrm{fin}}+f^{-1}(Z(B)).)$
Hence $B$ is the sum of $Z(B)$ and the \textup{(}mutually
annihilating\textup{)} images of finitely many of the $A_i.$
\end{proposition}
\begin{proof}
The first assertion follows immediately from the two preceding lemmas.
To get the final assertion, we note that since $B$ is
finite-dimensional, its subalgebra
$f(A_{\mathrm{fin}})=f(\bigoplus_I A_i)=\sum_I f(A_i)$
must be spanned by the images of finitely many of the $A_i,$
and since the $A_i,$ as subalgebras of $A,$ annihilate
one another, so do those images.
\end{proof}
In the next two sections we shall obtain three strengthenings of
Lemma~\ref{L.poly}, two of which weaken the assumption of
finite-dimensionality of $V,$ while the third, instead,
weakens the restriction on $\card(I).$
\section{Larger-dimensional $V.$}\label{S.dimB}
Our first generalization of Lemma~\ref{L.poly}
will be obtained by replacing the countable-dimensional
polynomial ring $k[t]$ by a subspace of the rational
function field $k(t)$ which has dimension $\card(k)$ over $k.$
Rational functions are not, strictly
speaking, functions; but that will be easy to fudge.
\begin{lemma}\label{L.rat}
For each $c\in k,$ let $p^{(c)}\in k^k$
be the function which for every $x\in k-\{c\}$ has
$p^{(c)}(x)=(x-c)^{-1},$ and at $c$ has the value $0.$
Then any nontrivial linear combination of the elements $p^{(c)}$
has at most finitely many zeroes.
Hence if $I$ is a set of cardinality $\leq\card(k),$ and $g$ is a
$\!k\!$-linear map of $k^I$ to
a $\!k\!$-vector-space $V$ of dimension $<\card(k),$ then $\ker(g)$
contains an element $u$ of cofinite support.
\end{lemma}
\begin{proof}
In $k(t),$ any linear combination of elements
$(t-c_1)^{-1},\ \dots\,,\ (t-c_n)^{-1}$ for distinct
$c_1,\dots,c_n\in k$ $(n\geq 1),$ such that each of these elements
has nonzero coefficient, gives a {\em nonzero} rational function
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.rat}
$a_1(t-c_1)^{-1}+\dots+\,a_n(t-c_n)^{-1}\ =
\ h(t)/((t-c_1)\dots(t-c_n))$\quad (where $h(t)\in k[t]).$
\end{minipage}\end{equation}
%
Indeed, to see that~(\ref{d.rat}) is nonzero
in $k(t),$ multiply by any $t-c_m.$
Then we can evaluate both sides at $c_m,$ and we find that
the left-hand side then has a unique nonzero term; so we must
have $h(c_m)\neq 0.$
Hence $h(t)$ is a nonzero element of $k[t],$
so~(\ref{d.rat}) is a nonzero element of~$k(t).$
If we now take the corresponding linear combination of
$p^{(c_1)},\,\dots\,,\,p^{(c_n)}$ in $k^k,$ the result has the value
$h(c)/((c-c_1)\dots(c-c_n))$ at each $c\neq c_1,\dots,c_n.$
Hence it is nonzero everywhere except at the finitely many
zeroes of $h(t),$ and some subset of the finite set $\{c_1,\dots,c_n\}.$
We get the final assertion by embedding the set $I$
in $k,$ so that the $p^{(c)}$ $(c\in k)$ induce elements of $k^I.$
These will form a $\!\card(k)\!$-tuple of functions,
any nontrivial linear combination of which is a function
with only finitely many zeroes.
Under a linear map $g$ from $k^I$ to a vector
space $V$ of dimension $<\card(k),$ some nontrivial linear
combination $u$ of these $\card(k)$ elements must go
to zero, yielding a member of $\ker(g)$ with the asserted property.
\end{proof}
(An alternative way to get around the problem that rational
functions have poles would be
to partition $k$ into two disjoint subsets of
equal cardinalities, and use linear combinations of rational functions
$1/(t-c)$ with $c$ ranging over
one of these sets to get functions on the other.)
For $k$ countable, the condition on the dimension of $V$ in the
final statement of the above lemma is no improvement
on what we got in Lemma~\ref{L.poly} using $k[t].$
In an earlier version of this note, we obtained an improvement
on Lemma~\ref{L.poly} for countable $k$ by a diagonal argument,
showing that if $k$ and $I$ are both countably infinite,
then any maximal subspace $W\subseteq k^I$
no nonzero member of which has infinitely many zero coordinates
must be uncountable-dimensional.
Jason Bell communicated to us the following stronger
result, which not only gives a subspace of continuum, rather than
merely uncountable, dimension, but (as is made clear in the proof,
though for simplicity we
do not include it in the statement), also shares with the constructions
of Lemmas~\ref{L.poly} and~\ref{L.rat} the property that
for every finite-dimensional subspace of $W,$ there is a uniform
bound on the number of zero coordinates of its nonzero elements,
which our earlier result lacked.
(The result below was, in fact, given in response to the question
we raised of whether
a construction admitting such uniform bounds was possible.)
\begin{lemma}[sketched by Jason Bell, personal communication]\label{L.JPBell}
If the field $k$ is infinite, and $I$ is a countably infinite set,
then there exists a subspace $W\subseteq k^I$
of continuum dimensionality such that
no nonzero member of $W$ has infinitely many \mbox{zeroes}.
Hence any $\!k\!$-linear map $g$ from $k^I$ to
a $\!k\!$-vector-space $V$ of less than continuum dimension
has in its kernel an element $u$ of cofinite support.
\end{lemma}
\begin{proof}
It suffices to prove the stated result for $I=\omega,$ the set of
natural numbers.
Let us first note that if $k$ is either of characteristic~$0,$ or
transcendental over its prime field, then it is algebraic
over a Unique Factorization Domain $R$ which is not a field
(namely, $\mathbb{Z},$ or a polynomial ring over the prime field
of $k).$
This ring $R$ admits a discrete valuation,
which induces a discrete valuation on the field of fractions of $R.$
It is easily deduced from \cite[Prop.~XII.4.2]{SL.Alg} that
this extends to a $\!\mathbb{Q}\!$-valued valuation $v$
on the algebraic extension $k$ of that field,
and by rescaling, $v$ can be assumed to have
valuation group containing $\mathbb{Z}.$
Let us call this situation Case~I.
If we are not in Case~I, then $k$ must be an infinite algebraic
extension of a finite field.
Hence it will contain a countable chain of distinct subfields,
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.k0k1}
$k_0\ \subset\ k_1\ \subset\ \cdots\ \subset\ k_i\ \subset\ \cdots~.$
\end{minipage}\end{equation}
%
Given any field $k$ containing such a chain of subfields
(regardless of characteristic, or algebraicity over a prime field),
we may define a natural-number-valued function $v$
(not a valuation) on $\bigcup_{i\in\omega} k_i\subseteq k$ by
letting $v(x)$ be the least $i$ such that $x\in k_i.$
We shall call the situation where $k$ contains
a chain~(\ref{d.k0k1}) Case~II.
(So Cases~I and~II together cover all infinite
fields, with a great deal of overlap.)
In either case, let us choose elements
$x_0,\ x_1,\ \dots\in k$ such that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.vxi}
$v(x_i)=i$ \quad for all $i\in\omega,$
\end{minipage}\end{equation}
%
and for every real number $\alpha>1,$ let
$f_\alpha\in k^\omega$ be defined by
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.f_alpha}
$f_\alpha(n)=x_\fl{\alpha n}$ \quad $(n\in\omega),$
\end{minipage}\end{equation}
%
where $\fl{\alpha n}$ denotes the largest integer $\leq\alpha n.$
This gives continuum many elements $f_\alpha\in k^\omega.$
We shall now complete the proof by showing separately in Cases~I
and~II that for any
$1<\alpha_1<\dots<\alpha_d,$ there exists a natural number $N$
such that no nontrivial linear combination
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.c1f1+}
$c_1 f_{\alpha_1} +\dots + c_d f_{\alpha_d}$\quad $(c_1,\dots,c_d\in k)$
\end{minipage}\end{equation}
%
has more than $N$ zero coordinates.
If we are in Case~I, consider any $n$ such
that the $\!n\!$-th coordinate of~(\ref{d.c1f1+}) is zero.
This says that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.sum_i=0}
$\sum_i c_i x_\fl{n\alpha_i}\ =\ 0.$
\end{minipage}\end{equation}
%
Now if a family of elements of $k$ which are not all zero has zero sum,
then at least two nonzero members of the family must have
equal valuation.
Thus, for some $i<j$ with $c_i,\,c_j\neq 0,$ we must have
$v(c_i\,x_\fl{n\alpha_i})\ =\ v(c_j\,x_\fl{n\alpha_j}),$ that is,
$\fl{n\alpha_j}-\fl{n\alpha_i}\ =\ v(c_i)-v(c_j).$
As $n$ grows, the left-hand side of this equation is nondecreasing
and increases without bound, taking any one value for at
most $1/(\alpha_j-\alpha_i)+1$ values of $n,$ while the right-hand
side is independent of $n.$
Hence each pair $i<j$ can occur in this way for only boundedly
many $n,$ and summing these bounds over all pairs $i<j$ gives a
bound $N,$ depending only on $\alpha_1,\dots,\alpha_d,$ with the
asserted property.
% NOTE(review): the passage from ``Thus, for some $i<j$'' through the
% introduction of Case~II below was reconstructed from context after
% a garbled extraction; please check it against the published version.
Now suppose we are in Case~II.
We shall show that there are at most $d-1$ values of $n$ satisfying
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.n>}
$n\ \geq\ \max_{i=1,\dots,d-1}\,(1/(\alpha_{i+1}-\alpha_i))$
\end{minipage}\end{equation}
%
for which the $\!n\!$-th coordinate of~(\ref{d.c1f1+}) is zero.
For suppose, on the contrary, that $n_1<\dots<n_d$ were $d$ distinct
values of $n$ satisfying~(\ref{d.n>}) at which the $\!n\!$-th
coordinate of~(\ref{d.c1f1+}) is zero.
This says that the $d\times d$ matrix
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.(())}
$(\,x_\fl{n_r\alpha_s}\,)_{r,s\,=\,1,\dots,d}$
\end{minipage}\end{equation}
%
annihilates the nonzero column vector $(c_1,\dots,c_d)^{\mathrm{T}}.$
% NOTE(review): this paragraph was reconstructed from context after
% a garbled extraction; please check it against the published version.
Now the subscripts $\fl{n_r\alpha_s},$ which are the values of $v$ on
the entries of~(\ref{d.(())}), increase strictly along each column
and along each row of that matrix: along columns
because $n_1<\dots<n_d$ and each $\alpha_s>1,$ and along rows
because $n_r(\alpha_{s+1}-\alpha_s)\geq 1,$ the latter because
all $n_i$ satisfy~(\ref{d.n>}).
It follows that in the matrix~(\ref{d.(())}), every minor
has the property that its lower right-hand entry does not lie
in the subfield generated by its other entries.
From this, it is easy to show by induction that all
minors have nonzero determinant, and so in particular
that~(\ref{d.(())}) is invertible.
But this contradicts the assumption that~(\ref{d.(())})
annihilates $(c_1,\dots,c_d)^{\mathrm{T}}.$
Hence there are, as claimed, at most $d-1$
values of $n$ satisfying~(\ref{d.n>})
such that the $\!n\!$-th entry of~(\ref{d.c1f1+}) is zero;
so the total number of zero entries of~(\ref{d.c1f1+}) is bounded by
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.N=max+d-1}
$N\ =\ \max_{i=1,\dots,d-1}\fl{1/(\alpha_{i+1}-\alpha_i)}+d,$
\end{minipage}\end{equation}
%
which again depends only on the $\alpha_i.$
The final assertion of the lemma clearly follows.
\end{proof}
Remark: In Case~I of the above proof, in place of
condition~(\ref{d.vxi})
we could equally well have used $x_i$ with $v(x_i)=-i.$
Similarly, the proof in Case~II can be adapted to fields $k$
having a {\em descending} chain of subfields
$k=k_0\supset k_1\supset\dots\supset k_i\supset\cdots~:$
in this situation, we define $v$ on $k-\bigcap_{i\in\omega} k_i$
to take each $x$
to the largest $i$ such that $x\in k_i,$ and consider upper
left-hand corners of minors instead of lower right-hand corners.
We know of no use for these observations at present; but they might be
of value in proving some variants of the above lemma.
\section{Larger $I.$}\label{S.I}
For our third generalization of Lemma~\ref{L.poly}, we
return to the hypothesis that $V$ is finite-dimensional, and
prove that in that situation, the statement that every linear map
$g:k^I\to V$ has elements of cofinite support in fact holds
for sets $I$ of cardinality much greater than $\card(k).$
We can no longer get this conclusion by finding an
infinite-dimensional subspace $W\subseteq k^I$
whose nonzero members each have only finitely many zeroes.
On the contrary, when $\card(I)>\card(k)$ (with the former
infinite) there can be no subspace $W\subseteq k^I$ of
dimension $>1$ whose nonzero members all have only finitely many zeroes.
For if $(x_i)$ and $(y_i)$ are linearly independent elements of
$W,$ and we look at the subspaces of $k^2$
generated by the pairs $(x_i,y_i)$ as $i$ runs over $I,$
then if $\card(I)>\card(k),$
at least one of these subspaces must occur at $\card(I)$ many values
of $i,$ but cannot occur at all $i;$ hence some
linear combination of $(x_i)$ and $(y_i)$ will have $\card(I)$ zeroes,
but not itself be zero.
So we must construct our elements of cofinite support in a different
way, paying attention to the particular map~$g.$
Surprisingly, our proof will again
use the polynomial trick of Lemma~\ref{L.poly};
though this time only after considerable preparation.
(We could use rational functions in place of these polynomials as in
Lemma~\ref{L.rat}, or functions like
the $f_\alpha$ of Lemma~\ref{L.JPBell}, but so far
as we can see, this would not improve our result, since
finite-dimensionality of $V$ is required by other
parts of the argument.)
The case of Theorem~\ref{T.main} below that we will deduce
from the result of this section is actually slightly weaker than the
corresponding result proved by different methods in~\cite{prod_Lie1}.
Hence the reader who is only interested in consequences for
algebra homomorphisms $\prod_I A_i\to B$ may prefer to skip
the lengthy and intricate argument of this section.
On the other hand, insofar as our general technique makes the
question, ``For what $k,$ $I$ and $V$ can we say that the kernel of
every $\!k\!$-linear map $k^I\to V$ must contain an element of cofinite
support?''\ itself of interest, the result of this section creates a
powerful complement to those of the preceding section.
We will assume here familiarity with the definitions of ultrafilter and
ultraproduct (given in most books on universal algebra or model theory,
and summarized in \cite[\S14]{prod_Lie1}), and
of $\!\kappa\!$-completeness of an ultrafilter
(developed, for example, in \cite{Ch+Keis} or \cite{Drake},
and briefly summarized in
the part of \cite[\S15]{prod_Lie1} preceding Theorem~47).
In the lemma below, we do not yet restrict $\card(I)$ at all.
As a result, we will get functions with zero-sets
characterized in terms of finitely many $\!\card(k)^+\!$-complete
ultrafilters, rather than finitely many points.
In the corollary to the lemma, we add a cardinality restriction
which forces such ultrafilters to be principal,
and so get elements with only finitely many zeroes.
The lemma also allows $k$ to be finite, necessitating
a proviso~(\ref{d.dim+2}) that its cardinality not be too small
relative to $\dim_k(V);$ this, too, will go away in the corollary,
where, for other reasons, we will have to require $k$ to be infinite.
In reading the lemma and its proof, the reader might bear in mind that
the property~(\ref{d.J0}) makes $J_0$ ``good'' for our purposes,
while $J_1,\dots,J_n$ embody the complications that we must overcome.
The case of property~(\ref{d.J0}) that we will want in the end
is for the element $0\in g(k^{J_0});$ but in the course of
the proof it will be important to consider that property for
arbitrary elements of that subspace.
\begin{lemma}\label{L.ultra}
Let $I$ be a set, $V$ a finite-dimensional
$\!k\!$-vector space such that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.dim+2}
$\card(k)\ \geq\ \dim_k(V)+2,$
\end{minipage}\end{equation}
%
and $g:k^I\to V$ a $\!k\!$-linear map.
Then $I$ may be decomposed into finitely many disjoint subsets,
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.I=}
$I\ =\ J_0\cup J_1\cup\,\dots\,\cup J_n$
\end{minipage}\end{equation}
%
$(n\geq 0),$ such that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.J0}
every element of $g(k^{J_0})$ is the image under $g$ of an element
having support precisely $J_0,$
\end{minipage}\end{equation}
%
and such that each set $J_m$ for $m=1,\dots,n$ has on it a
$\!\card(k)^+\!$-complete ultrafilter $\U_m$
such that, letting $\psi$ denote the factor-map
$V\to V/g(k^{J_0}),$ the composite $\psi g: k^I\to V/g(k^{J_0})$
can be factored
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.gfactors}
$k^I\ =\ k^{J_0}\times k^{J_1}\times\dots\times k^{J_n}\ %
\to\ k^{J_1}/\,\U_1\times\dots\times k^{J_n}/\,\U_n\ %
\hookrightarrow\ V/g(k^{J_0}),$
\end{minipage}\end{equation}
%
where $k^{J_m}/\,\U_m$ denotes the ultrapower of $k$ with respect
to the ultrafilter $\U_m,$
the first arrow of~\textup{(\ref{d.gfactors})}
is the product of the natural projections,
and the last arrow is an embedding.
\end{lemma}
\begin{proof}
If $\card(k)=2,$ then~(\ref{d.dim+2}) makes $V=\{0\},$ and
the lemma is trivially true (with $J_0=I$ and $n=0);$
so below we may assume $\card(k)>2.$
There exist subsets $J_0\subseteq I$ satisfying~(\ref{d.J0});
for instance, the empty subset.
Since $V$ is finite-dimensional, we may choose
a $J_0$ satisfying~(\ref{d.J0}) such that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.max}
Among subsets of $I$ satisfying~(\ref{d.J0}), $J_0$ maximizes the
subspace $g(k^{J_0})\subseteq V,$
\end{minipage}\end{equation}
%
i.e., such that no subset $J'_0$ satisfying~(\ref{d.J0}) has
$g(k^{J'_0})$ properly larger than $g(k^{J_0}).$
Given this $J_0,$ we now consider subsets $J\subseteq I-J_0$ such that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.Jmin}
$g(k^J)\not\subseteq g(k^{J_0}),$ and $J$ minimizes the
subspace $g(k^{J_0})+g(k^J)$ subject to this\\
condition, in the sense that every
subset $J'\subseteq J$ satisfies either
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.small}
$g(k^{J'})\ \subseteq\ g(k^{J_0})$
\end{minipage}\end{equation}
%
or
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.big}
$g(k^{J_0})+g(k^{J'})\ =\ g(k^{J_0})+g(k^J).$
\end{minipage}\end{equation}
It is not hard to see from the finite-dimensionality
of $V,$ and the fact that inclusions of sets $J$ imply the
corresponding inclusions among the
subspaces $g(k^{J_0})+g(k^{J}),$ that such
minimizing subsets $J$ will exist if $g(k^{J_0})\neq g(k^I).$
If, rather, $g(k^{J_0})=g(k^I),$ then the collection of
such subsets that we develop in the arguments below will be
empty, but that will not be a problem.
Let us, for the next few paragraphs, fix such a $J.$
Thus, every $J'\subseteq J$ satisfies
either~(\ref{d.small}) or~(\ref{d.big}).
However, we claim that there cannot be many {\em pairwise disjoint}
subsets $J'\subseteq J$ satisfying~(\ref{d.big}).
Precisely, letting
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.e=}
$e\ =\ \dim_k((g(k^{J_0})+g(k^J))/g(k^{J_0})),$
\end{minipage}\end{equation}
%
we claim that there cannot be $2e$ such pairwise disjoint subsets.
For suppose we had pairwise disjoint sets $J'_{\alpha, d}\subseteq J$
$(\alpha \in\{0,1\},\ d\in\{1,\dots,e\})$ each satisfying~(\ref{d.big}).
Let
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.h1he}
$h_1,\dots,h_e\ \in\ g(k^{J_0})+g(k^J)$
\end{minipage}\end{equation}
%
be a minimal family spanning $g(k^{J_0})+g(k^J)$ over $g(k^{J_0}).$
For each $\alpha\in\{0,1\}$ and $d\in\{1,\dots,e\},$
condition~(\ref{d.big}) on $J'_{\alpha, d}$
allows us to choose an element
$x^{(\alpha,d)}\in k^{J'_{\alpha, d}}$ such that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.x*ad}
$g(x^{(\alpha,d)})\ \equiv\ h_d\ \ (\mathrm{mod}\ g(k^{J_0})).$
\end{minipage}\end{equation}
%
Some of the $x^{(\alpha,d)}$ may have support strictly smaller
than the corresponding set $J'_{\alpha, d};$ if this happens,
let us cure it by replacing
$J'_{\alpha, d}$ by $\supp(x^{(\alpha,d)}):$ these are still pairwise
disjoint subsets of $J,$ and will still satisfy~(\ref{d.big}) rather
than~(\ref{d.small}), since after this modification, the
subspace $g(k^{J'_{\alpha, d}})$ still contains
$g(x^{(\alpha,d)})\notin g(k^{J_0}).$
We now claim that the set
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.J*0}
$J^*_0\ =\ J_0\,\cup\,
\bigcup_{\alpha\in\{0,1\},\ d\in\{1,\dots,e\}} J'_{\alpha, d}$
\end{minipage}\end{equation}
%
contradicts the maximality condition~(\ref{d.max}) on $J_0.$
Clearly $g(k^{J^*_0})=g(k^{J_0})+g(k^J)$ is strictly
larger than $g(k^{J_0}).$
To show that $J^*_0$ satisfies the analog of~(\ref{d.J0}),
consider any $h\in g(k^{J^*_0})=g(k^{J_0})+g(k^J),$
and let us write it, using the relative spanning set~(\ref{d.h1he}), as
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.h0}
$h\ =\ h_0+c_1 h_1+\dots+c_e h_e
\quad (h_0\in g(k^{J_0}),\ c_1,\dots,c_e\in k).$
\end{minipage}\end{equation}
%
Since $\card(k)>2,$ we can now choose for each $d=1,\dots,e$ an element
$c'_d\in k$ which is neither $0$ nor $c_d,$ and form the element
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cc'}
$x\ =\ (c'_1 x^{(0,1)} + (c_1-c'_1) x^{(1,1)})\,+\,
(c'_2 x^{(0,2)} + (c_2-c'_2) x^{(1,2)})\,+\,\cdots\,+\,
(c'_e x^{(0,e)} + (c_e-c'_e) x^{(1,e)}).$
\end{minipage}\end{equation}
%
By our choice of $c'_1,\dots,c'_e,$ none of the coefficients
$c'_d$ or $c_d-c'_d$ is zero, so $\supp(x)=\bigcup J'_{\alpha, d}.$
Applying $g$ to~(\ref{d.cc'}), we see
from~(\ref{d.x*ad}) that $g(x)$ is congruent modulo
$g(k^{J_0})$ to $c_1 h_1+\dots+c_e h_e,$ hence,
by~(\ref{d.h0}), congruent to $h.$
By~(\ref{d.J0}), we can find an element $y\in k^{J_0}$ with support
precisely $J_0$ that makes up the difference, so that $g(y)+g(x)=h.$
The element $y+x$ has support exactly $J^*_0;$ and since we have
obtained an arbitrary $h\in g(k^{J^*_0})$
as the image under $g$ of this element, we have shown
that $J^*_0$ satisfies the analog of~(\ref{d.J0}),
giving the desired contradiction.
Thus, we have a finite upper bound (namely, $2e-1)$
on the number of pairwise disjoint subsets $J'$ that
$J$ can contain which satisfy~(\ref{d.big}).
So starting with $J,$ let us, if it is the union of two disjoint
subsets with that property, split one off and
rename the other $J,$ and repeat this process as many times as we can.
Then in finitely many steps, we must
get a $J$ which cannot be further decomposed.
Summarizing what we know about this $J,$ we have
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.J}
$g(k^J)\not\subseteq g(k^{J_0}),$
every subset $J'\subseteq J$ satisfies either
$g(k^{J'})\subseteq g(k^{J_0})$ or
$g(k^{J_0})+g(k^{J'})=g(k^{J_0})+g(k^J),$
and no two {\em disjoint} subsets of $J$ satisfy the latter equality.
\end{minipage}\end{equation}
Let us call any subset $J\subseteq I-J_0$ satisfying~(\ref{d.J})
a {\em nugget}.
From the above development, we see that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.nuggets}
Every subset $J\subseteq I-J_0$ such that
$g(k^J)\not\subseteq g(k^{J_0})$ contains a nugget.
\end{minipage}\end{equation}
The rest of this proof will analyze the properties of an arbitrary
nugget $J,$ and finally show (after a possible adjustment
of $J_0)$ that $I-J_0$ can be decomposed into
finitely many nuggets $J_1\cup\dots\cup J_n,$ and that these will have
the properties in the statement of the proposition.
We begin by showing that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.ultra}
If $J$ is a nugget, then the set
$\U=\{J'\subseteq J\mid g(k^{J_0})+g(k^{J'})=g(k^{J_0})+g(k^J)\}$\\
is an ultrafilter on $J_{\strt}.$
\end{minipage}\end{equation}
%
To see this, note that by~(\ref{d.J}), the complement of $\U$
within the set of subsets of $J$
is also the set of complements relative to $J$ of members
of $\U,$ and is, furthermore, the set of all $J'\subseteq J$ such
that $g(k^{J'})\subseteq g(k^{J_0}).$
The latter set is clearly closed under unions and passing to smaller
subsets, hence $\U,$ inversely, is closed
under intersections and passing to larger subsets of $J;$
i.e., $\U$ is a filter.
Since $\emptyset\notin\U,$
while the complement of any subset of $J$ not in $\U$ does belong
to $\U,$ $\U$ is an ultrafilter.
Let us show next that any nugget $J$ has properties
that come perilously close to making $J_0\cup J$ a counterexample
to the maximality condition~(\ref{d.max}) on $J_0.$
By assumption, $g(k^{J_0\cup J})$ is strictly larger than $g(k^{J_0}).$
Now consider any $h\in g(k^{J_0\cup J}).$
We may write
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.h=}
$h\ =\ g(w)+g(x),$\quad where $w\in k^{J_0},\ x\in k^J.$
\end{minipage}\end{equation}
%
Suppose first that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.hnotin}
$h\notin g(k^{J_0}).$
\end{minipage}\end{equation}
%
From~(\ref{d.h=}) and~(\ref{d.hnotin})
we see that $g(x)\notin g(k^{J_0}),$ so $\supp(x)\in\U.$
Now take any element $x'\in k^J$ which
agrees with $x$ on $\supp(x),$ and has (arbitrary) {\em nonzero}
values on all points of $J-\supp(x).$
The element by which we have modified $x$ to get $x'$ has
support in $J-\supp(x),$ which is $\notin\U$ because $\supp(x)\in\U;$
hence $g(x')\equiv g(x)\ (\mathrm{mod}\ g(k^{J_0})),$ hence
by~(\ref{d.h=}), $g(x')\equiv h\ (\mathrm{mod}\ g(k^{J_0})).$
Hence by~(\ref{d.J0}), we can find $z\in k^{J_0}$ with
support exactly $J_0$ such that $g(z)+g(x')=h.$
Thus, $z+x'$ is an element with support
$J_0\cup J$ whose image under $g$ is $h.$
This is just what would be needed to make $J_0\cup J$
satisfy~(\ref{d.J0}), if we had proved it for all
$h\in g(k^{J_0\cup J});$ but we have only proved it for $h$
satisfying~(\ref{d.hnotin}) (which we needed to argue
that $\supp(x)$ belonged to $\U).$
We now claim that if there were any $x\in k^J$ with $\supp(x)\in\U$
satisfying $g(x)\in g(k^{J_0}),$ then we would be able to
complete our argument contradicting~(\ref{d.max}).
For modifying such an $x$ by any element with complementary support
in $J,$ we would get an element with support exactly $J$
whose image under $g$ would still lie in $g(k^{J_0}).$
Adding to this element the images under $g$ of
all elements of $k^{J_0}$ with support equal to $J_0,$ we would get
images under $g$ of certain elements with support exactly $J_0\cup J.$
Moreover, since $J_0$ satisfies~(\ref{d.J0}), these sums
would comprise all $h\in g(k^{J_0}),$ i.e., just those values that were
excluded by~(\ref{d.hnotin}).
In view of the resulting contradiction to~(\ref{d.max}), we have proved
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.bigsupp}
If $J$ is a nugget, then every $x\in k^J$ with
$\supp(x)\in\U$ satisfies $g(x)\notin g(k^{J_0}).$
\end{minipage}\end{equation}
We shall now use the ``polynomial functions'' trick to show
that~(\ref{d.bigsupp})
can only hold if the ultrafilter $\U$ is $\!\card(k)^+\!$-complete.
If $k$ is finite, $\!\card(k)^+\!$-completeness is vacuous,
so assume for the remainder of this paragraph that $k$ is infinite.
If $\U$ is not $\!\card(k)^+\!$-complete, we can
find pairwise disjoint subsets $J_c\subseteq J$ $(c\in k)$
with $J_c\notin\U,$ whose union is all of $J.$
Given these subsets, let $z\in k^J$ be the element
having, for each $c\in k,$ the value $z_i=c$ at all $i\in J_c.$
Taking powers of $z$ under componentwise multiplication,
we get elements $1,\,z,\dots,z^n,\ldots\in k^J.$
Since $V$ is finite-dimensional, some nontrivial linear combination
$p(z)$ of these must be in the kernel of $g.$
But as a nonzero polynomial, $p$ has only finitely many
roots in $k,$ say $c_1,\dots,c_r.$
Thus $\supp(p(z))=J-(J_{c_1}\cup\dots\cup J_{c_r}).$
Since $J\in\U$ and $J_{c_1},\dots, J_{c_r}\notin\U,$
we get $\supp(p(z))\in\U;$ but since $p(z)\in\ker(g),$
we have $g(p(z))=0\in g(k^{J_0}),$ contradicting~(\ref{d.bigsupp}).
Hence
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.card+}
For every nugget $J,$ the ultrafilter $\U$ of~(\ref{d.ultra})
is $\!\card(k)^+\!$-complete.
\end{minipage}\end{equation}
We claim next that~(\ref{d.card+}) implies that for any nugget $J,$
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.dim1}
$\dim_k((g(k^{J_0})+g(k^J))/g(k^{J_0}))\ =\ 1.$
\end{minipage}\end{equation}
%
Indeed, fix $x\in k^J$ with support $J,$ and
consider any $y\in k^J.$
If we classify the elements $i\in J$ according to the
value of $y_i/x_i\in k,$ this gives $\card(k)$ sets, so
by $\!\card(k)^+\!$-completeness, one
of them, say $\{i\mid y_i=c\,x_i\}$ (for some $c\in k)$ lies in $\U.$
Hence $y-c\,x$ has support $\notin\U,$ so $g(y-c\,x)\in g(k^{J_0}),$
i.e., modulo $g(k^{J_0}),$ the element
$g(y)$ is a scalar multiple of $g(x).$
So $g(x)$ spans $g(k^{J_0})+g(k^J)$ modulo~$g(k^{J_0}).$
Let us now choose for each nugget $J$ an element $x_J$ with support $J.$
Thus, by the above observations, $g(x_J)$ spans
$g(k^{J_0})+g(k^J)$ modulo $g(k^{J_0}).$
We claim that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.indep}
For any disjoint nuggets $J_1,\dots,J_n,$ the elements
$g(x^{\strt}_{J_1}),\dots,g(x^{\strt}_{J_n})\in V$ are linearly\\
independent modulo $g(k^{J_0}).$
\end{minipage}\end{equation}
For suppose, by way of contradiction, that we had some relation
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.linrel}
$\sum_{m=1}^n c_m\,g(x^{\strt}_{J_m})\ \in\ g(k^{J_0}),$\quad
with not all $c_m$ zero.
\end{minipage}\end{equation}
%
If $n>\dim_k(V),$ then there must be a linear relation in $V$ among
$\leq \dim_k(V)+1$ of the $g(x^{\strt}_{J_m})\in V,$ so in that
situation we may (in working toward our contradiction) replace
the set of nuggets assumed to satisfy a relation~(\ref{d.linrel})
by a subset also satisfying
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.nleq}
$n\ \leq\ \dim_k(V)+1,$
\end{minipage}\end{equation}
%
and~(\ref{d.linrel}) by a relation which they satisfy.
Also, by dropping from our list of nuggets in~(\ref{d.linrel})
any $J_m$ such that $c_m=0,$
we may assume those coefficients all nonzero.
We now invoke for the third (and last) time the maximality
assumption~(\ref{d.max}), arguing that in the above
situation, $J_0\cup J_1\cup\dots\cup J_n$ would be a counterexample
to that maximality.
For consider any
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.v}
$v\ \in\ g(k^{J_0\,\cup\,J_1\,\cup\,\dots\,\cup\,J_n}).$
\end{minipage}\end{equation}
%
By~(\ref{d.dim1}) and our choice of $x_{J_1},\dots,x_{J_n},$ $v$ can
be written as the sum of an element of $g(k^{J_0})$ and
an element $\sum d_m\,g(x^{\strt}_{J_m})$ with $d_1,\dots,d_n\in k.$
By~(\ref{d.dim+2}) and~(\ref{d.nleq}), $\card(k)\geq\dim_k(V)+2>n,$
hence we can choose an element $c\in k$ distinct from
each of $d_1/c_1,$ $\dots\,,$ $d_n/c_n$ (for
the $c_m$ of~(\ref{d.linrel})), i.e.,
such that $d_m-c\,c_m\neq 0$ for $m=1,\dots,n.$
Thus, $\sum\,(d_m-c\,c_m)\,x^{\strt}_{J_m},$
which by~(\ref{d.linrel}) has the same
image in $V/g(k^{J_0})$ as our given element $v,$ is a linear
combination of $x^{\strt}_{J_1},\dots,x^{\strt}_{J_n}$ with
{\em nonzero} coefficients,
hence has support exactly $J_1\cup\dots\cup J_n.$
As before, we can now use~(\ref{d.J0}) to adjust this by an
element with support exactly $J_0$ so that the image under $g$ of the
resulting element $x$ is $v.$
Since $x$ has support exactly $J_0\cup J_1\cup\dots\cup J_n,$
we have the desired contradiction to~(\ref{d.max}).
It follows from~(\ref{d.indep}) that
there cannot be more than $\dim_k(V)$ disjoint nuggets;
so a maximal family of pairwise disjoint nuggets will be finite.
Let $J_1,\dots,J_n$ be such a maximal family.
In view of~(\ref{d.nuggets}), the set
$J=I-(J_0\cup J_1\cup\dots\cup J_n)$
must satisfy $g(k^J)\subseteq g(k^{J_0}),$ hence we can
enlarge $J_0$ by adjoining to it that set $J,$
without changing $g(k^{J_0}),$ and hence without losing~(\ref{d.J0}).
We then have~(\ref{d.I=}).
For $m=1,\dots,n,$
let $\U_m$ be the ultrafilter on $J_m$ described in~(\ref{d.ultra}).
To verify the final statement of the proposition, that there exists
a factorization~(\ref{d.gfactors}), note that any element
of $k^I$ can be written $a^{(0)}+a^{(1)}+\dots+a^{(n)}$ with
$a^{(m)}\in k^{J_m}$ $(m=0,\dots,n),$ hence its image under $g$ will be
congruent modulo $g(k^{J_0})$ to $g(a^{(1)})+\dots+g(a^{(n)}).$
Now the image of each $g(a^{(m)})$ modulo $g(k^{J_0})$
is a function only of the equivalence class of $a^{(m)}$ with respect to
the ultrafilter $\U_m$ (since two elements in the same equivalence
class will disagree on a subset of
$J_m$ that is $\notin\U_m,$ so that their difference
is mapped by $g$ into $g(k^{J_0})).$
Hence the value of $g(a)$ modulo $g(k^{J_0})$
is determined by the images of $a$ in the spaces $k^{J_m}/\,\U_m.$
This gives the factorization~(\ref{d.gfactors}).
The one-one-ness of the factoring map follows from~(\ref{d.indep}).
\end{proof}
To get from this a result with a simpler statement, recall that a set
$I$ admits a {\em nonprincipal} $\!\card(k)^+\!$-complete
ultrafilter only if its cardinality is greater than or equal to a
measurable cardinal $>\card(k)$ \cite[Proposition~4.2.7]{Ch+Keis}.
(We follow~\cite{Ch+Keis} in counting $\aleph_0$ as a measurable
cardinal.
Thus, we write ``uncountable measurable cardinal''
for what many authors, e.g., \cite[p.177]{Drake}, simply call a
``measurable cardinal''.)
Now uncountable measurable cardinals, if they exist at all,
must be enormously large (cf.\ \cite[Chapter~6, Corollary~1.8]{Drake}).
Hence for $k$ infinite, it is a weak restriction to assume
that $I$ is smaller than all measurable cardinals $>\card(k).$
Under that assumption, the $\!\card(k)^+\!$-complete ultrafilters
$\U_m$ of Lemma~\ref{L.ultra}
must be principal, determined by elements $i_m\in I;$
so each nugget $J_m$ contains a minimal nugget, the singleton $\{i_m\},$
and we may use these minimal nuggets in our
decomposition~(\ref{d.I=}).
The statement of Lemma~\ref{L.ultra} then simplifies to the next result.
(No such simplification is possible if $k$ is finite,
since then every ultrafilter is $\!\card(k)^+\!$-complete,
and the only restriction we could put on $\card(I)$ that would force
all $\!\card(k)^+\!$-complete ultrafilters to be
principal would be finiteness; an uninteresting situation.
So we now exclude the case of finite~$k.)$
% NOTE(review): this line was garbled in the source; the corollary's
% hypothesis sentence and label are reconstructed -- confirm wording and
% label key against the published version.
\begin{corollary}\label{C.I<meas}
Let $k$ be an infinite field, $I$ a set of cardinality less than
every measurable cardinal $>\nolinebreak\card(k)$
\textup{(}if any exist\textup{)}, $V$ a finite-dimensional
$\!k\!$-vector space, and $g:k^I\to V$ a $\!k\!$-linear map.
Then there exist elements $i_1,\dots,i_n\in I$
such that, writing $J_0=I-\{i_1,\dots,i_n\},$ we have
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.I-i*}
Every element of $g(k^{J_0})$ is the image under
$g$ of an element having support precisely $J_0.$
\end{minipage}\end{equation}
In particular, applying this to $0\in g(k^{J_0}),$
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.fewzeros}
There exists some $u=(u_i)\in\ker(g)$ such
that $u_i=0$ for only finitely many $i$
\textup{(}namely $i_1,\dots,i_n).$
\qed\hspace{-1.3em}
\end{minipage}\end{equation}
%
\end{corollary}
Since we have excluded the case where $k$ is finite, the
above corollary did not need condition~(\ref{d.dim+2}),
that $\card(k)\geq\dim_k(V)+2.$
We end this section with a quick example showing that
Lemma~\ref{L.ultra} does need that condition.
Let $k$ be any finite field, and $I$ a subset of $k\times k$
consisting of one nonzero element from each of the
$\card(k)+1$ one-dimensional subspaces of that
two-dimensional space (i.e., $I$ is a set of
representatives of the points of the projective line over $k).$
Let $S\subseteq k^I$ be the two-dimensional subspace consisting of the
restrictions to $I$ of all $\!k\!$-linear functionals on $k\times k.$
Since $k^I$ is $\!(\card(k){+}1)\!$-dimensional, $S$ can be expressed
as the kernel of a linear map $g$ from $k^I$ to a
$\!(\card(k){-}1)\!$-dimensional vector space $V.$
By choice of $I,$ every element of $S=\ker(g)$
has a zero somewhere on $I,$ so
$0\in g(k^I)$ is not the image under $g$ of an element having
all of $I$ for support.
Hence~(\ref{d.J0}) cannot hold with $J_0=I.$
If Lemma~\ref{L.ultra} were applicable, this would force
the existence of a nonzero number of nuggets $J_m.$
Since $I$ is finite, the associated
ultrafilters would be principal, corresponding to elements $i_m$ such
that all members of $S=\ker(g)$ were zero at $i_m$
(by the one-one-ness of the last map of~(\ref{d.gfactors})).
But this does not happen either: for every $i\in I,$
there are clearly elements of $S$ nonzero at $i.$
Hence the conclusion of Lemma~\ref{L.ultra} does not hold for this $g.$
Note that since $\dim_k(V)=\card(k)-1,$
the condition $\card(k)\geq\dim_k(V)+2$ fails by just~$1.$
\section{Back to homomorphic images of product algebras}\label{S.*PA_i->B}
From the above three results on elements with cofinite
support, we can now prove the three cases of
\begin{theorem}\label{T.main}
Assume the field $k$ is infinite, and let $(A_i)_{i\in I}$ be a family
of $\!k\!$-algebras, $B$ a $\!k\!$-algebra, and
$f:A=\prod_I A_i\to B$ a surjective $\!k\!$-algebra homomorphism.
Suppose further that either\\[.2em]
%
\textup{(i)}\ \ $\dim_k(B)<\card(k),$ and
$\card(I)\leq\card(k),$ or\\[.2em]
%
\textup{(ii)}\,\ $\dim_k(B)<2^{\aleph_0},$ and $\card(I)=\aleph_0,$
or\\[.2em]
%
\textup{(iii)}\ $\dim_k(B)$ is finite, and $\card(I)$ is less than
every measurable cardinal $>\card(k).$\vspace{.2em}
Then
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.B=}
$B\ =\ f(A_{\mathrm{fin}})+Z(B).$
\end{minipage}\end{equation}
In fact, $f$ can be written as the sum
$f_1+f_0$ of a $\!k\!$-algebra homomorphism $f_1:A\to B$ that
factors through the projection of $A$ onto the product
of finitely many of the $A_i,$ and a $\!k\!$-algebra
homomorphism $f_0:A\to Z(B).$
\end{theorem}
\begin{proof}
We see~(\ref{d.B=}) by combining Lemma~\ref{L.supp}(iii)
with Lemma~\ref{L.rat} in case~(i),
with Lemma~\ref{L.JPBell} in case~(ii), and
with Corollary~\ref{C.I<meas} in case~(iii).
The decomposition $f=f_1+f_0$ then follows from~(\ref{d.B=})
as in~\cite{prod_Lie1}.
% NOTE(review): the end of this proof and the opening of the following
% remark were garbled in the source and are reconstructed here -- confirm
% against the published version.
\end{proof}
Theorem~\ref{T.main}(iii) cannot be extended to sets $I$ of cardinality
greater than or equal to a measurable
cardinal $\mu>\card(k)$ (if these exist).
This is because any $I$ of cardinality greater
than or equal to such a $\mu$
admits a nonprincipal $\!\card(k)^+\!$-complete ultrafilter $\U,$
which makes $k^I/\,\U$ one-dimensional (cf.\ proof of~(\ref{d.dim1})
above, or \cite[Theorem~49]{prod_Lie1}),
and hence embeddable in $V,$
though the kernel of $k^I\to k^I/\,\U$ consists
of elements whose zero-sets lie in $\U,$ and hence are infinite.
Thus, Corollary~\ref{C.I<meas} indeed fails for such $I.$
% NOTE(review): transition reconstructed from garbled source -- confirm
% against the published version.
The results of~\cite{prod_Lie1} do not exclude sets $I$ of cardinality
greater than or equal to measurable cardinals
$>\card(k),$ but instead give, in that case, a conclusion in which
factorization of $f:\prod_I A_i\to B$
through finitely many of the $A_i$ is replaced by factorization
through finitely many ultraproducts of the $A_i$ with respect to
$\!\card(k)^+\!$-complete ultrafilters.
Though similar factorizations for a linear map $g:k^I\to B$
appear in Lemma~\ref{L.ultra} of this note, an apparent
obstruction to carrying these over
to results on algebra homomorphisms is that our
proof of the latter applies the results
of \S\S\ref{S.dimB}--\ref{S.I} not just to a single linear map
$g_a:k^I\to B,$ but to one such map for each $a\in A=\prod_I A_i;$
and different maps yield different families of ultrafilters.
However, one can get around this by choosing finitely many
elements $a_1,\dots,a_d\in A$ whose images under $f$ span $B,$
regarding them as together determining a map
$g_{a_1,\dots,a_d}:k^I\to B^d,$
applying Lemma~\ref{L.ultra} to that map, and then showing
that the image under $f$ of any element in the kernels of all the
resulting ultraproduct maps has zero product with the images
of $a_1,\dots,a_d\in A,$ hence lies in $Z(B).$
For the sake of brevity we have not set down formally a generalization
of Theorem~\ref{T.main}(iii) based on this argument.
For other results on cardinality and factorization of maps on
products, but of a somewhat different flavor, see~\cite{E+F+M}.
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\begin{thebibliography}{00}
\bibitem{pro-np} George\,M.\,Bergman,
{\em Homomorphic images of pro-nilpotent algebras},
to appear, {\em Illinois J. Math.}.
\bibitem{pro-np2} \bysame,
{\em Continuity of homomorphisms on pro-nilpotent algebras},
to appear, {\em Illinois J. Math.}.
\bibitem{prod_Lie1} {\bysame} and Nazih Nahlus,
{\em Homomorphisms on infinite direct product algebras,
especially~Lie~algebras},
J. Alg. {\bf 333} (2011) 67--104.
\url{http://dx.doi.org/10.1016/j.jalgebra.2011.02.035}.
MR~2785938.
\bibitem{Bois} Jean-Marie Bois,
{\em Generators of simple Lie algebras in arbitrary characteristics},
Mathematische Zeitschrift
{\bf 262} (2009) 715--741.
\url{http://arxiv.org/pdf/0708.1711}.
MR~{\bf 2010c}:17011.
\bibitem{Bourbaki} Nicolas Bourbaki,
{\em \'{E}l\'{e}ments de math\'{e}matique,}
Fasc.\ XXXVIII: {\em Groupes et alg\`{e}bres de Lie,}
chapitres VII--VIII.
Translated by Andrew Pressley in
{\em Lie Groups and Lie Algebras. Chapters 7--9.}
MR~{\bf 56}\#12077,
MR~{\bf 2005h}:17001.
% http://www.springerlink.com/content/q8102l678x025718/?p=df7a37853a6e4470abc3bd1ae2c3a116
\bibitem{Brown} Gordon Brown,
{\em On commutators in a simple Lie algebra,}
Proc. Amer. Math. Soc. {\bf 14} (1963) 763--767.
MR~{\bf 27}\#3676.
\bibitem{Ch+Keis} C.\,C.\,Chang and H.\,J.\,Keisler,
{\em Model Theory}, 3rd ed.,
Studies in Logic and the Foundations of Mathematics,
v.73. North-Holland, 1990.
% ISBN: 0-444-88054-2.
MR~{\bf 91c}:03026.
\bibitem{Drake} F.\,R.\,Drake, % QA248 .D731
{\em Set Theory: An Introduction to Large Cardinals,}
Studies in Logic and the Foundations of Mathematics, v.76, Elsevier,
1974.
% ISBN 0-444-10535-2.
Zbl~{\bf 0294}.02034.
\bibitem{E+F+M} Andrzej Ehrenfeucht, Siemion Fajtlowicz,
and Jan Mycielski,
{\em Homomorphisms of direct powers of algebras},
Fund. Math. {\bf 103} (1979) 189--203.
MR~{\bf 81b}:08002.
\bibitem{KHH+SAM} Karl H. Hofmann and Sidney A. Morris,
{\em The Lie Theory of Connected Pro-Lie Groups. % QA387 .H6364 2007
A structure theory for pro-Lie algebras, pro-Lie groups, and connected
locally compact groups,}
EMS Tracts in Mathematics, 2. European Mathematical Society, 2007.
% ISBN: 978-3-03719-032-6.
MR~{\bf 2008h}:22001.
\bibitem{NJ} Nathan Jacobson, % QA162 .J3 1975 v.2
{\it Lectures in Abstract Algebra. Volume II: Linear algebra,}
Van Nostrand, 1953, and Springer GTM, No. 31, 1975.
MR~{\bf 14},837e\, and\, MR~{\bf 51}\#5614.
\bibitem{SL.Alg} Serge Lang,
{\em Algebra},
Addison-Wesley, third edition, 1993,
reprinted as Springer G.T.M., v.211, 2002.
MR~{\bf 2003e}:00003.
\bibitem{Loeb} Peter A. Loeb,
{\em Nonstandard analysis and topology},
pp.~77--86 in {\em Nonstandard Analysis
\textup{(}Edinburgh, 1996\textup{)}},
ed.\ Leif Arkeryd, Nigel Cutland, and C.\,W.\,Henson. Kluwer, %spell.ig
1997.
MR~1603230. % no review
\bibitem{NN_split} Nazih Nahlus,
{\em On $L=[L,a]+[L,b]$ and $x=[a,b]$ in split simple Lie algebras},
to be written, title tentative.
\end{thebibliography}
\end{document}