\documentclass{amsart}
\usepackage{amsmath,amsfonts,amsthm,amssymb,indentfirst,epic,url,graphics,needspace}
\setlength{\textwidth}{6.5in}
\setlength{\textheight}{9.00in}
\setlength{\evensidemargin}{0in}
\setlength{\oddsidemargin}{0in}
\setlength{\topmargin}{-.5in}
\sloppy
\setlength{\mathsurround}{.167em}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{question}[theorem]{Question}
\newtheorem{example}[theorem]{Example}
\newtheorem{convention}[theorem]{Convention}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Pm}{\mathcal{P}}
\renewcommand{\r}{\mathrm}
\newcommand{\supp}{\r{supp}}
\newcommand{\cl}{\r{cl}}
\newcommand{\po}{\rightsquigarrow}
\newcommand{\lang}{\begin{picture}(5,7)
\put(1.2,2.5){\rotatebox{45}{\line(1,0){6.0}}}
\put(1.2,2.5){\rotatebox{315}{\line(1,0){6.0}}}
\end{picture}\kern.16em}
%
\newcommand{\rang}{\kern.1em\begin{picture}(5,7)
\put(.1,2.5){\rotatebox{135}{\line(1,0){6.0}}}
\put(.1,2.5){\rotatebox{225}{\line(1,0){6.0}}}
\end{picture}}
\raggedbottom
\begin{document}
\begin{center}
\texttt{
This is the final preprint version of a paper which
appeared at \\[.3em]Journal of Algebra, 535 (2019) 503-540.
The published version is accessible to \\[.3em]subscribers
at \ \url{https://doi.org/10.1016/j.jalgebra.2019.06.021}.}
\end{center}
\vspace{2em}
\title%
[Embeddability of rings in division rings]%
{Some results relevant to embeddability of rings\\
(especially group algebras) in division rings}
\thanks{Archived at \url{http://arXiv.org/abs/1812.06123}\,.
After publication, any updates, errata, related references,
etc., found will be recorded at
\url{http://math.berkeley.edu/~gbergman/papers/}\,.
}
\subjclass[2010]{Primary: 06F16, 16K40, 20C07.
% ord_gps div_rgs kG(&mdls)
Secondary: 05B35, 06A05, 06A06, 43A17.}
% matroids tot-ord part-ord anal_on_ord_gps
\keywords{%
homomorphisms of rings to division rings;
coherent matroidal structures on free modules;
group algebras of right-ordered groups;
prime matrix ideals.
}
\author{George M. Bergman}
\address{University of California\\
Berkeley, CA 94720-3840, USA}
\email{gbergman@math.berkeley.edu}
\begin{abstract}
P.\,M.\,Cohn showed in 1971 that given a ring $R,$ to describe,
up to isomorphism, a division ring $D$ generated by a homomorphic
image of $R$ is equivalent to specifying the set of square matrices
over $R$ which map to singular matrices over $D,$ and he determined
precisely the conditions that such a set of matrices must satisfy.
The present author later developed another version of this data,
in terms of closure operators on free $\!R\!$-modules.
In this note, we examine the latter concept further, and show how
an $\!R\!$-module $M$ satisfying certain conditions
can be made to induce such data.
In an appendix we make some observations on Cohn's
original construction,
and note how the data it uses can similarly be induced by
an appropriate sort of $\!R\!$-module.
Our motivation is the longstanding question of whether,
for $G$ a right-orderable group and $k$ a field, the group
algebra $kG$ must be embeddable in a division ring.
Our hope is that the right $\!kG\!$-module \mbox{$M=k((G))$}
might induce a closure operator of the required sort.
We re-prove a partial result in this direction due to N.\,I.\,Dubrovin,
note a plausible generalization thereof which would give the
desired embedding, and also sketch some
thoughts on other ways of approaching the problem.
\end{abstract}
\maketitle
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\section{Background}\label{S.intro}
A.\,I.\,Mal'cev~\cite{Malcev} and,
independently, B.\,H.\,Neumann~\cite{BHN} showed that
if $G$ is a group given with a {\em \mbox{$\!2\!$-sided}-invariant}
ordering, that is, a total ordering $\leq$ such that for
all $e,\,f,\,g,\,h\in G,$
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.2-sided}
$f\leq g\ \implies\ ef\leq eg$\ \ and\ \ $fh\leq gh,$
\end{minipage}\end{equation}
%
and if, for $k$ a field, we let $k((G))$ denote the
set of formal $\!k\!$-linear combinations
$\sum_{g\in G}\,\alpha_g\,g$ of elements of $G$ whose support,
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.supp}
$\supp(\sum_{g\in G}\,\alpha_g\,g)\ =\ \{g\in G\mid \alpha_g\neq 0\},$
\end{minipage}\end{equation}
%
is well-ordered, then $k((G))$ can be made a ring
in a natural way; in fact, a division ring.
This division ring contains the group algebra $kG,$
as the subalgebra of elements with finite support.
Now suppose $G$ is merely given with a {\em right-invariant} ordering,
that is, a total ordering satisfying
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.rt-ord}
$f\leq g\ \implies\ fh\leq gh,$
\end{minipage}\end{equation}
%
and again let $k((G))$ be the set of
formal $\!k\!$-linear combinations of elements of $G$ whose supports
are well-ordered.
This time we cannot extend the ring structure of $kG$ to $k((G))$
in any evident way: if we
try to take the formal product $ab$ of elements $a,b\in k((G)),$
the one-sided invariance of the ordering is not enough to guarantee
that only finitely many occurrences of each $g\in G$
arise when we multiply $ab$ out; and even when that is true, for
instance, when $a$ is a member of $G,$ the support of
the resulting formal sum $ab$ may fail to be well-ordered.
However, by~\eqref{d.rt-ord} we can make $k((G))$ a
right $\!kG\!$-module; and this module has been shown to
have a property that is very encouraging with respect to the possibility
of embedding $kG$ in a division ring:
Dubrovin~\cite{Dubrovin} shows that
every nonzero element of $kG$ acts invertibly on $k((G)).$
% (\S\S\ref{S.bij}-\ref{S.bij_via_Higman} below).
But it is not clear how to go further: if we form
the ring of $\!k\!$-linear endomorphisms of $k((G))$ generated
by the actions of the elements of $kG$ and their inverses, there is
no evident way to prove invertibility of
all nonzero elements of this larger ring;
so we are not in a position to iterate the adjunction of inverses.
Indeed, the question of whether group rings of all right-orderable
groups are embeddable in division rings is listed in \cite{Kourovka}
as Problem~1.6, attributed to A.\,I.\,Mal'cev, and dating from the first
(1965) edition of that collection of open problems in group theory.
(The still more general question of whether group rings
of all torsion-free groups embed in division
rings also appears to be open [{\em ibid.}, Problems~1.3 and 1.5].)
P.\,M.\,Cohn \cite{PMC_1971}-\cite{FRR+} showed that a homomorphism
from a not necessarily
commutative ring $R$ into a division ring $D$ can be studied
in terms of the set of square matrices over $R$ that become
singular over $D.$
He showed that this set of matrices determines the structure of
the division subring of $D$ generated by the image of $R,$ and
gave criteria for a set of matrices to arise in this
way (recalled in~\S\ref{S.PMC_versions} below);
he named sets of matrices satisfying those criteria
``prime matrix ideals''.
Subsequently, the present author showed in \cite{sfd_fr_mtrd}
that the same data can be described in terms
of closure operators on free $\!R\!$-modules of finite rank
(details recalled in~\S\ref{S.mtrd} below).
Something I did not notice then is that a structure with {\em most} of
the properties defining Cohn's prime matrix ideals, or
my classes of closure operators, is determined by
any right or left $\!R\!$-module $M.$
In~\S\ref{S.M} we develop these observations for the closure
operator construction, and describe the additional properties
that $M$ must have for the closure operator so induced
to satisfy {\em all} the required conditions.
We then give, in \S\S\ref{S.bij}-\ref{S.bij_via_Higman},
a slightly modified proof of the result of Dubrovin cited above,
and in~\S\S\ref{S.further?}-\ref{S.either/or} look at a plausible
strengthening of that
result which would lead to the conclusion that $k((G))$ has the
module-theoretic properties needed to induce, by the results
of \S\ref{S.M}, an embedding in a division ring.
In \S\S\ref{S.G*}-\ref{S.variants} we discuss some
other ideas that might be of use in tackling this problem.
Finally, in an appendix, \S\ref{S.PMC_versions}, we look at Cohn's
concept of a prime matrix ideal.
We note a discrepancy between the definition of that concept
used in most of his works, and a weaker definition
given in~\cite{SF}, and sketch an apparent difficulty with his
reasoning about the latter version.
But we record an argument supplied by Peter Malcolmson,
which shows that adding a
small additional condition to the weaker definition renders
it equivalent to the other, and show that, so modified, it
allows us to obtain prime matrix ideals from certain
$\!R\!$-modules $M$ in a way parallel to our results on
closure operators.
Let me remark, regarding
the concepts of $\!2\!$-sided and $\!1\!$-sided
orderability of groups, that though the former seems ``intrinsically''
more natural, the latter has considerable ``extrinsic'' naturality:
A group is right orderable if and only if it can be embedded in the
group of order-automorphisms of a totally ordered set, written
as acting on the right
\cite[Proposition~29.5]{Darnel}.
Here ``only if'' is clear, using the group's action on itself.
To see ``if'' we need, for any totally ordered set $A,$ a
way of right-ordering $\r{Aut}(A).$
To get this, index $A$ as $\{a_i\mid i\in\kappa\}$ for some ordinal
$\kappa,$ and for $s\neq t\in\r{Aut}(A),$ let $s\leq t$ if and only if
for the {\em least} $i$ such that $s_i\neq t_i,$ we have $s_i>t_i.$
% - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
\section{Matroidal structures on free modules}\label{S.mtrd}
\begin{definition}\label{D.cl}
A {\em closure operator} $\cl$ on a set $X$ is a map carrying
subsets of $X$ to subsets of $X,$ and satisfying, for
all $S,\,T\subseteq X,$
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_monot}
$S\ \subseteq\ T\ \implies\ \cl(S)\ \subseteq\ \cl(T).$
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_>}
$\cl(S)\ \supseteq\ S.$
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_idpt}
$\cl(\cl(S))\ =\ \cl(S).$
\end{minipage}\end{equation}
A closure operator $\cl$ will be called {\em finitary} if
for all $S\subseteq X,$
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_fin}
$\cl(S)\ =\ \bigcup_{\,\mbox{\em\scriptsize finite}
\ S_0\subseteq S}\ \cl(S_0).$
\end{minipage}\end{equation}
%
\end{definition}
(The most common term for a closure operator
satisfying~\eqref{d.cl_fin} is ``algebraic'', because that condition
is frequent in algebraic contexts.
But ``finitary'' seems more to the point.)
Now suppose $R$ is a ring,
and $f:R\to D$ a homomorphism into a division ring.
For every $n\geq 0,$ let us
define a closure operator $\cl_{R^n}$ on $R^n$
by looking at the induced map $f:R^n\to D^n,$ and sending each
$S\subseteq R^n$ to the inverse image
in $R^n$ of the right span over $D$ of the image of $S$ in $D^n.$
In writing this formally, it will be convenient to use the
same letter $f$ that denotes our homomorphism $R\to D$ to
denote also the induced homomorphisms of right $\!R\!$-modules,
$R^n\to D^n,$ for all $n\geq 0.$
Then our definition says that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl}
$\cl_{R^n}(S)\ =\ f^{-1}(f(S)D)$\hspace{1em}
for $S\subseteq R^n.$
\end{minipage}\end{equation}
It is not hard to verify that this construction satisfies
the following five conditions for all $m,n\geq 0.$
%
\Needspace{4\baselineskip}
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_cl}
$\cl_{R^n}$ is a closure operator on
the underlying set of the right
$\!R\!$-module $R^n,$ whose closed subsets are $\!R\!$-submodules.
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_proper}
For all $n>0,$ $\cl_{R^n}(\emptyset)$ is a proper submodule of $R^n.$
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_homs}
For every homomorphism of right $\!R\!$-modules $h:R^m\to R^n$
and every $\!\cl_{R^n}\!$-closed submodule $A\subseteq R^n,$
the submodule $h^{-1}(A)\subseteq R^m$ is $\!\cl_{R^m}\!$-closed.
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_exch}
The closure operator $\cl_{R^n}$ has the {\em exchange property},
namely, for $S\subseteq R^n$ and $t,u\in R^n,$
if $u\notin\cl_{R^n}(S)$ but $u\in\cl_{R^n}(S\cup\{t\}),$
then $t\in\cl_{R^n}(S\cup\{u\}).$
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_finitary}
The closure operator $\cl_{R^n}$ is finitary.
\end{minipage}\end{equation}
%
In~\cite{sfd_fr_mtrd}, I named families of closure operators
$(\cl_{R^n})_{n\geq 0}$ satisfying~\eqref{d.cl_cl}-\eqref{d.cl_finitary}
``proper coherent matroidal structures on free $\!R\!$-modules''
(``matroid'' being the standard term for a set $X$ given with a
finitary closure operator $\r{cl}$ having
the exchange property of~\eqref{d.cl_exch}),
and it was shown that every such structure determines a
homomorphism $f$ of $R$ into a division ring $D$
which induces the given operators via~\eqref{d.cl}, and which is,
up to embeddings of division rings, the unique such homomorphism.
By~\eqref{d.cl},
the kernel of that homomorphism is $\cl_{R}(\emptyset).$
Condition~\eqref{d.cl_homs} above is stated in
terms of inverse images of closed subsets.
It is also equivalent (given~\eqref{d.cl_cl}) to
a statement about {\em closures of images} of subsets, namely:
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_homs'}
For every homomorphism of right $\!R\!$-modules $h:R^m\to R^n$
$(m,n\geq 0)$ and every subset $S\subseteq R^m,$
the submodule $h(\cl_{R^m}(S))$ of $R^n$ is
contained in $\cl_{R^n}(h(S)).$
\end{minipage}\end{equation}
Indeed, consider an arbitrary subset $S\subseteq R^m$ and
an arbitrary closed subset $A\subseteq R^n.$
Then~\eqref{d.cl_homs} is equivalent to the statement that
for any such sets, if $S\subseteq h^{-1}(A)$ then
$\cl_{R^m}(S)\subseteq h^{-1}(A),$ while~\eqref{d.cl_homs'}
is equivalent to the statement that for any such sets,
if $h(S)\subseteq A$ then $h(\cl_{R^m}(S))\subseteq A.$
These statements are clearly equivalent,
so~\eqref{d.cl_homs} and~\eqref{d.cl_homs'} are equivalent.
We remark that matroid theorists often require the underlying
sets of matroids to be finite; for instance,
this is assumed by Welsh~\cite{Welsh}, and only in his final
chapter does he discuss ways the theory can be
extended to infinite structures.
But for most algebraic applications, including those of this note,
the restriction to finite sets would be unnatural, and the
appropriate version in the infinite case is clear: Regarding
matroids as sets with closure operators
(one of many equivalent formulations of the concept),
one should simply require that these operators be finitary, i.e.,
one should impose condition~\eqref{d.cl_finitary}.
We shall call on many results from~\cite{Welsh} in this note,
tacitly understanding that the statements we quote go over to
the infinite matroids we will be considering.
The assumption that our closure operators are finitary makes it
straightforward to deduce such statements from the corresponding
facts about finite matroids.
(The term ``matroid'' is based on the
motivating example of the linear dependence structure on the
rows or columns of a matrix over a field $K.$
From that point of view, the finiteness assumption is natural.
But such systems of rows or columns are simply
finite families of elements of a space $K^n,$ and to the
algebraist, linear dependence
is most naturally viewed as structure on that generally infinite set.)
In the situations we shall be looking at,
conditions~\eqref{d.cl_cl}-\eqref{d.cl_homs} will generally
be easy to establish.
The next lemma restricts the instances one has to verify to show
that~\eqref{d.cl_exch} also holds, and shows
that~\eqref{d.cl_finitary} is
implied by \eqref{d.cl_cl} and~\eqref{d.cl_exch}.
\begin{lemma}\label{L.1),$
then taking $s_2\in S-\cl_{R^n}(\{s_1\}),$
the corresponding argument shows that we can
replace another $e_i$ by $s_2;$ and so on.
Since there are only $n$ elements $e_i$ to be replaced,
this process must stop after $\leq n$ steps, giving a
subset of $\leq n$ elements of $S$ which (because the
process has stopped) has $S$ in its closure,
hence has the same closure as $S,$ proving~(ii).
Further, if $\cl_{R^n}(S)\neq R^n,$ this process can't
terminate with all the $e_i$ replaced by elements of $S,$
since that would imply that $S$ had
closure $R^n;$ so it must terminate
with $}\,N$ behave
badly, see the $V_{m,n}$ case of \cite[Theorem 6.1]{coproducts2}.)
\section{Systems of closure operators induced by $\!R\!$-modules}\label{S.M}
In \S\ref{S.mtrd} we saw how a homomorphism of a ring $R$
into a division ring $D$ induces, by~\eqref{d.cl},
a system of closure operators
satisfying~\eqref{d.cl_cl}-\eqref{d.cl_exch}.
Suppose that instead of
a homomorphism from $R$ to a division ring, we are given
a nonzero right $\!R\!$-module $M.$
There is no obvious way to put $M$ in place of $D$ in~\eqref{d.cl}
(even if we assume it a left rather than a right module);
but we shall see below that there is a natural way to get from $M$
a system of closure operators $(\cl_{R^n})_{0\leq n}$
which, for $M=D_R$ $(D$ regarded as a right $\!R\!$-module)
agrees with that given by~\eqref{d.cl}.
For each $n>0,$ let us write elements $a\in M^n$ as
row vectors, and elements $x\in R^n$ as column vectors.
Then for such $a$ and $x$ we can define $a x\in M$ in the obvious way;
thus we can speak of elements of $R^n$
annihilating elements of $M^n.$
For $S\subseteq R^n,$ let $\cl_{R^n}(S)$
be the set of elements of $R^n$ that annihilate all
elements of $M^n$ annihilated by all elements of $S.$
Writing $\r{ann}_{M^n}(S)$ for
$\{a\in M^n\mid (\forall\,s\in S)\linebreak[1]\ as=0\},$
this becomes
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_fr_M}
$\cl_{R^n}(S)\ =\ \{x\in R^n\mid
\r{ann}_{M^n}(x)\supseteq \r{ann}_{M^n}(S)\}.$
\end{minipage}\end{equation}
%
We see that the closed subsets of $R^n$ under~\eqref{d.cl_fr_M}
are precisely the annihilators of subsets of $M^n.$
It is not hard to check that given a homomorphism to a division
ring, $f:R\to D,$ as in \S\ref{S.mtrd},
if we let $M=D_R,$ then~\eqref{d.cl_fr_M} describes the
same closure operator as~\eqref{d.cl}.
(The key observation is that every subspace of the right $\!D\!$-vector
space of height-$\!n\!$ columns over $D$ is the right annihilator
of a set of length-$\!n\!$ rows over $D$ -- since such row
vectors correspond to the $\!D\!$-linear functionals on that space --
so the right $\!R\!$-submodules of $R^n$ that are inverse
images under $f$ of $\!D\!$-subspaces of $D^n,$
regarded as sets of columns, are those that are
annihilators of sets of elements of~$D^n$ regarded as rows.)
Returning to the case of a general right $\!R\!$-module $M,$
let us, for any {\em matrix} $A$ over $R$ with $n$ rows,
write $\r{ann}_{M^n}(A)$ for the subset of $M^n$ annihilated
by the right action of $A,$ in other words,
the annihilator in $M^n$ of the set of columns of~$A.$
\begin{lemma}\label{L.cl_fr_M}
Let $R$ be a ring and $M$ a nonzero right $\!R\!$-module, and
for each $n\geq 0$ let $\cl_{R^n}$ be defined by~\eqref{d.cl_fr_M}.
Then this family of operators satisfies
conditions~\eqref{d.cl_cl}, \eqref{d.cl_proper} and
\eqref{d.cl_homs}.
For each $n,$ the condition that $\cl_{R^n}$ also
satisfy~\eqref{d.cl_exch}
\textup{(}which, as noted, is equivalent
to~\eqref{d.cl_exch-}\textup{)}, is equivalent
to each of the following three statements.
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_exch_iff}
There do not exist a subset $S\subseteq R^n,$ and elements
$u,t\in R^n,$ such that\\
$\r{ann}_{M^n}(S)\ \supsetneqq\ \r{ann}_{M^n}(S\cup\{u\})\ \supsetneqq
\ \r{ann}_{M^n}(S\cup\{t\}).$
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_exch_iff_mx-}
There do not exist an $n\times n{-}1$ matrix $A$ over $R,$
and $n\times n$ matrices $B,$ $C$ over $R,$
each obtained by adding a single column to $A,$ such that
\ $\r{ann}_{M^n}(A)\ \supsetneqq
\ \r{ann}_{M^n}(B)\ \supsetneqq\ \r{ann}_{M^n}(C).$
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.cl_exch_iff_mx}
There do not exist $n\times n$ matrices $A,$ $B,$ $C$
over $R$ which all agree except in one column, such that
\ $\r{ann}_{M^n}(A)\ \supsetneqq
\ \r{ann}_{M^n}(B)\ \supsetneqq\ \r{ann}_{M^n}(C).$
\end{minipage}\end{equation}
Hence, if a ring $R$ has a {\em faithful} right module $M$ which
for all $n\geq 0$ satisfies~\eqref{d.cl_exch_iff}, equivalently,
\eqref{d.cl_exch_iff_mx-}, equivalently, \eqref{d.cl_exch_iff_mx},
then $R$ is embeddable in a division ring.
\end{lemma}
\begin{proof}
That the operators defined by~\eqref{d.cl_fr_M} satisfy
\eqref{d.cl_cl} and \eqref{d.cl_proper} is straightforward,
the key fact being that the annihilator in $R^n$ of
every element $a\in M^n$ is a right submodule of $R^n,$ which
is proper if $a\neq 0.$
\eqref{d.cl_homs} is also not difficult, but here are the details.
Let $h:R^m\to R^n$ be represented by the
$n\times m$ matrix $H,$ acting on the left on columns
of elements of $R.$
The matrix $H$ can also be applied on the right to rows of elements of
$M,$ so as to carry $M^n$ to $M^m,$ and if we also call this
map $h$ (and write it on the right), the associativity
of formal matrix multiplication gives the law $(a\,h)\,x=a\,(h\,x).$
Thus, if $A\subseteq R^n$ is closed, i.e., is the
annihilator of a subset $T\subseteq M^n,$ and we write its
inverse image $h^{-1}(A)\subseteq R^m$ as
$\{x\in R^m\mid h\,x\in A\}=
\{x\in R^m\mid(\forall\,t\in\nolinebreak T)\ t\,(h\,x)=0\}=
\{x\in R^m\mid(\forall\,t\in T)\ (t\,h)\,x=0\},$ we see that
this is the annihilator of $T\,h\subseteq M^m,$ hence also closed.
The equivalence of~\eqref{d.cl_exch} with~\eqref{d.cl_exch_iff}
is easy to see if we bear in mind that an inclusion between the
annihilators in $M^n$ of two subsets of $R^n$ is
equivalent to the {\em reverse} inclusion between the closures
of those subsets of $R^n,$ as defined by~\eqref{d.cl_fr_M}.
Condition~\eqref{d.cl_exch_iff_mx-} is a translation
of~\eqref{d.cl_exch-}, gotten by
looking at the columns of the matrices $A,$ $B$ and $C$
as elements of $R^n.$
\end{proof}
\begin{lemma}\label{L.X'}
Let $X$ be an $n\times n{-}1$ matrix over $R,$ where $n>0,$
and $y\in R^n$ a column vector.
Then if the upper $n{-}1\times n{-}1$ block of $X$
is $\!M\!$-invertible, and $y$ maps $\r{ann}_{M^n}(X)$ bijectively
to $M,$ then the $n\times n$ matrix $X'$ gotten by
appending $y$ to $X$ as an $\!n\!$-th column is $\!M\!$-invertible.
\end{lemma}
\begin{proof}
By assumption, $y$ annihilates no member of $\r{ann}_{M^n}(X);$
clearly this says that the matrix $X'$
annihilates no member of $M^n.$
To see that $X'$ is surjective, let $a\in M^n$
be an element we want to show is in its range.
By the $\!M\!$-invertibility of the top $n{-}1\times n{-}1$ block
of $X,$ we can find $b\in M^n$ with last term $0,$ and whose
first $n-1$ terms form a vector carried by that subblock of $X$
to the first $n-1$ terms of $a.$
Since the last term of $b$ is $0,$ multiplying $b$ by the whole
matrix $X$ still gives the first $n-1$ terms of $a.$
If we apply $y$ to $b,$ we get an element $by\in M$
which may differ from the desired last term $a_n$ of $a;$
but since $y$ carries the annihilator of $X$ bijectively
to $M,$ we can find an element $b'$ in that
annihilator which is carried by $y$ to $a_n - by.$
We then get $(b+b')X'=a,$ proving surjectivity.
\end{proof}
From this, we can prove, for any $N\geq 0,$
\begin{lemma}\label{L.max_inv}
Suppose $R$ and $M$ are a ring and module
satisfying~\eqref{d.either/or} for all $0<n\leq N.$
\end{lemma}
\begin{proof}
Suppose, by way of contradiction, that
for some $j>m,$ the $\!j\!$-th column of $H$
did not annihilate $\r{ann}_{M^n}(\left(\begin{matrix} A \\
C \end{matrix}\right)).$
By rearranging the columns of $H$ after the $\!m\!$-th,
we can assume without loss of generality that $j=m+1.$
Since the $\!m{+}1\!$-st column of $H$ does not annihilate
$\r{ann}_{M^n}(\left(\begin{matrix} A \\
C \end{matrix}\right)),$ that column will not annihilate all of
the direct summands mentioned in the preceding paragraph, and by a
rearrangement of the rows of $H,$ we can assume
that a summand which it fails to annihilate consists of the members of
$\r{ann}_{M^n}(\left(\begin{matrix} A \\
C \end{matrix}\right))$ whose only nonzero entry after
the $\!m\!$-th (if any) is the $\!m{+}1\!$-st.
Let us now apply~\eqref{d.either/or}, putting in the role of $X$
the $m{+}1\times m$ matrix consisting of $A$ and the top row of $C,$
and in the role of $y$ the column vector consisting of the
first $m+1$ entries of the $\!m{+}1\!$-st column of $H.$
By assumption, that column does not annihilate the
annihilator of that matrix.
By Lemma~\ref{L.X'}, that makes the upper left $m{+}1\times m{+}1$
submatrix of $H$ an $\!M\!$-invertible matrix,
contradicting the maximality assumption on $A.$
This contradiction shows that every column of
$\left(\begin{matrix} B \\
D \end{matrix}\right)$ annihilates
$\r{ann}_{M^n}(\left(\begin{matrix} A \\
C \end{matrix}\right)),$ as claimed.
\end{proof}
We can now prove
\begin{proposition}\label{P.e/o=>exch}
Let $R$ and $M$ be a ring and module satisfying~\eqref{d.either/or}
for all $n>0.$
Then~\eqref{d.cl_exch_iff_mx-} holds for all $n>0;$ so if,
moreover,
the $\!R\!$-module $M$ is faithful, then $R$ admits an
embedding in a division ring.
\end{proposition}
\begin{proof}
Assume, by way of contradiction, that strict inclusions
as in~\eqref{d.cl_exch_iff_mx-} hold; however, let us write $H$
for the matrix there called $A,$ and write the matrices
there called $B$ and $C$ as $(H,s)$
and $(H,t),$ where $s,t\in R^n$ (freeing up the
letters $A$ through $D$ for use as in Lemma~\ref{L.max_inv}).
Applying Lemma~\ref{L.max_inv} to $H,$ we get (after rearranging the
rows and columns of $H)$
an $n\times m$ submatrix $H'=\left(\begin{matrix} A \\
C \end{matrix}\right),$ where $m\leq n-1,$
having the same left annihilator in $M^n$
as $H,$ and such that $A$ is invertible.
By hypothesis, the vectors $s$ and $t$ each act nontrivially
on $\r{ann}_{M^n}(H)=\r{ann}_{M^n}(H'),$
with $s$ having strictly larger annihilator
there than $t$ does, so
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.H',sH',t}
$\r{ann}_{M^n}(H',s)\ \supsetneqq\ \r{ann}_{M^n}(H',t).$
\end{minipage}\end{equation}
Now as in the proof of Lemma~\ref{L.max_inv}, we see that in
$\r{ann}_{M^n}(H'),$ each element is determined uniquely
by its final $n-m$ terms, and that since $s$ acts nontrivially
on that annihilator, it will act nontrivially on an element
in which only one of those positions has a nonzero entry.
By another rearrangement of rows we can assume that
that position is the $\!m{+}1\!$-st.
The annihilator of the action $t$ on $\r{ann}(H')$
was assumed to be contained in that of $s,$ so $t$ will
also act nontrivially on that element.
Hence by Lemma~\ref{L.X'}, in each of the $n\times m{+}1$ matrices
$(H',s)$ and $(H',t),$ the top $m{+}1\times m{+}1$ block
will be invertible.
Hence in the annihilators of those matrices, every element
will be determined uniquely by its last $n-m-1$ terms.
Clearly, if the functions determining such elements
from their last $n-m-1$ terms are the same
for $(H',s)$ and $(H',t),$ then the annihilators of those matrices
are the same, while if the functions are different,
those annihilators are incomparable; so neither possibility
is compatible with the assumed strict inclusion~\eqref{d.H',sH',t}.
This contradiction completes the proof of~\eqref{d.cl_exch_iff_mx-}.
The final assertion of the
proposition follows by Lemma~\ref{L.cl_fr_M}.
\end{proof}
We remark that an $\!R\!$-module $M$
satisfying~\eqref{d.cl_exch_iff_mx-} for all $n,$
and hence leading to a homomorphism from $R$ to a division
ring $D,$ need not, in general, itself be a vector space
over a division ring.
For example, if $R$ is a commutative integral domain, one
finds that the choice $M=R$ leads as in~\S\ref{S.M}
to a closure operator that gives the field of fractions $F$ of $R.$
Indeed, the closure operator determined by $M$ is the same
as that determined by the $\!R\!$-module $F,$ since
the annihilator of any row vector over $F$
is also the annihilator of a row vector over $R,$
gotten by clearing denominators.
On the other hand,
I do not know whether a module satisfying the stronger
condition~\eqref{d.either/or} for all $n$ must be a vector space over
the division ring $D$ that it determines.
(This is indeed so in the case of commutative $R,$ where the
$n=1$ case of~\eqref{d.either/or} is the analog of Dubrovin's result:
It says that every element of $R$ that does not annihilate $M$
acts invertibly on it.)
The $n=2$ case of~\eqref{d.either/or},
discussed in the preceding section, should be a useful
test case for ideas on how to try to prove
that for $R=kG$ and $M=k((G)),$~\eqref{d.either/or} holds for all $n.$
\section{Sandwiching $kG$ between a right and a left module}\label{S.G*}
For $R$ an algebra over a field $k$ and $M$ a right $\!R\!$-module,
the dual $\!k\!$-vector space $M^*=\r{Hom}_k(M,k)$ has
a natural structure of {\em left} $\!R\!$-module.
If we write the image of $a\in M$ under
$b\in M^*$ as $\lang a,b\rang\in k,$ then the relation between these
module structures is described by the rule
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.MrM*}
$\lang a\,r,\,b\rang\ =\ \lang a,\,r\,b\rang$\quad
for\quad $a\in M,\ r\in R,\ b\in M^*.$
\end{minipage}\end{equation}
%
I do not know whether $M^*$ can somehow be used, together with
$M,$ in studying whether $R$ is embeddable in a division ring.
However, the above observation is really a lead-in to the observation
that for $R=kG$ and $M=k((G)),$ there is
a left $\!R\!$-module that behaves much like the above $M^*,$ but is
not itself constructed from $M,$ and hence has (conceivably) a better
chance of bringing additional strength to our investigations.
Namely, given a group $G$
with a right-invariant ordering $\leq,$ let $G^*$ be the same
group under the corresponding {\em left}-invariant
ordering, $\leq^*,$ characterized by
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.G*}
$g\,\leq^*\,h\quad\iff\quad g^{-1}\geq h^{-1}.$
\end{minipage}\end{equation}
(A right- or left-invariant ordering $\leq$
on a group is determined by its
{\em positive cone}, $\{g\in G\mid g\geq 1\}.$
The ordering $\leq^*$ defined above is the left-invariant ordering
having the same positive cone as the given
right-invariant ordering ${\leq}.$
Indeed, writing $P$ for the positive cone of ${\leq},$ we have
$g\leq h$ if and only if $h\in Pg,$ so by~\eqref{d.G*},
$g\leq^* h$ if and only if $g^{-1}\in Ph^{-1},$
which, left-multiplying by $g$ and right-multiplying by $h,$
comes to $h\in gP.)$
Let us write $k((G^*))$ for the space of
formal $\!k\!$-linear combinations of elements of $G$
having well-ordered supports under $\leq^*;$
this clearly has a natural structure of {\em left} $\!kG\!$-module.
I claim that we can define a $\!k\!$-bilinear
map $\lang\,,\,\rang:k((G))\times k((G^*))\to k$ by
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.kGkG*}
$\lang \sum \alpha_g g,\ \sum \beta_h h\rang\ =
\ \sum_{g\in G}\,\alpha_g \beta_{g^{-1}}$\quad for\quad
$\sum \alpha_g g\in k((G))$ and $\sum \beta_h h\in k((G^*)).$
\end{minipage}\end{equation}
%
To see that the right-hand side of the equation
of~\eqref{d.kGkG*} makes sense, let $A$ be the set of
$g\in G$ such that both $\alpha_g$ and $\beta_{g^{-1}}$ are nonzero.
The condition $\sum \alpha_g g\in k((G))$
shows that $A$ is well-ordered under ${\leq}.$
Similarly, since $A$ is contained in the set of {\em inverses} of
elements of the support of $\sum \beta_h h,$ and the latter support
is well-ordered under $\leq^*,$~\eqref{d.G*} shows that
$A$ is reverse-well-ordered under ${\leq}.$
Being both well-ordered and reverse-well-ordered
under $\leq,$ $A$ is finite; so the sum on the
right-hand side of~\eqref{d.kGkG*} is indeed defined.
The formula~\eqref{d.kGkG*} looks as though it says, ``Multiply
the formal sums $\sum\alpha_g g$ and $\sum\beta_h h$ together,
and take the coefficient of $1$ in the result''.
But though the summation that would give
that coefficient is, as we have just
seen, defined, the same need not be true of the coefficients
of other members of $G.$
For instance, if $G$ contains elements $s,t,$ both $>1,$ such
that $t s = s t^{-1},$ then $\sum_{i\geq 0} t^i$ belongs
to both $k((G))$ and $k((G^*));$ hence by left-invariance
of the order on $G^*,$
$\sum_{j\geq 0} s t^j$ also belongs to $k((G^*)).$
But the formal product of these two elements is
$(\sum_{i\geq 0} t^i)\,(\sum_{j\geq 0} s t^j)=
\sum_{i,j\geq 0} s t^{j-i},$
in which the term $s$ occurs infinitely many times.
(More generally, in this summation, each term $s t^j$
occurs infinitely many times, while terms $t^j,$
in particular, the term $1,$ never occur; which is
consistent with our observation that $1$ can occur only
finitely many times.)
Returning to the map~\eqref{d.kGkG*}, one finds that it
satisfies the analog of~\eqref{d.MrM*}:
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.kGrkG*}
$\lang a\,r,\,b\rang =\lang a,\,r\,b\rang$\quad for\quad
$a\in k((G)),$ $r\in kG,$ $b\in k((G^*)).$
\end{minipage}\end{equation}
%
This is intuitively clear from the ``coefficient of~$1$''
interpretation of $\lang\,,\,\rang.$
To verify it formally, one can first check it for $r\in G,$ then take a
finite $\!k\!$-linear combination of the resulting formulas.
Let us write the common value of the two
sides of~\eqref{d.kGrkG*} as $\lang a\,r\,b\rang.$
Thus, given $a\in k((G))$ and $b\in k((G^*)),$
though one cannot associate to each $g\in G$ the ``coefficient
of $g$ in their product'', one can associate to each such
$g$ the value $\lang a\,g^{-1}\,b\rang.$
It is not hard to check that this is in fact the
coefficient of $g$ in the formal product $ba;$
so the summations giving all coefficients of that
product (unlike the summations that would
give the coefficients in $ab)$
do each involve only finitely many terms.
Thus, the construction sending a pair $(a,b)$ to
the formal sum $\sum (\lang a g^{-1} b\rang)\,g\in k^G,$
equivalently, to the formal product $ba,$ is a
well-defined $\!k\!$-bilinear map $k((G))\times k((G^*))\to k^G.$
However, the elements of the resulting
subspace $k((G^*))\ k((G))\subseteq k^G$
are not as ``nice'' as those of $k((G))$ and $k((G^*)).$
For instance, for $G$ having positive elements satisfying
$t s = s t^{-1}$ as above, $k((G))$ contains
$(\sum_{i\geq 0} t^i)s=s(\sum_{i\geq 0} t^{-i}),$ and $k((G^*)),$
as we have noted,
contains $s(\sum_{i>0} t^i);$ so $k((G^*))\ k((G))$ will contain
$s(\sum_{i\geq 0} t^{-i})\cdot 1 + 1\cdot s(\sum_{i>0} t^i)
=s(\sum_{-\infty}^\infty t^i).$
(If one wants to see that a product
of a single element of $k((G^*))$ with
a single element of $k((G))$ can misbehave in this way, note that in
the product
$(1+s(\sum_{i\geq 0} t^{-i}))\cdot(1+s(\sum_{i>0} t^i)),$ the terms
homogeneous of degree $1$ in $s$ give the expression just described.)
However (again writing $P$ for the positive cone of the
right-ordered group $G,$ equivalently
of the left-ordered group $G^*)$
we can at least say that each element of
$k((G^*))\ k((G))$ has support which is contained in
$u\,P\,v$ for some $u,v\in G,$
equivalently, which is disjoint from $u\,(P-\{1\})^{-1}\,v.$
Namely, given $\sum_{i=1}^n b_i a_i$
with each $b_i\in k((G^*))$ and each $a_i\in k((G)),$ take $u$
such that the supports of all the $b_i$ are in $uP,$
and $v$ such that the supports of all the $a_i$ are in $Pv.$
Suppose we now let $S$ denote the set of pairs $(s_1,s_2)$
such that $s_1$ is a $\!k\!$-vector-space endomorphism
of $k((G))$ and $s_2$ a $\!k\!$-vector-space endomorphism
of $k((G^*)),$ written on the right and the left respectively,
which satisfy
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.kGskG*}
$\lang a\,s_1,\,b\rang =\lang a,\,s_2\,b\rang$\quad for\quad
$a\in k((G)),$ $b\in k((G^*)).$
\end{minipage}\end{equation}
%
It is easy to see that in such a pair, $s_1$ and $s_2$
each determine the other.
The set $S$ forms a $\!k\!$-algebra under the obvious operations,
and contains a copy of $kG,$ consisting of all
pairs $(r,r),$ where by abuse of notation we let the symbol
for $r\in kG$ denote both the right action of $r$
on $k((G))$ and its left action on $k((G^*)).$
For nonzero $r\in kG,$ we can see from Theorem~\ref{T.Dubrovin}
and its left-right dual that
all such elements are invertible in $S;$ so $S$ contains all
ring-theoretic expressions in ``elements of $kG$'' and their inverses.
But if one has any hope that $S$ might be a division ring (as
I briefly did), that is quickly squelched.
It contains, for instance, a copy of the direct
product $\!k\!$-algebra $k^G.$
Namely, if we let each $(c_g)_{g\in G}$ in that algebra
act on $k((G))$ by $\sum \alpha_g g\mapsto \sum c_g \alpha_g g$
and on $k((G^*))$ by $\sum \beta_g g\mapsto \sum c_{g^{-1}} \beta_g g,$
these actions are easily seen to satisfy~\eqref{d.kGskG*},
and to have the ring structure of the direct product of fields $k^G.$
In conclusion, I do not know whether the interaction of the right
$\!kG\!$-module $k((G)),$ the left $\!kG\!$-module $k((G^*)),$
and the operator $\lang\,,\,\rang$ may, in some
way, be useful in tackling the question of whether
$kG$ can be embedded in a division ring.
\section{Further ideas -- also having difficulties}\label{S.variants}
\subsection{A different sort of $\!kG\!$-module?}\label{SS.prod_G_mod_?}
We noted in the preceding section that a
right ordered group $G$ can have elements $s$ and $t$
satisfying $ts = st^{-1}.$
Indeed, that relation gives a presentation of
the simplest example of a group admitting
a right invariant ordering but not
a two-sided invariant ordering:
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.ts=}
$G\ =\ \lang s,t\mid t s = s t^{-1}\rang.$
\end{minipage}\end{equation}
If we write elements of this group in the normal
form $t^i s^j$ $(i,j\in\Z),$ it is straightforward
to verify that a right ordering is given by lexicographic
ordering of the pairs $(j,i):$
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.st}
$t^i s^j \leq t^{i'} s^{j'}$\quad $\iff$ \quad
$j<j',$\ \ or\ \ $j=j'$ and $i\leq i'.$
\end{minipage}\end{equation}
%
% NOTE(review): a passage appears to be missing here.  It should
% introduce the elements $x,$ $y,$ the exponents $h\in\mathbb{C},$ the
% constant $c,$ the set $S,$ and the sets $G_\preceq$ referred to
% below; restore it from the published version.
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.yhxn}
$y^h x^n\ \geq\ y^{h'} x^{n'} \iff
\left \{ \begin{array}{cl}
\mbox{either} & n>n',\\[.2 em]
\mbox{or} & n=n'\ \ \mbox{and}\ \r{Re}(h)>\r{Re}(h'),\\[.2 em]
\mbox{or} & n=n',\ \, \r{Re}(h)=\r{Re}(h'),
\ \, \mbox{and}\ \, \r{Im}(h)\geq\r{Im}(h')\,.
\end{array} \right.$
\end{minipage}\end{equation}
%
(Cf.~\eqref{d.st}.)
In this situation, if $c$ has the form $e^{\alpha \pi i}$
$(\alpha\in\mathbb{R}),$ then for any $S$ with more than one element,
and any ordering $\preceq$ of $S$ such that $G_\preceq$ is
nonempty, it is not hard to show that $\{n\in\Z\mid x^n\in G_\preceq\}$
is periodic (invariant under some nonzero additive translation
on $\Z)$ if and only if $\alpha$ is rational.
So in the irrational case, the sets $G_\preceq$
are particularly messy.
\subsection{One case that would imply the general result we want}\label{SS.ord_aut_R}
Yves de~Cornulier (personal communication)
has pointed out that to prove embeddability
of $kG$ in a division ring for every right-orderable group $G,$
we `merely' need to prove this for $G$
the group of order-automorphisms of the ordered set of real numbers,
or, alternatively, for $G$ the order-automorphisms of the ordered
set of rationals.
For it is known \cite[Proposition~2.5]{Linnell} that any {\em countable}
right orderable group can be embedded in each of those groups; hence
if one of those two group algebras were embeddable in a division ring,
then for any right-orderable group $G,$ all of its finitely generated
subgroups $G_0$ would have group algebras $kG_0$ embeddable in
division rings, and
from this, a quick ultraproduct argument would give the embeddability
of $kG$ itself in a division ring.
\subsection{Can we use lattice-orderability?}\label{SS.lat_ord}
Recall the fact mentioned at the end of~\S\ref{S.intro},
that the one-sided-orderable groups are the groups embeddable,
group-theoretically, in lattice-ordered groups.
So what we want is equivalent to saying that the
group algebra $kG$ of every lattice-ordered group $G$ is embeddable in
a division ring.
The partial ordering of a lattice-ordered group
is required to be invariant under both right and left translations,
and it is tempting to hope that we should be able to
construct a division ring of formal infinite sums whose
supports in $G$ have some nice property with respect to such
a lattice ordering.
However, note that any lattice-ordered group $G$ can be embedded
group-theoretically, by the diagonal map, in the lattice-ordered
group $G\times G^\r{\,op},$ where $G^\r{\,op}$
is the group $G$ with its partial order relation reversed.
Since the subgroup of $G\times G^\r{\,op}$ given by the
image of this embedding is an antichain, it is hard
to see how the order structure can be used to pick out a
class of infinite sums that would form a division ring and
contain that diagonal subring.
But one might be able to go somewhere with this idea -- perhaps
defining a permissible infinite sum not just in terms of
order relations among the elements of its support,
but using the sublattice generated by that support.
(Incidentally, the lattice structure of a lattice-ordered group is
always distributive \cite[Corollary~3.17]{Darnel}.)
\section{Appendix on prime matrix ideals}\label{S.PMC_versions}
Let us recall P.\,M.\,Cohn's approach to
maps of rings into division rings, which we sketched in \S\ref{S.intro}.
It is based on
\begin{definition}[\cite{FRR}, \cite{SF}, \cite{FRR+}]\label{D.PMC_sing_ker}
Let $f:R\to D$ be a homomorphism from a ring into a division ring.
Then the {\em singular kernel} $\Pm$ of $f$ is the
set of square matrices
over $R$ whose images under $f$ are singular matrices over $D.$
\end{definition}
Cohn shows that in the above situation, the structure of the division
subring of $D$ generated by $f(R)$ is determined by $\Pm$
(\cite{FRR}, \cite{SF}, \cite{FRR+}; see also \cite{Malcolmson1}),
and he notes that $\Pm$ has
properties~\eqref{d.PMC_nonfull}-\eqref{d.PMC_prime} below.
Let me explain in advance the notation
of~\eqref{d.PMC_nabla_col} and~\eqref{d.PMC_nabla_row}.
If $A$ and $B$ are square matrices of the same
size, which agree except in their
$\!r\!$-th row, or agree except in their
$\!r\!$-th column, then $A\nabla B$ is defined to be the matrix
which agrees with $A$ and $B$ in all rows or columns but the $\!r\!$-th,
and has for $\!r\!$-th row or column the sum of those
rows or columns of $A$ and $B.$
The specification of whether rows or columns are involved, and of
the $r$ in question, is understood to be determined by context.
Cohn calls $A\nabla B$
the {\em determinantal sum} of $A$ and $B,$ in view
of the expression, when $R$ is commutative,
for the determinant of that matrix.
Here, now, are the properties of the singular kernel $\Pm$ of
a homomorphism of $R$ into a division ring used by Cohn:
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_nonfull}
$\Pm$ contains every square $n\times n$ matrix that can be
written as the product of an $n\times n{-}1$ matrix and
an $n{-}1\times n$ matrix over $R.$
(Cohn calls such products {\em non-full} matrices.)
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_(+)}
If $A$ is a matrix lying in $\Pm,$ and $B$ is {\em any}
square matrix over $R,$ then $\Pm$ contains the matrix
$\left(\begin{matrix} A & 0 \\
0 & B \end{matrix}\right),$ denoted $A\oplus B.$
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_nabla_col}
If $\Pm$ contains square $n\times n$ matrices $A$ and $B$ which agree
except in the $\!r\!$-th column for some $r,$ then it contains their
determinantal sum $A\nabla B$ with respect to that column.
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_nabla_row}
If $\Pm$ contains square $n\times n$ matrices $A$ and $B$ which agree
except in the $\!r\!$-th row for some $r,$ then it contains their
determinantal sum $A\nabla B$ with respect to that row.
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_(+)1}
If $\Pm$ contains a matrix of the form $A\oplus 1,$ where
$1$ denotes the $1\times 1$ matrix with entry $1,$ and where
$\oplus$ is defined as in~\eqref{d.PMC_(+)}, then $A\in\Pm.$
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_1}
The $1\times 1$ matrix $1$ is not in $\Pm.$
\end{minipage}\end{equation}
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_prime}
If $A\oplus B\in\Pm,$ then $A\in\Pm$ or $B\in\Pm.$
\end{minipage}\end{equation}
In~\cite{FRR},~\cite{FRR+}, and many other works,
Cohn calls a set of square matrices over a ring $R$ which
satisfies~\eqref{d.PMC_nonfull}-\eqref{d.PMC_(+)1} a
{\em matrix ideal}, and calls a matrix ideal which
also satisfies~\eqref{d.PMC_1} and~\eqref{d.PMC_prime} {\em prime}.
He proves that for every prime matrix ideal $\Pm$ of
$R,$ the ring gotten by universally adjoining to $R$ inverses
to all matrices not in $\Pm$ is a local ring, whose residue
ring is a division ring $D$ such that the singular kernel
of the induced map $R\to D$ is precisely $\Pm$
\cite[Theorem~7.4.3]{FRR+}.
Thus since, as mentioned, the singular kernel of a
map $f:R\to D$ determines the division subring generated by the
image of $R,$ it follows
that homomorphisms from $R$ into division rings generated
by the images of $R$ are, up to isomorphisms making
commuting triangles with those homomorphisms,
in bijective correspondence with prime matrix ideals of $R.$
We see from Definition~\ref{D.PMC_sing_ker}
that the homomorphism $R\to D$ corresponding to $\Pm$ is
one-to-one if and only if $\Pm$ contains no nonzero $1\times 1$ matrix.
However, in~\cite[\S4.4]{SF}, Cohn defines matrix ideals by
conditions~\eqref{d.PMC_nonfull}-\eqref{d.PMC_nabla_col}
and~\eqref{d.PMC_(+)1}, omitting~\eqref{d.PMC_nabla_row},
again calling such a matrix ideal prime
if~\eqref{d.PMC_1} and~\eqref{d.PMC_prime} hold.
(He notes at \cite[p.\,164, two lines after display~(30)]{SF}
that one can similarly define determinantal sums with respect
to rows, ``but this will not be needed''.)
He claims to prove, under this definition, the same result
cited above, that the prime matrix ideals
are precisely the singular kernels of homomorphisms to division rings.
This, together with the corresponding result proved
using the stronger definition, would imply that the two
definitions of prime matrix ideal are equivalent.
Now the shortened definition of prime matrix ideal
would lend itself to an approach similar to the one we took
in~\S\ref{S.M}.
Namely, given a right $\!R\!$-module $M,$ we could
for each $n\geq 0$ consider the $n\times n$ matrices over $R$
which act non-injectively on $M^n,$ verify that these together
satisfy {\em most} of the conditions to form a prime matrix ideal
(details below), and examine
when they satisfy the remaining conditions.
But this would be more difficult if we used the
definition appearing in most of Cohn's work on this
subject, containing condition~\eqref{d.PMC_nabla_row}.
Unfortunately, I have difficulty verifying
one of the steps in the proof in~\cite{SF} that prime
matrix ideals, defined without condition~\eqref{d.PMC_nabla_row},
yield homomorphisms to division rings.
Fortunately, Peter Malcolmson has been able to supply an
argument, which with his permission I give
below, showing that in the stronger definition of prime
matrix ideal, condition~\eqref{d.PMC_nabla_row} can be
replaced by a condition that {\em is} easily verifiable
for the set of matrices that act non-injectively on product
modules $M^n$ for a right $\!R\!$-module $M.$
Let me first sketch, for the reader with~\cite{SF} in
hand, my problem with the development given there.
It concerns the assertion in the middle of p.\,164 that the operation
$\odot$ on square matrices
introduced on that page respects equivalence classes under
the equivalence relation $\sim$ defined on p.\,163.
That equivalence relation is generated by three sorts
of operations on matrices:
certain operations of left multiplication by elementary matrices,
certain operations of right multiplication by elementary matrices,
and certain operations of deleting rows and columns.
If we have $a_1\sim a_2$ via a left multiplication operation,
or via the deletion operation, it is indeed straightforward
that $a_1\odot b\sim a_2\odot b$ via the same operation;
but if $a_1\sim a_2$ via a right multiplication operation,
I don't see why $a_1\odot b\sim a_2\odot b$ should hold.
Similarly, if $b_1\sim b_2$ via a right multiplication operation
or a deletion operation, I have no problem,
but if they are related via a left multiplication operation,
I don't see that $a\odot b_1\sim a\odot b_2.$
Here, however, is Malcolmson's result.
\begin{lemma}[P.\,Malcolmson, personal communication]\label{L.PM}
Let $R$ be a ring, and $\Pm$ a set of square matrices over $R$
satisfying~\eqref{d.PMC_nonfull}-\eqref{d.PMC_nabla_col}
and \eqref{d.PMC_(+)1}.
Then $\Pm$ also satisfies~\eqref{d.PMC_nabla_row} if and only
if it satisfies
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.I+eij}
For each $n>0,$ the set of $n\times n$ matrices in $\Pm$
is closed under left multiplication by matrices $I_n\pm e_{ij}$
$(i\neq j).$
\end{minipage}\end{equation}
\end{lemma}
\begin{proof}
``Only if'' follows from \cite[2nd ed., point (f) on p.\,398]{FRR},
which shows that a set $\Pm$ of square matrices
satisfying conditions~\eqref{d.PMC_nonfull}-\eqref{d.PMC_(+)1}
(there called M.1-M.4, with M.3 being the conjunction
of~\eqref{d.PMC_nabla_col} and~\eqref{d.PMC_nabla_row})
is closed under right and left multiplication by
arbitrary square matrices.
Below, we shall prove ``if\,''; so assume~\eqref{d.I+eij} holds.
By a familiar calculation, the group generated by the
elementary matrices $I+e_{ij}$ and their inverses
$I-e_{ij}$ contains the matrices whose
left actions transpose an arbitrary pair of rows, changing
the sign of one of them.
(The essence of that calculation is the $2\times 2$ case,
$\left(\begin{matrix} 1 & 0 \\
1 & 1 \end{matrix}\right)
\left(\begin{matrix} 1 & \!{-}1 \\
0 & 1 \end{matrix}\right)
\left(\begin{matrix} 1 & 0 \\
1 & 1 \end{matrix}\right)=
\left(\begin{matrix} 0 & \!{-}1 \\
1 & 0 \end{matrix}\right).)$
This will be a key tool later on, but let us first
use it in a trivial way: it allows us to reduce to the case where
the row with respect to which we want to show closure
under determinantal sums is the last row of our matrices.
(That reduction also uses the observation that the operation of
determinantal sum with respect to any row respects the operation of
reversing the sign of a particular row in all matrices.)
Another fact we shall use is that if $\Pm$ is a set of square
matrices satisfying~\eqref{d.PMC_nonfull} and~\eqref{d.PMC_nabla_col},
$A$ an $n\times n$ matrix,
$B$ an $n'\times n'$ matrix, and
$C$ an $n'\times n$ matrix, then
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.low_left}
$\Pm$ contains
$\left(\begin{matrix} A & 0 \\
0 & B \end{matrix}\right)$
if and only if it contains
$\left(\begin{matrix} A & 0 \\
C & B \end{matrix}\right).$
\end{minipage}\end{equation}
%
This can be seen from point~(e) on p.\,397
of \cite[2nd edition]{FRR}.
(Although both~\eqref{d.PMC_nabla_col} and~\eqref{d.PMC_nabla_row} are
assumed there, only the former is used in the calculation.)
Now to prove the ``if'' direction of our lemma, let $X$ be an
$n{-}1\times n$ matrix over $R,$ and $a,$ $b$ length-$\!n\!$
rows such that
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.XaXb}
$\left(\begin{matrix} X \\
a \end{matrix}\right),
\ \left(\begin{matrix} X \\
b \end{matrix}\right)\in\Pm.$
\end{minipage}\end{equation}
%
Applying~\eqref{d.PMC_(+)}, we get
$\left(\begin{matrix} X & 0 \\
a & 0 \\
0 & 1\end{matrix}\right),
\ \left(\begin{matrix} X & 0 \\
b & 0 \\
0 & 1\end{matrix}\right)\in\Pm.$
Applying~\eqref{d.low_left} to these matrices, we get
$\left(\begin{matrix} X & 0 \\
a & 0 \\
b & 1\end{matrix}\right),
\ \left(\begin{matrix} X & 0 \\
b & 0 \\
{-}a{-}b & 1\end{matrix}\right)\in\Pm.$
If we left-multiply the first of those
two matrices by $I_{n+1}+e_{n,n+1},$ we get
$\left(\begin{matrix} X & 0 \\
a{+}b & 1 \\
b & 1\end{matrix}\right)\in\Pm,$ while if we left multiply the
second by a product of elementary matrices that
transposes the last two rows and changes the sign of one of
them, we get $\left(\begin{matrix} X & 0 \\
a{+}b & -1 \\
b & 0 \end{matrix}\right)\in\Pm.$
These two matrices differ only in their last column, and
applying~\eqref{d.PMC_nabla_col} to their
determinantal sum with respect to that column
gives $\left(\begin{matrix} X & 0 \\
a{+}b & 0 \\
b & 1 \end{matrix}\right)\in\Pm.$
Applying~\eqref{d.low_left} again, this gives
$\left(\begin{matrix} X & 0 \\
a{+}b & 0 \\
0 & 1 \end{matrix}\right)\in\Pm,$
hence by~\eqref{d.PMC_(+)1},
$\left(\begin{matrix} X \\
a{+}b \end{matrix}\right)\in\Pm.$
Having gotten this from~\eqref{d.XaXb}, we have
proved the case of~\eqref{d.PMC_nabla_row} where $r=n,$
which we have seen is equivalent to the general case.
\end{proof}
We can now obtain a result parallel to Lemma~\ref{L.cl_fr_M}.
As in the context of that lemma, elements of $M^n$ will be regarded as
row vectors, on which $n\times n'$ matrices over $R$ act on the right.
(Thus, the kernel $K$ referred to in~\eqref{d.PMC_nabla_col_if} below
is not, in general, an
$\!R\!$-submodule of $M^n,$ but merely an additive subgroup.)
\begin{lemma}\label{L.non-f+row-sum}
Let $M$ be a nonzero right module over a ring $R,$ and $\Pm$ the set of
square matrices $A$ over $R$ such that, if $A$ is $n\times n,$
$A$ gives a non-injective map $M^n\to M^n.$
Then\vspace{.5em}
\textup{(i)} $\Pm$ satisfies~\eqref{d.PMC_(+)}, \eqref{d.PMC_(+)1},
\eqref{d.PMC_1}, \eqref{d.PMC_prime}, and~\eqref{d.I+eij}.\vspace{.5em}
\textup{(ii)} A necessary and sufficient condition for
$\Pm$ to satisfy~\eqref{d.PMC_nonfull} is
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_nonfull_iff}
No $n\times n{-}1$ matrix over $R$ induces an injection of
abelian groups $M^n\to M^{n-1}$ $(n>0).$
\end{minipage}\end{equation}
\vspace{-1em} % without this, very wide space; with it, reasonable
\textup{(iii)}
A sufficient condition for $\Pm$ to satisfy~\eqref{d.PMC_nabla_col}
is
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_nabla_col_if}
If $K\subseteq M^n$ is the kernel of the
action on $M^n$ of an $n\times n{-}1$ matrix over $R,$ then
either \textup{(a)}~every
map $M^n\to M$ which is induced by a height-$\!n\!$ column vector
over $R,$ and is nonzero on $K,$ is one-to-one on $K,$
or \textup{(b)}~no such map is one-to-one on $K.$
\end{minipage}\end{equation}
Thus, if both~\eqref{d.PMC_nonfull_iff} and~\eqref{d.PMC_nabla_col_if}
hold, then $\Pm$ is a prime matrix ideal of $R.$
Hence if, further, the right $\!R\!$-module $M$ is faithful, then $R$
is embeddable in a division ring.
\end{lemma}
\begin{proof}
All parts of~(i) are straightforward.
(Condition \eqref{d.I+eij} is a special case of the observation that
$\Pm$ is closed under left and right multiplication by arbitrary
invertible matrices.)
(ii) is also easy:
Assume first that $\Pm$ satisfies~\eqref{d.PMC_nonfull}.
If $A$ is an $n\times n{-}1$ matrix over $R,$ then extending $A$
by a zero column, we get an $n\times n$ matrix $A'$
which is non-full in the sense stated in~\eqref{d.PMC_nonfull},
hence by~\eqref{d.PMC_nonfull} lies in $\Pm,$ hence, by our choice of
$\Pm,$ is not one-to-one on $M^n.$
Hence $A$ is not one-to-one there, proving~\eqref{d.PMC_nonfull_iff}.
Conversely, if $A$ is a non-full $n\times n$ matrix,
say $A=BC$ where $B$ is $n\times n{-}1$ and $C$ is $n{-}1\times n,$
then assuming~\eqref{d.PMC_nonfull_iff}, $B$ acts on $M^n$ with
nonzero kernel, hence so does $A,$ so $A\in\Pm.$
To prove (iii), let $A,B\in\Pm$ be as in~\eqref{d.PMC_nabla_col},
$C$ the common $n\times n{-}1$ submatrix obtained by deleting
the $\!r\!$-th columns from these, and $K$
the kernel of the action of $C$ on $M^n.$
From the fact that $A,B\in\Pm,$ we see that $K\neq\{0\}.$
Now if, as in the first alternative of~\eqref{d.PMC_nabla_col_if},
every map $M^n\to M$ induced by a height-$\!n\!$ column
vector restricts to either the zero map or a one-to-one map on $K,$
then for $A$ and $B$ to lie in $\Pm,$
their $\!r\!$-th columns must both induce the zero map
on $K,$ hence so will the sum of those columns,
showing (since $K\neq\{0\})$ that $A\nabla B$ lies in $\Pm.$
On the other hand, if {\em no} height-$\!n\!$ column vector induces a
one-to-one map on $K,$ then in particular, the
$\!r\!$-th column of $A\nabla B$ does not, giving the same conclusion.
To see the first sentence of the last paragraph of the lemma,
note that~(i),~(ii) and~(iii) give us all of
\eqref{d.PMC_nonfull}-\eqref{d.PMC_prime}
except~\eqref{d.PMC_nabla_row}, and that is given to us by
Lemma~\ref{L.PM}, since~(i) includes~\eqref{d.I+eij}.
The final sentence follows by the results of \cite{FRR} cited earlier.
\end{proof}
Remark: The converse of~(iii) above is not true;
i.e., $\Pm$ can satisfy~\eqref{d.PMC_nabla_col}
without satisfying~\eqref{d.PMC_nabla_col_if}.
For example, suppose $R=\Z$ and $M$ is the module $\Z/p^2\Z$
for some prime $p.$
It is not hard to see that the $\Pm$ of Lemma~\ref{L.non-f+row-sum}
will consist of the square matrices over $\Z$ whose
determinants are divisible by $p.$
This is the prime
matrix ideal corresponding to the homomorphism
of $\Z$ into the field $\Z/p\Z,$ so
in particular, it satisfies~\eqref{d.PMC_nabla_col}.
On the other hand, for any $n\geq 1,$ the subgroup
$K=\{0\}^{n-1}\times M\subseteq M^n$ is easily seen to be
the kernel of the action
of an $n\times n{-}1$ matrix; but if we take a height-$\!n\!$
column vector with $1$ in the $\!n\!$-th position, and
another with $p$ in that position, then both
are nonzero on $K,$ but the former is one-to-one while the
latter is not; so~\eqref{d.PMC_nabla_col_if} fails.\vspace{.4em}
On a general note,
the above approach to obtaining homomorphisms into division
rings from modules may be thought of as less convenient than the one
developed in \S\ref{S.M}, in that it leaves us the two
conditions~\eqref{d.PMC_nonfull_iff} and~\eqref{d.PMC_nabla_col_if}
to verify, in contrast to the one condition~\eqref{d.cl_exch_iff}
(with equivalent
forms~\eqref{d.cl_exch_iff_mx-},~\eqref{d.cl_exch_iff_mx}).
But it is, in another way, more robust,
in that the concept of prime matrix ideal is left-right symmetric,
and this allows us to produce a version of the same result based on
surjectivity rather than injectivity {\em without} switching
from right to left modules as we did in that section.
Rather, the switch between injectivity and
surjectivity can be made independently of whether we
use right or left modules.
The next lemma is the result based on right modules and surjectivity;
the two left-module results are obtained from the two right-module
results in the obvious way.
We leave to the reader the proof of the lemma, which
exactly parallels that of Lemma~\ref{L.non-f+row-sum}.
\begin{lemma}\label{L.non-f+col-sum}
Let $M$ be a nonzero right module over a ring $R,$ and $\Pm$ the set of
square matrices $A$ over $R$ such that, if $A$ is $n\times n,$
$A$ gives a {\em non-surjective} map $M^n\to M^n.$
Then\vspace{.5em}
\textup{(i)} $\Pm$ satisfies~\eqref{d.PMC_(+)}, \eqref{d.PMC_(+)1},
\eqref{d.PMC_1}, \eqref{d.PMC_prime}, and
the left-right dual of~\eqref{d.I+eij} \textup{(}closure under
{\em right} multiplication by matrices $I_n\pm e_{ij}).$\vspace{.5em}
\textup{(ii)} A necessary and sufficient condition for
$\Pm$ to satisfy~\eqref{d.PMC_nonfull} is
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_nonfull_iff'}
No $n{-}1\times n$ matrix over $R$ induces a surjection of
abelian groups $M^{n-1}\to M^n$ $(n>0).$
\end{minipage}\end{equation}
\vspace{-1em}
\textup{(iii)}
A sufficient condition for $\Pm$ to satisfy~\eqref{d.PMC_nabla_row}
is
%
\begin{equation}\begin{minipage}[c]{35pc}\label{d.PMC_nabla_row_if}
If $I\subseteq M^n$ is the image of $M^{n-1}$ under the
action of an $n{-}1\times n$ matrix over $R,$ then
either \textup{(a)}~every
map $M\to M^n$ which is determined by a length-$\!n\!$ row vector
and has image not contained in $I$ has image which, with $I,$
spans the additive group of $M^n,$ or \textup{(b)}~no
such map has image which, with $I,$ spans that additive group.
\end{minipage}\end{equation}
Thus, if both~\eqref{d.PMC_nonfull_iff'} and~\eqref{d.PMC_nabla_row_if}
hold, then $\Pm$ is a prime matrix ideal of $R.$
Hence if, further, every nonzero element of $R$ carries $M$ surjectively
to itself, then $R$ is embeddable in a division ring.\qed
\end{lemma}
\section{Acknowledgements}\label{S.Ackn}
I am indebted to Pace Nielsen for many invaluable comments and
corrections to earlier drafts of this note,
to A.\,M.\,W.\,Glass
for a helpful discussion of what is known about right-orderable groups,
to Peter Malcolmson for showing the way to Lemma~\ref{L.PM},
and to Yves de~Cornulier for the observation of~\S\ref{SS.ord_aut_R}.
\Needspace{4\baselineskip}
\begin{thebibliography}{00}
\bibitem{coproducts2} George M.\,Bergman,
{\em Coproducts and some universal ring constructions},
Trans. A. M. S. {\bf 200} (1974) 33--88.
MR0357503
\bibitem{sfd_fr_mtrd} George M.\,Bergman,
{\em Constructing division rings as module-theoretic direct limits},
Trans. A. M. S. {\bf 354} (2002) 2079--2114.
MR1881031
\bibitem{245} George M.\,Bergman,
{\em An Invitation to General Algebra and Universal Constructions,}
2015, Springer Universitext, x$\!+\!$572 pp.
\url{http://dx.doi.org/10.1007/978-3-319-11478-1}\,.
MR3309721
\bibitem{PMC_1971} Paul M. Cohn,
{\em Un crit\`{e}re d'immersibilit\'{e} d'un anneau dans
un corps gauche,}
C. R. Acad. Sci. Paris S\'{e}r.
A-B {\bf 272} (1971) A1442--A1444.
MR0279133
\bibitem{FRR} P.\,M.\,Cohn,
{\em Free rings and their relations,}
London Mathematical Society Monographs, No.\,2,
xvi$+$346 pp., 1971.
% Academic Press, London-New York,
MR0371938.
Second ed.: same Monographs, No.\,19,
% Academic Press, Inc. [Harcourt Brace Jovanovich, Publishers], London,
xxii$+$588 pp., 1985.
% ISBN: 0-12-179152-1.
MR0800091
\bibitem{SF} P.\,M.\,Cohn,
{\em Skew fields. Theory of general division rings.}
Encyclopedia of Mathematics and its Applications, 57.
Cambridge University Press, Cambridge. xvi$+$500 pp., 1995.
% ISBN: 0-521-43217-0.
MR1349108
\bibitem{FRR+} P.\,M.\,Cohn,
{\em Free ideal rings and localization in general rings,}
New Mathematical Monographs, 3. Cambridge University Press.
xxii$+$572 pp., 2006.
% ISBN: 978-0-521-85337-8; 0-521-85337-0.
% https://books.google.com/books?id=YLnF1CE4xDUC
MR2246388
\bibitem{Conrad} Paul Conrad,
{\em Right-ordered groups,}
Michigan Math. J. {\bf 6} (1959) 267--275.
\url{http://projecteuclid.org/download/pdf_1/euclid.mmj/1028998233}\,.
MR0106954
\bibitem{Darnel} Michael R. Darnel,
{\em Theory of lattice-ordered groups},
Monographs and Textbooks in Pure and Applied Mathematics, 187.
Marcel Dekker, 1995. viii$+$539 pp. ISBN: 0-8247-9326-9.
MR1304052
% \bibitem{DNR} Bertrand Deroin, Andr\'{e}s Navas, and
% Crist\'{o}bal Rivas,
% {\em Groups, Orders, and Dynamics,}
% \url{https://arxiv.org/pdf/1408.5805.pdf}.
\bibitem{Dubrovin} N.\,I.\,Dubrovin,
{\em Invertibility of the group ring of a right-ordered group over a
division ring} (Russian),
Mat. Zametki {\bf 42} (1987) 508--518, 622.
\url{http://www.mathnet.ru/links/23b614b35d59ceea7b08b86289719949/mzm5014.pdf}\,.
Transl.\ in Mathematical Notes of the Academy of Sciences of the USSR
{\bf 42} (1987) 781--786.
\url{http://link.springer.com/article/10.1007%2FBF01138310}\,.
(That translation has many errors.)
MR0917804
\bibitem{Dubrovin_cuts} N.\,I.\,Dubrovin,
{\em Rational closures of group rings of left-ordered groups}
(Russian), Mat. Sb. {\bf 184} (1993) 3--48.
Transl.\ in
Russian Acad. Sci. Sb. Math. {\bf 79} (1994) 231--263.
\url{http://dx.doi.org/10.1070/SM1994v079n02ABEH003498}\,.
MR1235288
% shows we can get div alg if (i) K(stabilizer of oA Ded cut)
% is rt Ore [what about zero cut??] and (ii) normalizer of
% positive cone is cofinal
\bibitem{Dubrovin_GL2} N.\,I.\,Dubrovin,
{\em Rational operators of the space of formal series} (Russian),
Fundam. Prikl. Mat. {\bf 12} (2006) 9--53.
Transl. in J. Math. Sci. (N.Y.) {\bf 149} (2008) 1191--1223.
\url{http://link.springer.com/article/10.1007%2Fs10958-008-0059-3}\,.
MR2249705
% shows we can embed kG for G the covering gp of GL(2,R)
\bibitem{Higman} Graham Higman,
{\em Ordering by divisibility in abstract algebras},
Proc. London Math. Soc. (3) {\bf 2} (1952) 326--336.
MR0049867
\bibitem{Linnell} Peter A. Linnell,
{\em Left ordered amenable and locally indicable groups},
J. London Math. Soc. (2) {\bf 60} (1999) 133--142.
MR1721820
\bibitem{Malcev} A.\,I.\,Mal'cev,
{\em On the embedding of group algebras in division algebras}
(Russian), Doklady Akad. Nauk SSSR
(N.S.) {\bf 60} (1948) 1499--1501.
MR0025457
\bibitem{Malcolmson1} Peter Malcolmson,
{\em A prime matrix ideal yields a skew field,}
J. London Math. Soc. (2) {\bf 18} (1978) 221--233.
\url{http://jlms.oxfordjournals.org/content/s2-18/2/221}\,.
MR0509937
% simpler more elegant construction than PMC's original one
\bibitem{Malcolmson2} Peter Malcolmson,
{\em Determining homomorphisms to skew fields,}
J. Algebra {\bf 64} (1980) 399--413.
\url{http://dx.doi.org/10.1016/0021-8693(80)90153-2}\,.
MR0579068
\bibitem{Kourovka} E.\,I.\,Khukhro and V.\,D.\,Mazurov, eds.,
{\em The Kourovka notebook.
Unsolved problems in group theory}, No.\,19.
Russian Academy of Sciences, Siberian Branch,
Sobolev Institute of Mathematics, Novosibirsk, 2018. 248~pp.
\bibitem{BHN} B.\,H.\,Neumann,
{\em On ordered division rings},
Trans.\ Amer.\ Math.\ Soc. {\bf 66} (1949) 202--252.
MR0032593
\bibitem{Welsh} D.\,J.\,A.\,Welsh,
{\em Matroid Theory,}
London Mathematical Society Monographs, No. 8, 1976.
MR0427112
\end{thebibliography}
\end{document}