HANDY ONE-LINERS FOR AWK 22 July 2003
compiled by Eric Pement version 0.22
Latest version of this file is usually at:
http://www.student.northpark.edu/pemente/awk/awk1line.txt
USAGE:
Unix: awk '/pattern/ {print "$1"}' # standard Unix shells
DOS/Win: awk '/pattern/ {print "$1"}' # okay for DJGPP compiled
awk "/pattern/ {print \"$1\"}" # required for Mingw32
Most of my experience comes from versions of GNU awk (gawk) compiled for
Win32. Note in particular that DJGPP compilations permit the awk script
to follow Unix quoting syntax '/like/ {"this"}'. However, the user must
know that single quotes under DOS/Windows do not protect the redirection
arrows (<, >) nor do they protect pipes (|). Both are special symbols
for the DOS/CMD command shell and their special meaning is ignored only
if they are placed within "double quotes." Likewise, DOS/Win users must
remember that the percent sign (%) is used to mark DOS/Win environment
variables, so it must be doubled (%%) to yield a single percent sign
visible to awk.
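For example (a sketch intended for a DOS batch file; "file.txt" is just a
placeholder name), each doubled %% below reaches awk as a single %, so awk
sees the format string "%5d : %s\n":
gawk "{printf \"%%5d : %%s\n\", NR, $0}" file.txt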
If I am sure that a script will NOT need to be quoted in Unix, DOS, or
CMD, then I normally omit the quote marks. If an example is peculiar to
GNU awk, the command 'gawk' will be used. Please notify me if you find
errors or new commands to add to this list (total length under 65
characters). I usually try to put the shortest script first.
FILE SPACING:
# double space a file
awk '1;{print ""}'
awk 'BEGIN{ORS="\n\n"};1'
# double space a file which already has blank lines in it. Output file
# should contain no more than one blank line between lines of text.
# NOTE: On Unix systems, DOS lines which have only CRLF (\r\n) are
# often treated as non-blank, and thus 'NF' alone will return TRUE.
awk 'NF{print $0 "\n"}'
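# A sketch of a variant that also treats DOS lines holding only a CR as
# blank, by stripping a trailing \r before testing NF:
awk '{sub(/\r$/,"")}; NF{print $0 "\n"}'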
# triple space a file
awk '1;{print "\n"}'
NUMBERING AND CALCULATIONS:
# precede each line by its line number FOR THAT FILE (left alignment).
# Using a tab (\t) instead of space will preserve margins.
awk '{print FNR "\t" $0}' files*
# precede each line by its line number FOR ALL FILES TOGETHER, with tab.
awk '{print NR "\t" $0}' files*
# number each line of a file (number on left, right-aligned)
# Double the percent signs if typing from the DOS command prompt.
awk '{printf("%5d : %s\n", NR,$0)}'
# number each line of file, but only print numbers if line is not blank
# Remember caveats about Unix treatment of \r (mentioned above)
awk 'NF{$0=++a " :" $0};{print}'
awk '{print (NF? ++a " :" :"") $0}'
# count lines (emulates "wc -l")
awk 'END{print NR}'
# print the sums of the fields of every line
awk '{s=0; for (i=1; i<=NF; i++) s=s+$i; print s}'
# add all fields in all lines and print the sum
awk '{for (i=1; i<=NF; i++) s=s+$i}; END{print s}'
# print every line after replacing each field with its absolute value
awk '{for (i=1; i<=NF; i++) if ($i < 0) $i = -$i; print }'
awk '{for (i=1; i<=NF; i++) $i = ($i < 0) ? -$i : $i; print }'
# print the total number of fields ("words") in all lines
awk '{ total = total + NF }; END {print total}' file
# print the total number of lines that contain "Beth"
awk '/Beth/{n++}; END {print n+0}' file
# print the largest first field and the line that contains it
# Intended for finding the longest string in field #1
awk '$1 > max {max=$1; maxline=$0}; END{ print max, maxline}'
# print the number of fields in each line, followed by the line
awk '{ print NF ":" $0 } '
# print the last field of each line
awk '{ print $NF }'
# print the last field of the last line
awk '{ field = $NF }; END{ print field }'
# print every line with more than 4 fields
awk 'NF > 4'
# print every line where the value of the last field is > 4
awk '$NF > 4'
TEXT CONVERSION AND SUBSTITUTION:
# IN UNIX ENVIRONMENT: convert DOS newlines (CR/LF) to Unix format
awk '{sub(/\r$/,"");print}' # assumes EACH line ends with Ctrl-M
# IN UNIX ENVIRONMENT: convert Unix newlines (LF) to DOS format
awk '{sub(/$/,"\r");print}'
# IN DOS ENVIRONMENT: convert Unix newlines (LF) to DOS format
awk 1
# IN DOS ENVIRONMENT: convert DOS newlines (CR/LF) to Unix format
# Cannot be done with DOS versions of awk, other than gawk:
gawk -v BINMODE="w" '1' infile >outfile
# Use "tr" instead.
tr -d \r <infile >outfile # GNU tr version 1.22 or higher
# delete leading whitespace (spaces, tabs) from front of each line
# aligns all text flush left
awk '{sub(/^[ \t]+/, ""); print}'
# delete trailing whitespace (spaces, tabs) from end of each line
awk '{sub(/[ \t]+$/, "");print}'
# delete BOTH leading and trailing whitespace from each line
awk '{gsub(/^[ \t]+|[ \t]+$/,"");print}'
awk '{$1=$1;print}' # also removes extra space between fields
# insert 5 blank spaces at beginning of each line (make page offset)
awk '{sub(/^/, "     ");print}'
# align all text flush right on a 79-column width
awk '{printf "%79s\n", $0}' file*
# center all text on a 79-character width
awk '{l=length();s=int((79-l)/2); printf "%"(s+l)"s\n",$0}' file*
# substitute (find and replace) "foo" with "bar" on each line
awk '{sub(/foo/,"bar");print}' # replaces only 1st instance
gawk '{$0=gensub(/foo/,"bar",4);print}' # replaces only 4th instance
awk '{gsub(/foo/,"bar");print}' # replaces ALL instances in a line
# substitute "foo" with "bar" ONLY for lines which contain "baz"
awk '/baz/{gsub(/foo/, "bar")};{print}'
# substitute "foo" with "bar" EXCEPT for lines which contain "baz"
awk '!/baz/{gsub(/foo/, "bar")};{print}'
# change "scarlet" or "ruby" or "puce" to "red"
awk '{gsub(/scarlet|ruby|puce/, "red"); print}'
# reverse order of lines (emulates "tac")
awk '{a[i++]=$0} END {for (j=i-1; j>=0;) print a[j--] }' file*
# if a line ends with a backslash, append the next line to it
# (fails if there are multiple lines ending with backslash...)
awk '/\\$/ {sub(/\\$/,""); getline t; print $0 t; next}; 1' file*
# print and sort the login names of all users
awk -F ":" '{ print $1 | "sort" }' /etc/passwd
# print the first 2 fields, in opposite order, of every line
awk '{print $2, $1}' file
# switch the first 2 fields of every line
awk '{temp = $1; $1 = $2; $2 = temp; print}' file
# print every line, deleting the second field of that line
awk '{ $2 = ""; print }'
# print in reverse order the fields of every line
awk '{for (i=NF; i>0; i--) printf("%s ",$i);printf ("\n")}' file
# remove duplicate, consecutive lines (emulates "uniq")
awk 'a !~ $0; {a=$0}'
# remove duplicate, nonconsecutive lines
awk '! a[$0]++' # most concise script
awk '!($0 in a) {a[$0];print}' # most efficient script
# concatenate every 5 lines of input, using a comma separator
# between fields
awk 'ORS=NR%5?",":"\n"' file
SELECTIVE PRINTING OF CERTAIN LINES:
# print first 10 lines of file (emulates behavior of "head")
awk 'NR < 11'
# print first line of file (emulates "head -1")
awk 'NR>1{exit};1'
# print the last 2 lines of a file (emulates "tail -2")
awk '{y=x "\n" $0; x=$0};END{print y}'
# print the last line of a file (emulates "tail -1")
awk 'END{print}'
# print only lines which match regular expression (emulates "grep")
awk '/regex/'
# print only lines which do NOT match regex (emulates "grep -v")
awk '!/regex/'
# print the line immediately before a regex, but not the line
# containing the regex
awk '/regex/{print x};{x=$0}'
awk '/regex/{print (x=="" ? "match on line 1" : x)};{x=$0}'
# print the line immediately after a regex, but not the line
# containing the regex
awk '/regex/{getline;print}'
# grep for AAA and BBB and CCC (in any order)
awk '/AAA/ && /BBB/ && /CCC/'
# grep for AAA and BBB and CCC (in that order)
awk '/AAA.*BBB.*CCC/'
# print only lines of 65 characters or longer
awk 'length > 64'
# print only lines of less than 65 characters
awk 'length < 65'
# print section of file from regular expression to end of file
awk '/regex/,0'
awk '/regex/,EOF'
# print section of file based on line numbers (lines 8-12, inclusive)
awk 'NR==8,NR==12'
# print line number 52
awk 'NR==52'
awk 'NR==52 {print;exit}' # more efficient on large files
# print section of file between two regular expressions (inclusive)
awk '/Iowa/,/Montana/' # case sensitive
SELECTIVE DELETION OF CERTAIN LINES:
# delete ALL blank lines from a file (same as "grep '.' ")
awk NF
awk '/./'
CREDITS AND THANKS:
Special thanks to Peter S. Tillier for helping me with the first release
of this FAQ file.
For additional syntax instructions, including the way to apply editing
commands from a disk file instead of the command line, consult:
"sed & awk, 2nd Edition," by Dale Dougherty and Arnold Robbins
O'Reilly, 1997
"UNIX Text Processing," by Dale Dougherty and Tim O'Reilly
Hayden Books, 1987
"Effective awk Programming, 3rd Edition." by Arnold Robbins
O'Reilly, 2001
To fully exploit the power of awk, one must understand "regular
expressions." For detailed discussion of regular expressions, see
"Mastering Regular Expressions, 2d edition" by Jeffrey Friedl
(O'Reilly, 2002).
The manual ("man") pages on Unix systems may be helpful (try "man awk",
"man nawk", "man regexp", or the section on regular expressions in "man
ed"), but man pages are notoriously difficult. They are not written to
teach awk use or regexps to first-time users, but as a reference text
for those already acquainted with these tools.
USE OF '\t' IN awk SCRIPTS: For clarity in documentation, we have used
the expression '\t' to indicate a tab character (0x09) in the scripts.
All versions of awk, even the UNIX System 7 version, should recognize
the '\t' abbreviation.
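A brief sketch of '\t' in use (the file name is hypothetical), printing the
second tab-delimited column:
awk -F"\t" '{print $2}' data.txt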
#---end of file---
Do nothing to the file, just echo it back (if no pattern is specified, then any
line will match)
awk '{print}' file
==============================================================================
like "grep", find string "fleece" (the {print} command is the default if
nothing is specified)
awk '/fleece/' file
==============================================================================
select lines 14 through 30 of file
awk 'NR==14, NR==30' file
==============================================================================
select just one line of a file
awk 'NR==12' file
awk "NR==$1" file
==============================================================================
rearrange fields 1 and 2 and put colon in between
awk '{print $2 ":" $1}' file
==============================================================================
all lines between BEGIN and END lines (you can substitute any strings for
BEGIN and END, but they must be between slashes)
awk '/BEGIN/,/END/' file
==============================================================================
print number of lines in file (of course wc -l does this, too)
awk 'END{print NR}' file
==============================================================================
substitute every occurrence of a string XYZ by the new string ABC:
Requires nawk.
nawk '{gsub(/XYZ/,"ABC"); print}' file
==============================================================================
print the 3rd field of each line, using the colon as the field separator
awk -F: '{print $3}' file
==============================================================================
Print out the last field in each line, regardless of how many fields:
awk '{print $NF}' file
==============================================================================
To print out a file with line numbers at the edge:
awk '{print NR, $0}' somefile
This is less than optimal because as the line number gets longer in digits,
the lines get shifted over. Thus, use printf:
awk '{printf "%3d %s", NR, $0}' somefile
==============================================================================
Print out lengths of lines in the file
awk '{print length($0)}' somefile
or
awk '{print length}' somefile
==============================================================================
Print out lines and line numbers that are longer than 80 characters
awk 'length > 80 {printf "%3d. %s\n", NR, $0}' somefile
==============================================================================
Total up the sizes (in characters) of the files listed by "ls -l".
(Depending on your ls, the size may be field 4 or field 5; adjust $4 if needed.)
ls -l | awk 'BEGIN{total=0} {total += $4} END{print total}'
==============================================================================
Print out the longest line in a file
awk 'BEGIN {maxlength = 0}
     {
       if (length($0) > maxlength) {
          maxlength = length($0)
          longest = $0
       }
     }
     END {print longest}' somefile
==============================================================================
How many entirely blank lines are in a file?
awk '/^$/ {x++} END {print x}' somefile
==============================================================================
Print out last character of field 1 of every line
awk '{print substr($1,length($1),1)}' somefile
==============================================================================
comment out only #include statements in a C file. This is useful if you want
to run "cxref" which will follow the include links.
awk '/#include/{printf "/* %s */\n", $0; next} {print}' \
file.c | cxref -c $*
==============================================================================
If the last character of a line is a colon, print out the line. This would be
useful in getting the pathname from output of ls -lR:
awk '{ lastchar = substr($0,length($0),1)
       if (lastchar == ":")
          print $0
     }' somefile
Here is the complete command. Note that it even sorts the final output:
ls -lR | awk '{ lastchar = substr($0,length($0),1)
                if (lastchar == ":")
                   dirname = substr($0,1,length($0)-1)
                else if ($4 > 20000)
                   printf "%10d %25s %s\n", $4, dirname, $8
              }' | sort -r
==============================================================================
The following is used to break all long lines of a file into chunks of
length 80:
awk '{ line = $0
       while (length(line) > 80) {
         print substr(line,1,80)
         line = substr(line,81,length(line)-80)
       }
       if (length(line) > 0) print line
     }' somefile.with.long.lines > whatever
==============================================================================
If you want to use awk as a programming language, you can do so by not
processing any file, but by enclosing a bunch of awk commands in curly braces,
activated upon end of file. To use a standard UNIX "file" that has no lines,
use /dev/null. Here's a simple example:
awk 'END{print "hi there everyone"}' < /dev/null
Here's an example of using this to print out the ASCII characters:
awk 'END{for (i=32; i<127; i++) \
printf "%3d %3o %c\n", i,i,i \
}' < /dev/null
==============================================================================
Sometimes you wish to find a field which has some identifying tag, like
X= in front. Suppose your file looked like:
50 30 X=10 Y=100 Z=-2
X=12 89 100 32 Y=900
1 2 3 4 5 6 X=1000
Then to select out the X= numbers from each do
awk '{ for (i=1; i<=NF; i++) \
if ($i ~ /X=.*/) \
print substr($i,3) \
}' playfile1
Note that we used a regular expression to find the initial part: /X=.*/
==============================================================================
Pull an abbreviation out of a file of abbreviations and their translation.
Actually, this can be used to translate anything, where the first field
is the thing you are looking up and the 2nd field is what you want to
output as the translation.
nawk '$1 == abbrev{print $2}' abbrev=$1 translate.file
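Usage sketch with hypothetical data: if translate.file contains the lines
   lb   pound
   kg   kilogram
then the following command prints "pound":
nawk '$1 == abbrev{print $2}' abbrev=lb translate.file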
==============================================================================
Join lines in a file that end in a dash. That is, if any line ends in
-, join it to the next line. This only joins 2 lines at a time. The
dash is removed.
awk '/-$/ {oldline = $0
           getline
           print substr(oldline,1,length(oldline)-1) $0
           next}
     {print}' somefile
==============================================================================
Function in nawk to round:
function round(n)
{
return int(n+0.5)
}
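Usage sketch (the file name is hypothetical): define the function and call it
in one command, printing the first field of every line rounded to an integer:
nawk 'function round(n) {return int(n+0.5)} {print round($1)}' numbers.txt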
==============================================================================
If you have a file of addresses with empty lines between the sections,
you can use the following to search for strings in a section, and print
out the whole section. Put the following into a file called "section.awk":
BEGIN {FS = "\n"; RS = ""; OFS = "\n"}
$0 ~ searchstring { print }
Assume your names are in a file called "rolodex".
Then use the following nawk command when you want to find a section
that contains a string. In this example, it is a person's name:
nawk -f section.awk searchstring=Wolf rolodex
Here's a sample rolodex file:
Big Bad Wolf
101 Garden Lane
Dark Forest, NY 14214
Grandma
102 Garden Lane
Dark Forest, NY 14214
home phone: 471-1900
work phone: 372-8882
==============================================================================
# print every line whose line number ends in an even digit (0,2,4,6,8)
awk 'substr(NR,length(NR),1) ~ /[24680]/ {print}' file
AWK syntax:
awk [-Fs] "program" [file1 file2...] # commands come from DOS cmdline
awk 'program{print "foo"}' file1 # single quotes around double quotes
# NB: Don't use single quotes alone if the embedded info will contain the
# vertical bar or redirection arrows! Either use double quotes, or (if
# using 4DOS) use backticks around the single quotes: `'NF>1'`
# NB: since awk will accept single quotes around arguments from the
# DOS command line, this means that DOS filenames which contain a
# single quote cannot be found by awk, even though they are legal names
# under MS-DOS. To get awk to find a file named foo'bar, the name must
# be entered as foo"'"bar.
awk [-Fs] -f pgmfile [file1 file2...] # commands come from DOS file
If file1 is omitted, input comes from stdin (console).
Option -Fz sets the field separator FS to letter "z".
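# EXAMPLE (a sketch; data.csv is a hypothetical comma-delimited file):
# with a comma as the field separator, print the 2nd column of each line.
awk -F, "{print $2}" data.csv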
AWK notes:
"pattern {action}"
if {action} is omitted, {print $0} is assumed
if "pattern" is omitted, each line is selected for {action}.
Fields are separated by 1 or more spaces or tabs: "field1 field2"
If the commands come from a file, the quotes below can be omitted.
Basic AWK commands:
-------------------
"NR == 5" file show rec. no. (line) 5. NB: "==" is equals.
{FOO = 5} single = assigns "5" to the variable FOO
"$2 == 0 {print $1}" if 2d field is 0, print 1st field
"$3 < 10" if 3d field < 10, numeric comparison; print line
'$3 < "10" ' use single quotes for string comparison!, or
-f pgmfile [$3 < "10"] use "-f pgmfile" for string comparison
"$3 ~ /regexp/" if /regexp/ matches 3d field, print the line
'$3 ~ "regexp" ' regexp can appear in double-quoted string*
# * If double-quoted, 2 backslashes for every 1 in regexps
# * Double-quoted strings require the match (~) character.
"NF > 4" print all lines with 5 or more fields
"$NF > 4" print lines where the last field is 5 or more
"{print NF}" tell us how many fields (words) are on each line
"{print $NF}" print last field of each line
"/regexp/" Only print lines containing "regexp"
"/text|file/" Lines containing "text" or "file" (CASE SENSITIVE!)
"/foo/{print "za", NR}" FAILS on DOS/4DOS command line!!
'/foo/{print "za", NR}' WORKS on DOS/4DOS command line!!
If a line matches "foo", print "za" and the line number.
`"/foo/{print \"za\",NR}"` WORKS on 4DOS cmd line: escape internal quotes with
backslashes and surround the whole expression with backticks; for
historical interest only.
"$3 ~ /B/ {print $2,$3}" If 3d field contains "B", print 2d + 3d fields
"$4 !~ /R/" Print lines where 4th field does NOT contain "R"
'$1=$1' Del extra white space between fields & blank lines
'{$1=$1;print}' Del extra white space between fields, keep blanks
'NF' Del all blank lines
AND(&&), OR(||), NOT(!)
-----------------------
"$2 >= 4 || $3 <= 20" lines where 2d field >= 4 .OR. 3d field <= 20
"NR > 5 && /with/" lines containing "with" for lines 6 or beyond
"/x/ && NF > 2" lines containing "x" with more than 2 fields
"$3/$2 != 5" not equal to "value" or "string"
"$3 !~ /regexp/" regexp does not match in 3d field
"!($3 == 2 && $1 ~ /foo/)" print lines that do NOT match condition
"{print NF, $1, $NF}" print no. of fields, 1st field, last field
"{print NR, $0}" prefix a line number to each line
'{print NR ": " $0}' prefix a line number, colon, space to each line
"NR == 10, NR == 20" print records (lines) 10 - 20, inclusive
"/start/, /stop/" print lines between "start" and "stop"
"length($0) > 72" print all lines longer than 72 chars
"{print $2, $1}" invert first 2 fields, delete all others
"{print substr($0,index($0,$3))}" print field #3 to end of the line
END{...} usage
--------------- The END block runs after all of the input has been read.
1) END { print NR } # same output as "wc -l"
2) {s = s + $1 } # print sum, ave. of all figures in col. 1
END {print "sum is", s, "average is", s/NR}
3) {names=names $1 " " } # converts all fields in col 1 to
END { print names } # concatenated fields in 1 line, e.g.
+---Beth 4.00 0 #
input | Mary 3.75 0 # infile is converted to:
file | Kathy 4.00 10 # "Beth Mary Kathy Mark" on output
+---Mark 5.00 30 #
4) { field = $NF } # print the last field of the last line
END { print field }
PRINT, PRINTF: print expressions, print formatted
print expr1, expr2, ..., exprn # parens() needed if the expression contains
print(expr1, expr2, ..., exprn) # any relational operator: <, <=, ==, >, >=
print # an abbreviation for {print $0}
print "" # print only a blank line
printf("fmt", expr1, ..., exprn) # printf adds no newline; end "fmt" with \n
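# EXAMPLE (sketch): left-justify field 1 in 10 columns, print field 2
# with 2 decimals, and supply the newline explicitly:
{printf "%-10s %8.2f\n", $1, $2}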
FORMAT CONVERSION:
------------------
BEGIN{ RS=""; FS="\n"; # takes records sep. by blank lines, fields
ORS="\n"; OFS="," } # sep. by newlines, and converts to records
{$1=$1; print } # sep. by newlines, fields sep. by commas.
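# EXAMPLE (sketch): the same conversion as one command, applied to a
# hypothetical file of blank-line-separated address blocks:
awk 'BEGIN{RS=""; FS="\n"; ORS="\n"; OFS=","} {$1=$1; print}' addresses.txt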
PARAGRAPHS:
-----------
'BEGIN{RS="";ORS="\n\n"};/foo/' # print paragraph if 'foo' is there.
'BEGIN{RS="";ORS="\n\n"};/foo/&&/bar/' # need both
'BEGIN{RS="";ORS="\n\n"};/foo|bar/' # need either
PASSING VARIABLES:
------------------
gawk -v var="/regexp/" 'var{print "Here it is"}' # var is a regexp
gawk -v var="regexp" '$0~var{print "Here it is"}' # var is a quoted string
gawk -v num=50 '$5 == num' # var is a numeric value
Built-in variables:
ARGC number of command-line arguments
ARGV array of command-line arguments (ARGV[0...ARGC-1])
FILENAME name of current input file
FNR input record number in current file
FS input field separator (default blank)
NF number of fields in current input record
NR input record number since beginning
OFMT output format for numbers (default "%.6g")
OFS output field separator (default blank)
ORS output record separator (default newline)
RLENGTH length of string matched by regular expression in match
RS input record separator (default newline)
RSTART beginning position of string matched by match
SUBSEP separator for array subscripts of form [i,j,...] (default ^\)
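# EXAMPLE (sketch; file names are placeholders): prefix each line with its
# file name and its line number within that file:
awk '{print FILENAME ":" FNR ": " $0}' file1 file2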
Escape sequences:
\b backspace (^H)
\f formfeed (^L)
\n newline (DOS, CR/LF; Unix, LF)
\r carriage return
\t tab (^I)
\ddd octal value `ddd', where `ddd' is 1-3 digits, from 0 to 7
\c any other character is a literal, eg, \" for " and \\ for \
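# EXAMPLE (sketch): print two tab-separated columns, one value inside
# literal double quotes:
awk 'BEGIN{printf "item\tcount\n\"widget\"\t%d\n", 42}'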
Awk string functions:
`r' is a regexp, `s' and `t' are strings, `i' and `n' are integers
`&' in replacement string in SUB or GSUB is replaced by the matched string
gsub(r,s,t) globally replace regex r with string s, applied to data t;
return no. of substitutions; if t is omitted, $0 is used.
gensub(r,s,h,t) replace regex r with string s, on match number h, applied
to data t; if h is 'g', do globally; if t is omitted, $0 is
used. Return the converted pattern, not the no. of changes.
index(s,t) return the index of t in s, or 0 if s does not contain t
length(s) return the length of s
match(s,r) return index of where s matches r, or 0 if there is no
match; set RSTART and RLENGTH
split(s,a,fs) split s into array a on fs, return no. of fields; if fs is
omitted, FS is used in its place
sprintf(fmt,expr-list) return expr-list formatted according to fmt
sub(r,s,t) like gsub but only the first matched substring is replaced
substr(s,i,n) return the n-character substring of s starting at i; if n
is omitted, return the suffix of s starting at i
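# EXAMPLE (sketch, hypothetical input): print whatever follows "id=" on each
# line that contains it, using match() with RSTART and RLENGTH:
awk 'match($0, /id=[^ ]+/) {print substr($0, RSTART+3, RLENGTH-3)}' file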
Arithmetic functions:
atan2(y,x) arctangent of y/x in radians, in the range -pi to pi
cos(x) cosine (angle in radians)
exp(n) exponential e^n (n need not be an integer)
int(x) truncate to integer
log(x) natural logarithm
rand() pseudo-random number r, where 0 <= r < 1
sin(x) sine (angle in radians)
sqrt(x) square root
srand(x) set new seed for random number generator; uses time of day
if no x given
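# EXAMPLE (sketch): seed from the time of day, then print one pseudo-random
# integer between 1 and 6:
awk 'BEGIN{srand(); print int(rand()*6) + 1}'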
[end-of-file]
EMULATING COMMON UNIX COMMANDS WITH AWK:
cat              awk '{print}'
cat -s           awk '{blank = NF == 0 ? ++blank : 0; if (blank <= 1) print;}'
tac              awk '{t[NR] = $0;}END{for (i = NR; i >= 1; i--) print t[i];}'
grep pattern     awk '/pattern/{print}'
grep -v pattern  awk '! /pattern/{print}'
head             awk 'NR <= 10 {print}'
head -1          awk '{print; exit; }'
tail             awk '{t[n++ % 10] = $0}END{for (i = 0; i < 10; i++) print t[n++ % 10];}'
tail -1          awk '{t = $0}END{print t}'
cut -c 10        awk '{print substr($0, 10, 1)}'
cut -d: -f4      awk -F: '{if (NF > 1) print $4; else print;}'
tr A-Z a-z       awk '{print tolower($0);}'
tr a-z A-Z       awk '{print toupper($0);}'
tr -s ' '        awk '{print gensub(" +", " ", "g")}'
tr -d '\012'     awk '{printf "%s", $0}'
wc -l            awk 'END{printf "% 7d\n", NR}'
uniq             awk '{if (NR == 1 || ln != $0) print; ln = $0;}'
rev              awk '{for (i = length($0); i > 0; i--) printf "%c", substr($0, i, 1); print "";}'
basename         awk -F'/' '{print $NF}'
dirname          awk -F'/' '{if (NF <= 1) printf "."; else {OFS="/"; $NF=""; printf "%s", substr($0, 1, length($0) - 1);}}'
xargs            awk '{printf "%s ", $0}END{print ""}'
paste -sd:       awk '{printf "%s%s", (NR > 1 ? ":" : ""), $0}END{print ""}'
cat -n           awk '{printf "% 6d %s\n", NR, $0}'
grep -n pattern  awk '/pattern/{print NR":"$0}'
cp orig new      awk '{print > "new"}' orig
Shorter equivalent forms for some of the above:
cat              awk '{print}'
                 awk '1'
                 awk 'NR'
grep pattern     awk '/pattern/{print}'
                 awk '/pattern/'
grep -v pattern  awk '! /pattern/{print}'
                 awk '! /pattern/'
head             awk 'NR <= 10 {print}'
                 awk 'NR <= 10'