① How to perform a compound sort on a 2-D array in Python

# Example
import numpy as np

data = np.array([[1, 2, 3, 4, 5],
                 [1, 2, 3, 6, 7],
                 [2, 3, 4, 5, 7],
                 [3, 4, 5, 6, 7],
                 [4, 5, 6, 7, 8]])
sorted_cols = []
for col_no in range(data.shape[1]):
    sorted_cols.append(data[np.argsort(data[:, col_no])][:, col_no])
sorted_data = np.column_stack(sorted_cols)
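Note that the loop above sorts each column independently, which scrambles the rows. If the goal is a true compound (multi-key) sort that keeps whole rows together, np.lexsort is the usual tool; a minimal sketch, assuming column 0 is the primary key and column 1 the secondary key (the data values are made up):

import numpy as np

data = np.array([[3, 2], [1, 5], [1, 2], [2, 9]])
# lexsort takes keys in reverse priority order: the last key is the primary key
order = np.lexsort((data[:, 1], data[:, 0]))
sorted_rows = data[order]   # rows sorted by column 0, then by column 1
print(sorted_rows)          # [[1 2] [1 5] [2 9] [3 2]]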

② What exactly does Python's argsort() return?

argsort() performs an indirect sort: it returns the indices that would put the array's values in ascending order, without rearranging the data itself. In the k-nearest-neighbor algorithm it is typically used to pick out the k closest samples by distance.

Principle of the k-nearest-neighbor algorithm: given a new, unlabeled sample, compare each of its feature values with the corresponding features of the samples in the training set, select the k training samples most similar to it, and finally take the class that appears most often among those k samples as the class of the new sample.
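As a minimal sketch of that procedure (the training points, labels, and query below are made up), argsort supplies the "pick the k most similar" step:

import numpy as np

train = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [4.9, 5.0]])   # made-up training samples
labels = np.array(['A', 'A', 'B', 'B'])                              # made-up class labels
query = np.array([0.2, 0.1])
k = 3

dist = np.sqrt(((train - query) ** 2).sum(axis=1))   # Euclidean distance to every sample
nearest = np.argsort(dist)[:k]                       # indices of the k closest samples
values, counts = np.unique(labels[nearest], return_counts=True)
print(values[np.argmax(counts)])                     # majority vote -> 'A'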

Python is an object-oriented, interpreted programming language invented by the Dutch programmer Guido van Rossum in 1989; its first public release appeared in 1991. In the 2017 programming-language rankings Python took first place. Thanks to its conciseness, readability, and extensibility, more and more research institutions abroad use Python for scientific computing, and some well-known universities have adopted it to teach programming courses.

③ How to implement the k-nearest-neighbor algorithm in Python

import numpy as np

def read_data(filename):
    '''Read text data in the format: feature1 feature2 ... label'''
    f = open(filename, 'rt')
    row_list = f.readlines()                      # one list entry per line
    f.close()
    data_array = []
    labels_vector = []
    while True:
        if not row_list:
            break
        row = row_list.pop(0).strip().split('\t')     # strip the newline, split on tabs
        temp_data_row = [float(a) for a in row[:-1]]  # convert strings to floats
        data_array.append(temp_data_row)              # feature values
        labels_vector.append(row[-1])                 # last field is the class label
    return np.array(data_array), np.array(labels_vector)

def classify(test_data, dataset, labels, k):
    '''Classify one sample'''
    diff_dis_array = test_data - dataset          # uses numpy broadcasting
    dis_array = (np.add.reduce(diff_dis_array ** 2, axis=-1)) ** 0.5   # Euclidean distances
    dis_array_index = np.argsort(dis_array)       # indices of the distances in ascending order
    class_count = {}
    for i in range(k):
        temp_label = labels[dis_array_index[i]]
        class_count[temp_label] = class_count.get(temp_label, 0) + 1   # dict of class -> vote count
    sorted_class_count = sorted(class_count.items(), key=lambda item: item[1], reverse=True)  # sort votes in descending order
    return sorted_class_count[0][0]               # label with the most votes

def normalize(dataset):
    '''Min-max normalization of the data'''
    return (dataset - dataset.min(0)) / (dataset.max(0) - dataset.min(0))

k = 3                   # number of neighbors
test_data = [0, 0]      # sample to classify
data, labels = read_data('testdata.txt')
print('Data set:\n', data)
print('Label set:\n', labels)
result = classify(test_data, normalize(data), labels, k)
print('Classification result:', result)
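One caveat about the driver code above: the training data is normalized but the test point is passed in its original feature scale, so the distances mix two scales. A minimal fix, reusing the functions above (the helper name normalize_with is just illustrative), is to scale the query with the same minimum and range as the training data:

import numpy as np

def normalize_with(dataset, point):
    '''Min-max scale the dataset and scale an extra point with the same min/range.'''
    lo = dataset.min(0)
    rng = dataset.max(0) - lo
    return (dataset - lo) / rng, (np.asarray(point, dtype=float) - lo) / rng

norm_data, norm_test = normalize_with(data, test_data)
result = classify(norm_test, norm_data, labels, k)
print('Classification result:', result)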

④ What software is used for data analysis with Python?

Python tools for processing data efficiently. Working with data generally involves the following steps (a small pandas sketch follows the list):

Interacting with the outside world

Preparation: cleaning, munging, combining, normalizing, reshaping, slicing and dicing, transforming, and so on

Transformation

Modeling and computation

Presentation
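As a minimal sketch of how these steps map onto pandas calls (the file name, column names, and exchange rate below are hypothetical):

import pandas as pd

df = pd.read_csv('sales.csv')                        # interact: load data (hypothetical file)
df = df.dropna(subset=['amount'])                    # prepare: drop rows with missing amounts
df['amount_usd'] = df['amount'] * 0.14               # transform: derive a new column (made-up rate)
summary = df.groupby('region')['amount_usd'].sum()   # model/compute: aggregate by group
summary.plot(kind='bar')                             # present: quick bar chart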

Introductory examples
1.usa.gov data from bit.ly
[code]%pwd

%cd ../book_scripts

path = 'ch02/usagov_bitly_data2012-03-16-1331923249.txt'

open(path).readline()

import json
path = 'ch02/usagov_bitly_data2012-03-16-1331923249.txt'
records = [json.loads(line) for line in open(path)]

records[0]

records[0]['tz']

print(records[0]['tz'])

Counting time zones in pure Python
[code]time_zones = [rec['tz'] for rec in records]

time_zones = [rec['tz'] for rec in records if 'tz' in rec]

time_zones[:10]

def get_counts(sequence):
    counts = {}
    for x in sequence:
        if x in counts:
            counts[x] += 1
        else:
            counts[x] = 1
    return counts

from collections import defaultdict

def get_counts2(sequence):
    counts = defaultdict(int) # values will initialize to 0
    for x in sequence:
        counts[x] += 1
    return counts

counts = get_counts(time_zones)

counts['America/New_York']

len(time_zones)

def top_counts(count_dict, n=10):
    value_key_pairs = [(count, tz) for tz, count in count_dict.items()]
    value_key_pairs.sort()
    return value_key_pairs[-n:]

top_counts(counts)

from collections import Counter

counts = Counter(time_zones)

counts.most_common(10)

Counting time zones with pandas
[code]%matplotlib inline

from __future__ import division
from numpy.random import randn
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
plt.rc('figure', figsize=(10, 6))
np.set_printoptions(precision=4)

import json
path = 'ch02/usagov_bitly_data2012-03-16-1331923249.txt'
lines = open(path).readlines()
records = [json.loads(line) for line in lines]

from pandas import DataFrame, Series
import pandas as pd

frame = DataFrame(records)
frame

frame['tz'][:10]

tz_counts = frame['tz'].value_counts()
tz_counts[:10]

clean_tz = frame['tz'].fillna('Missing')
clean_tz[clean_tz == ''] = 'Unknown'
tz_counts = clean_tz.value_counts()
tz_counts[:10]

plt.figure(figsize=(10, 4))

tz_counts[:10].plot(kind='barh', rot=0)

frame['a'][1]

frame['a'][50]

frame['a'][51]

results = Series([x.split()[0] for x in frame.a.dropna()])
results[:5]

results.value_counts()[:8]

cframe = frame[frame.a.notnull()]

operating_system = np.where(cframe['a'].str.contains('Windows'),
'Windows', 'Not Windows')
operating_system[:5]

by_tz_os = cframe.groupby(['tz', operating_system])

agg_counts = by_tz_os.size().unstack().fillna(0)
agg_counts[:10]

# Use to sort in ascending order
indexer = agg_counts.sum(1).argsort()
indexer[:10]

count_subset = agg_counts.take(indexer)[-10:]
count_subset

plt.figure()

count_subset.plot(kind='barh', stacked=True)

plt.figure()

normed_subset = count_subset.div(count_subset.sum(1), axis=0)
normed_subset.plot(kind='barh', stacked=True)

MovieLens 1M data set
[code]import pandas as pd
import os
encoding = 'latin1'

upath = os.path.expanduser('ch02/movielens/users.dat')
rpath = os.path.expanduser('ch02/movielens/ratings.dat')
mpath = os.path.expanduser('ch02/movielens/movies.dat')

unames = ['user_id', 'gender', 'age', 'occupation', 'zip']
rnames = ['user_id', 'movie_id', 'rating', 'timestamp']
mnames = ['movie_id', 'title', 'genres']

users = pd.read_csv(upath, sep='::', header=None, names=unames, encoding=encoding)
ratings = pd.read_csv(rpath, sep='::', header=None, names=rnames, encoding=encoding)
movies = pd.read_csv(mpath, sep='::', header=None, names=mnames, encoding=encoding)

users[:5]

ratings[:5]

movies[:5]

ratings

data = pd.merge(pd.merge(ratings, users), movies)
data

data.ix[0]

mean_ratings = data.pivot_table('rating', index='title',
columns='gender', aggfunc='mean')
mean_ratings[:5]

ratings_by_title = data.groupby('title').size()

ratings_by_title[:5]

active_titles = ratings_by_title.index[ratings_by_title >= 250]

active_titles[:10]

mean_ratings = mean_ratings.ix[active_titles]
mean_ratings

mean_ratings = mean_ratings.rename(index={'Seven Samurai (The Magnificent Seven) (Shichinin no samurai) (1954)':
'Seven Samurai (Shichinin no samurai) (1954)'})

top_female_ratings = mean_ratings.sort_index(by='F', ascending=False)
top_female_ratings[:10]

Measuring rating disagreement
[code]mean_ratings['diff'] = mean_ratings['M'] - mean_ratings['F']

sorted_by_diff = mean_ratings.sort_index(by='diff')
sorted_by_diff[:15]

# Reverse order of rows, take first 15 rows
sorted_by_diff[::-1][:15]

# Standard deviation of rating grouped by title
rating_std_by_title = data.groupby('title')['rating'].std()
# Filter down to active_titles
rating_std_by_title = rating_std_by_title.ix[active_titles]
# Order Series by value in descending order
rating_std_by_title.order(ascending=False)[:10]

US Baby Names 1880-2010
[code]from __future__ import division
from numpy.random import randn
import numpy as np
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(12, 5))
np.set_printoptions(precision=4)
%pwd

http://www.ssa.gov/oact/babynames/limits.html

[code]!head -n 10 ch02/names/yob1880.txt

import pandas as pd
names1880 = pd.read_csv('ch02/names/yob1880.txt', names=['name', 'sex', 'births'])
names1880

names1880.groupby('sex').births.sum()

# 2010 is the last available year right now
years = range(1880, 2011)

pieces = []
columns = ['name', 'sex', 'births']

for year in years:
    path = 'names/names/yob%d.txt' % year
    frame = pd.read_csv(path, names=columns)

    frame['year'] = year
    pieces.append(frame)

# Concatenate everything into a single DataFrame
names = pd.concat(pieces, ignore_index=True)

total_births = names.pivot_table('births', index='year',
columns='sex', aggfunc=sum)

total_births.tail()

total_births.plot(title='Total births by sex and year')

def add_prop(group):
    # Integer division floors
    births = group.births.astype(float)

    group['prop'] = births / births.sum()
    return group
names = names.groupby(['year', 'sex']).apply(add_prop)

names

np.allclose(names.groupby(['year', 'sex']).prop.sum(), 1)

def get_top1000(group):
    return group.sort_index(by='births', ascending=False)[:1000]
grouped = names.groupby(['year', 'sex'])
top1000 = grouped.apply(get_top1000)

pieces = []
for year, group in names.groupby(['year', 'sex']):
    pieces.append(group.sort_index(by='births', ascending=False)[:1000])
top1000 = pd.concat(pieces, ignore_index=True)

top1000.index = np.arange(len(top1000))

top1000

Analyzing naming trends
[code]boys = top1000[top1000.sex == 'M']
girls = top1000[top1000.sex == 'F']

total_births = top1000.pivot_table('births', index='year', columns='name',
aggfunc=sum)
total_births

subset = total_births[['John', 'Harry', 'Mary', 'Marilyn']]
subset.plot(subplots=True, figsize=(12, 10), grid=False,
title="Number of births per year")

Measuring the increase in naming diversity
[code]plt.figure()

table = top1000.pivot_table('prop', index='year',
columns='sex', aggfunc=sum)
table.plot(title='Sum of table1000.prop by year and sex',
yticks=np.linspace(0, 1.2, 13), xticks=range(1880, 2020, 10))

df = boys[boys.year == 2010]
df

prop_cumsum = df.sort_index(by='prop', ascending=False).prop.cumsum()
prop_cumsum[:10]

prop_cumsum.values.searchsorted(0.5)

df = boys[boys.year == 1900]
in1900 = df.sort_index(by='prop', ascending=False).prop.cumsum()
in1900.values.searchsorted(0.5) + 1

def get_quantile_count(group, q=0.5):
    group = group.sort_index(by='prop', ascending=False)
    return group.prop.cumsum().values.searchsorted(q) + 1

diversity = top1000.groupby(['year', 'sex']).apply(get_quantile_count)
diversity = diversity.unstack('sex')

diversity.head()

diversity.plot(title="Number of popular names in top 50%")

The “Last letter” Revolution
[code]# extract last letter from name column
get_last_letter = lambda x: x[-1]
last_letters = names.name.map(get_last_letter)
last_letters.name = 'last_letter'

table = names.pivot_table('births', index=last_letters,
columns=['sex', 'year'], aggfunc=sum)

subtable = table.reindex(columns=[1910, 1960, 2010], level='year')
subtable.head()

subtable.sum()

letter_prop = subtable / subtable.sum().astype(float)

import matplotlib.pyplot as plt

fig, axes = plt.subplots(2, 1, figsize=(10, 8))
letter_prop['M'].plot(kind='bar', rot=0, ax=axes[0], title='Male')
letter_prop['F'].plot(kind='bar', rot=0, ax=axes[1], title='Female',
legend=False)

plt.subplots_adjust(hspace=0.25)

letter_prop = table / table.sum().astype(float)

dny_ts = letter_prop.ix[['d', 'n', 'y'], 'M'].T
dny_ts.head()

plt.close('all')

dny_ts.plot()

Boy names that became girl names (and vice versa)
[code]all_names = top1000.name.unique()
mask = np.array(['lesl' in x.lower() for x in all_names])
lesley_like = all_names[mask]
lesley_like

filtered = top1000[top1000.name.isin(lesley_like)]
filtered.groupby('name').births.sum()

table = filtered.pivot_table('births', index='year',
columns='sex', aggfunc='sum')
table = table.div(table.sum(1), axis=0)
table.tail()

plt.close('all')

table.plot(style={'M': 'k-', 'F': 'k--'})

⑤ How to work with data in Python

pandas is the library of choice for the rest of this book. pandas meets the following needs:

  • Data structures with automatic or explicit data alignment along each axis. This prevents many common errors caused by misaligned data and by data coming from differently indexed sources.

  • Integrated time-series functionality

  • Data structures that can handle both time-series and non-time-series data

  • Arithmetic operations and reductions (such as summing along an axis) that respect the metadata (axis labels)

  • Flexible handling of missing data

  • Merging and other relational operations found in common databases (SQL-based, for example)

  • 1. Introduction to pandas data structures

    There are two data structures: Series and DataFrame. A Series is a one-dimensional, NumPy-array-like object made up of a set of data (of any NumPy dtype) and an associated set of data labels, its index. The index and the data can be accessed with index and values respectively; if no index is specified, a default index from 0 to N-1 is created automatically (see the Series sketch after this list).

  • The example below looks at the cummin function. Note: cummin gives the running minimum up to the current row, not the minimum after accumulation.

    # -*- encoding: utf-8 -*-
    import numpy as np
    import os
    import pandas as pd
    from pandas import Series, DataFrame
    import matplotlib.pyplot as plt
    import time
    frame = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [-10, 11, 12, -13]],
                      index=list('abc'), columns=['one', 'two', 'three', 'four'])
    print(frame.cummin())
    print(frame)

    >>>
       one  two  three  four
    a    1    2      3     4
    b    1    2      3     4
    c  -10    2      3   -13
       one  two  three  four
    a    1    2      3     4
    b    5    6      7     8
    c  -10   11     12   -13
  • Correlation and covariance

    Some summary statistics
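
As a minimal sketch of the Series behavior described above (the values are just example data):

import pandas as pd

# With no index given, a default integer index 0..N-1 is created
s1 = pd.Series([4, 7, -5, 3])
print(s1.values)   # [ 4  7 -5  3]
print(s1.index)    # RangeIndex(start=0, stop=4, step=1)

# An explicit index can be supplied instead
s2 = pd.Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])
print(s2['a'])     # -5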


⑥ What exactly does Python's argsort() return?

Use Python's built-in help:
>>> import numpy
>>> help(numpy.argsort)
Help on function argsort in module numpy.core.fromnumeric:

argsort(a, axis=-1, kind='quicksort', order=None)
Returns the indices that would sort an array.

Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.

From this we can see that argsort returns the indices of the array values sorted from smallest to largest.
Examples
--------

>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
--------
argsort returns the indices of the array values in ascending order.

Sorted in ascending order, [3, 1, 2] becomes [1, 2, 3]; the corresponding indices are [1, 2, 0].
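For a 2-D array, argsort also takes an axis argument; a minimal sketch (the array is made up):

import numpy as np

x = np.array([[3, 1, 2],
              [9, 5, 7]])
print(np.argsort(x, axis=1))   # sort indices within each row:    [[1 2 0] [1 2 0]]
print(np.argsort(x, axis=0))   # sort indices within each column: [[0 0 0] [1 1 1]]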

⑦ How is Python's argsort expressed in MATLAB?

file = open('matlab file', 'a')  # 'a' means append to the end of the file
file.write('test abc')
file.close()

The code above opens the file and appends the line 'test abc' to the end of the matlab file.

⑧ How to output the coordinates of a particular value in a nested (compound) list in Python

# Example
import numpy as np

data = np.array([[1, 2, 3, 4, 5],
                 [1, 2, 3, 6, 7],
                 [2, 3, 4, 5, 7],
                 [3, 4, 5, 6, 7],
                 [4, 5, 6, 7, 8]])
sorted_cols = []
for col_no in range(data.shape[1]):
    sorted_cols.append(data[np.argsort(data[:, col_no])][:, col_no])
sorted_data = np.column_stack(sorted_cols)
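The snippet above is the column-sorting example again; to actually get the coordinates (row and column indices) of a given value in a 2-D array, np.argwhere is the usual tool. A minimal sketch, with an illustrative array and target value:

import numpy as np

data = np.array([[1, 2, 3],
                 [4, 5, 2]])
coords = np.argwhere(data == 2)   # row/column index of every element equal to 2
print(coords)                     # [[0 1] [1 2]]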

⑨ How to learn Python through practice

If you are a student, you can join a relevant lab. If not, there are Python forums and programming forums you can browse that have projects to practice on. Sites like Gitee (码云) and GitHub host many Python projects you can apply to join, though the bar is usually high. You can also rewrite Python 2 programs in Python 3 (most crawlers found online are written in Python 2; ignore this if you are learning Python 2).
I came across advice from more experienced developers on Zhihu, OSChina, or Toutiao for Developers, I no longer remember which. I picked things I found interesting to practice on and wrote a few crawlers: downloading images from the web (not limited by "pagination"), simulated login, cookie-based login, and so on.

In practice you will run into many problems. When I wrote my first crawler I hit "encoding" problems: the scraped page source came out garbled. I also had file-saving problems (the HTML file looked fine opened as text but was garbled in the browser).
You will hit all sorts of messy problems in practice; I solved them mostly through the internet and Google (sometimes I wanted to give up, but you have to keep going).

⑩ How to create three-dimensional arrays in Python


1. Creating ordinary multi-dimensional arrays

import numpy as np
a = np.array([1, 2, 3], dtype=int)  # create a 1x3 array: array([1, 2, 3])
type(a)          # numpy.ndarray
a.shape          # shape information: (3L,)
a.dtype.name     # 'int32'
a.size           # number of elements: 3
a.itemsize       # bytes per element: 4


b = np.array([[1, 2, 3], [4, 5, 6]], dtype=int)  # create a 2x3 array: array([[1, 2, 3], [4, 5, 6]])
b.shape          # shape information: (2L, 3L)
b.size           # number of elements: 6
b.itemsize       # bytes per element: 4


c = np.array([[1, 2, 3], [4, 5, 6]], dtype='int16')  # create a 2x3 array: array([[1, 2, 3], [4, 5, 6]], dtype=int16)
c.shape          # shape information: (2L, 3L)
c.size           # number of elements: 6
c.itemsize       # bytes per element: 2
c.ndim           # number of dimensions


d = np.array([[1, 2, 3], [4, 5, 6]], dtype=complex)  # 2-D array of complex numbers
d.itemsize       # bytes per element: 16
d.dtype.name     # element type: 'complex128'


2. Creating special multi-dimensional arrays

a1 = np.zeros((3, 4))  # create a 3x4 array of zeros
Output:
array([[0., 0., 0., 0.],
       [0., 0., 0., 0.],
       [0., 0., 0., 0.]])
a1.dtype.name    # element type: 'float64'
a1.size          # number of elements: 12
a1.itemsize      # bytes per element: 8


a2 = np.ones((2, 3, 4), dtype=np.int16)  # create a 2x3x4 three-dimensional array of ones
a2 = np.ones((2, 3, 4), dtype='int16')   # create a 2x3x4 three-dimensional array of ones
Output:
array([[[1, 1, 1, 1],
        [1, 1, 1, 1],
        [1, 1, 1, 1]],

       [[1, 1, 1, 1],
        [1, 1, 1, 1],
        [1, 1, 1, 1]]], dtype=int16)


a3 = np.empty((2, 3))  # create an uninitialized 2x3 array
Output: (may vary)
array([[1., 2., 3.],
       [4., 5., 6.]])


a4 = np.arange(10, 30, 5)  # start 10, stop 30 (exclusive), step 5
Output: array([10, 15, 20, 25])
a5 = np.arange(0, 2, 0.3)  # start 0, stop 2 (exclusive), step 0.3
Output: array([0., 0.3, 0.6, 0.9, 1.2, 1.5, 1.8])
from numpy import pi
np.linspace(0, 2, 9)  # start 0, stop 2 (inclusive), 9 elements
Output:
array([0., 0.25, 0.5, 0.75, 1., 1.25, 1.5, 1.75, 2.])
x = np.linspace(0, 2 * pi, 9)
Output:
array([0., 0.78539816, 1.57079633, 2.35619449, 3.14159265,
       3.92699082, 4.71238898, 5.49778714, 6.28318531])


a = np.arange(6)
Output:
array([0, 1, 2, 3, 4, 5])
b = np.arange(12).reshape(4, 3)
Output:
array([[0, 1, 2],
       [3, 4, 5],
       [6, 7, 8],
       [9, 10, 11]])
c = np.arange(24).reshape(2, 3, 4)
Output:
array([[[ 0,  1,  2,  3],
        [ 4,  5,  6,  7],
        [ 8,  9, 10, 11]],

       [[12, 13, 14, 15],
        [16, 17, 18, 19],
        [20, 21, 22, 23]]])

numpy.set_printoptions can be used to configure how numpy arrays are printed.

In an ipython environment, use help(numpy.set_printoptions) to look up usage help and examples.
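A minimal sketch of set_printoptions (the option values here are just examples):

import numpy as np

np.set_printoptions(precision=3, suppress=True)  # 3 decimal places, no scientific notation
print(np.array([1 / 3, 1e-8]))                   # [0.333 0.   ]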

3. Basic operations on multi-dimensional arrays

Addition and subtraction require both operands to have the same shape; both must be M*N arrays for the operation to be carried out correctly.

a = np.arange(4)
Output:
array([0, 1, 2, 3])
b = a ** 2
Output:
array([0, 1, 4, 9])
c = 10 * np.sin(a)
Output:
array([0., 8.41470985, 9.09297427, 1.41120008])


a < 35
Output:
array([True, True, True, True], dtype=bool)

A = np.array([[1, 1], [0, 1]])
B = np.array([[2, 0], [3, 4]])
C = A * B        # element-wise multiplication
Output:
array([[2, 0],
       [0, 4]])
D = A.dot(B)     # matrix multiplication
Output:
array([[5, 4],
       [3, 4]])
E = np.dot(A, B) # matrix multiplication
Output:
array([[5, 4],
       [3, 4]])

Type conversion when operating on multi-dimensional arrays

When operating with arrays of different types, the type of the
resulting array corresponds to the more general or precise one (a
behavior known as upcasting)

That is, when arrays of different types are operated on together, the result is automatically upcast to the more precise type (upcasting). The examples below illustrate upcasting and in-place operations:

a = np.ones((2, 3), dtype=int)   # int32
b = np.random.random((2, 3))     # float64
b += a                           # OK
a += b                           # error: float64 cannot be cast to int32 in place
a = np.ones(3, dtype=np.int32)
b = np.linspace(0, pi, 3)
c = a + b
d = np.exp(c * 1j)
Output:
array([ 0.54030231+0.84147098j, -0.84147098+0.54030231j,
       -0.54030231-0.84147098j])
d.dtype.name
Output:
'complex128'

Unary operations on multi-dimensional arrays, such as sum, minimum, and maximum

a = np.random.random((2, 3))
a.sum()
a.min()
a.max()


b = np.arange(12).reshape(3, 4)
Output:
array([[ 0,  1,  2,  3],
       [ 4,  5,  6,  7],
       [ 8,  9, 10, 11]])
b.sum(axis=0)     # sum of each column
Output:
array([12, 15, 18, 21])
b.sum(axis=1)     # sum of each row
Output:
array([ 6, 22, 38])
b.cumsum(axis=0)  # cumulative sum down each column
Output:
array([[ 0,  1,  2,  3],
       [ 4,  6,  8, 10],
       [12, 15, 18, 21]])
b.cumsum(axis=1)  # cumulative sum along each row
Output:
array([[ 0,  1,  3,  6],
       [ 4,  9, 15, 22],
       [ 8, 17, 27, 38]])

universal functions

B = np.arange(3)
np.exp(B)
np.sqrt(B)
C = np.array([2., -1., 4.])
np.add(B, C)

Other ufuncs include:

all, any, apply_along_axis, argmax, argmin, argsort, average, bincount, ceil, clip, conj, corrcoef, cov, cross, cumprod, cumsum, diff, dot, floor, inner, lexsort, max, maximum, mean, median, min, minimum, nonzero, outer, prod, re, round, sort, std, sum, trace, transpose, var, vdot, vectorize, where

4. Indexing, slicing, and iterating

a = np.arange(10) ** 3
a[2]
a[2:5]
a[::-1]    # reversed order
for i in a:
    print(i ** (1 / 3.))
def f(x, y):
    return 10 * x + y
b = np.fromfunction(f, (5, 4), dtype=int)
b[2, 3]
b[0:5, 1]
b[:, 1]
b[1:3, :]
b[-1]
c = np.array([[[0, 1, 2], [10, 11, 12]], [[100, 101, 102], [110, 111, 112]]])
Output:
array([[[  0,   1,   2],
        [ 10,  11,  12]],

       [[100, 101, 102],
        [110, 111, 112]]])
c.shape
Output:
(2L, 2L, 3L)
c[0, ...]
c[0, :, :]
Output:
array([[ 0,  1,  2],
       [10, 11, 12]])
c[:, :, 2]
c[..., 2]
Output:
array([[  2,  12],
       [102, 112]])

for row in c:
    print(row)

for element in c.flat:
    print(element)
a = np.floor(10 * np.random.random((3, 4)))
Output:
array([[3., 9., 8., 4.],
       [2., 1., 4., 6.],
       [0., 6., 0., 2.]])
a.ravel()
Output:
array([3., 9., 8., ..., 6., 0., 2.])
a.reshape(6, 2)
Output:
array([[3., 9.],
       [8., 4.],
       [2., 1.],
       [4., 6.],
       [0., 6.],
       [0., 2.]])
a.T
Output:
array([[3., 2., 0.],
       [9., 1., 6.],
       [8., 4., 0.],
       [4., 6., 2.]])
a.T.shape
Output:
(4L, 3L)
a.resize((2, 6))
Output:
array([[3., 9., 8., 4., 2., 1.],
       [4., 6., 0., 6., 0., 2.]])
a.shape
Output:
(2L, 6L)
a.reshape(3, -1)
Output:
array([[3., 9., 8., 4.],
       [2., 1., 4., 6.],
       [0., 6., 0., 2.]])

See the following functions for details:

ndarray.shape, reshape, resize, ravel

5. Stacking different arrays together

a = np.floor(10 * np.random.random((2, 2)))
Output:
array([[5., 2.],
       [6., 2.]])
b = np.floor(10 * np.random.random((2, 2)))
Output:
array([[0., 2.],
       [4., 1.]])
np.vstack((a, b))
Output:
array([[5., 2.],
       [6., 2.],
       [0., 2.],
       [4., 1.]])
np.hstack((a, b))
Output:
array([[5., 2., 0., 2.],
       [6., 2., 4., 1.]])


from numpy import newaxis
np.column_stack((a, b))
Output:
array([[5., 2., 0., 2.],
       [6., 2., 4., 1.]])


a = np.array([4., 2.])
b = np.array([2., 8.])
a[:, newaxis]
Output:
array([[4.],
       [2.]])
b[:, newaxis]
Output:
array([[2.],
       [8.]])
np.column_stack((a[:, newaxis], b[:, newaxis]))
Output:
array([[4., 2.],
       [2., 8.]])
np.vstack((a[:, newaxis], b[:, newaxis]))
Output:
array([[4.],
       [2.],
       [2.],
       [8.]])
np.r_[1:4, 0, 4]
Output:
array([1, 2, 3, 0, 4])
np.c_[np.array([[1, 2, 3]]), 0, 0, 0, np.array([[4, 5, 6]])]
Output:
array([[1, 2, 3, 0, 0, 0, 4, 5, 6]])

For detailed usage, look up the following functions:

hstack, vstack, column_stack, concatenate, c_, r_

6. Splitting a larger array into smaller ones

a = np.floor(10 * np.random.random((2, 12)))
Output:
array([[9., 7., 9., ..., 3., 2., 4.],
       [5., 3., 3., ..., 9., 7., 7.]])
np.hsplit(a, 3)
Output:
[array([[9., 7., 9., 6.],
        [5., 3., 3., 1.]]), array([[7., 2., 1., 6.],
        [7., 5., 0., 2.]]), array([[9., 3., 2., 4.],
        [3., 9., 7., 7.]])]
np.hsplit(a, (3, 4))
Output:
[array([[9., 7., 9.],
        [5., 3., 3.]]), array([[6.],
        [1.]]), array([[7., 2., 1., ..., 3., 2., 4.],
        [7., 5., 0., ..., 9., 7., 7.]])]

Functions providing similar functionality include:

hsplit, vsplit, array_split

7. Copying multi-dimensional arrays

a = np.arange(12)
Output:
array([0, 1, 2, ..., 9, 10, 11])


No copy at all

b = a
b is a            # True
b.shape = 3, 4
a.shape           # (3L, 4L)

def f(x):         # Python passes mutable objects as references, so function calls make no copy
    print(id(x))  # id is the unique identifier of a Python object

id(a)             # 111833936L
id(b)             # 111833936L
f(a)              # 111833936L


View or shallow copy

c = a.view()
c is a            # False
c.base is a       # True
c.flags.owndata   # False
c.shape = 2, 6
a.shape           # (3L, 4L)
c[0, 4] = 1234
print(a)
Output:
array([[   0,    1,    2,    3],
       [1234,    5,    6,    7],
       [   8,    9,   10,   11]])
s = a[:, 1:3]
s[:] = 10
print(a)
Output:
array([[   0,   10,   10,    3],
       [1234,   10,   10,    7],
       [   8,   10,   10,   11]])


Deep copy
d = a.copy()
d is a            # False
d.base is a       # False
d[0, 0] = 9999
print(a)
Output:
array([[   0,   10,   10,    3],
       [1234,   10,   10,    7],
       [   8,   10,   10,   11]])

Overview of basic numpy functions and methods

Array Creation

arange, array, copy, empty, empty_like, eye, fromfile, fromfunction, identity, linspace, logspace, mgrid, ogrid, ones, ones_like, r_, zeros, zeros_like

Conversions

ndarray.astype, atleast_1d, atleast_2d, atleast_3d, mat

Manipulations

array_split, column_stack, concatenate, diagonal, dsplit, dstack, hsplit, hstack, ndarray.item, newaxis, ravel, repeat, reshape, resize, squeeze, swapaxes, take, transpose, vsplit, vstack

Questions

all, any, nonzero, where

Ordering

argmax, argmin, argsort, max, min, ptp, searchsorted, sort

Operations

choose, compress, cumprod, cumsum, inner, ndarray.fill, imag, prod, put, putmask, real, sum

Basic Statistics

cov, mean, std, var

Basic Linear Algebra

cross, dot, outer, linalg.svd, vdot

Link to the full list of functions and methods:

https://docs.scipy.org/doc/numpy-dev/reference/routines.html#routines