Commit 66b96bf1 authored by Jérome Perrin

py3: use io.BytesIO instead of cStringIO

parent de0b6658
......@@ -31,8 +31,9 @@ import urllib
import msgpack
from httplib import NO_CONTENT
from cStringIO import StringIO
from io import BytesIO
from App.version_txt import getZopeVersion
from Products.ERP5Type.Utils import str2bytes
from Products.ERP5Type.tests.ERP5TypeTestCase import ERP5TypeTestCase
......@@ -65,7 +66,7 @@ class TestDataIngestion(ERP5TypeTestCase):
body = msgpack.packb([0, data_chunk], use_bin_type=True)
env = { "CONTENT_TYPE": "application/x-www-form-urlencoded" }
body = urllib.urlencode({ "data_chunk": body })
body = str2bytes(urllib.urlencode({ "data_chunk": body }))
if not isinstance(ingestion_policy, str):
ingestion_policy = ingestion_policy.getPath()
......@@ -77,7 +78,7 @@ class TestDataIngestion(ERP5TypeTestCase):
data_product = data_product.getReference()
path = ingestion_policy + "/ingest?reference=" + data_supply + "." + data_product
publish_kw = dict(user="ERP5TypeTestCase", env=env, request_method="POST", stdin=StringIO(body))
publish_kw = dict(user="ERP5TypeTestCase", env=env, request_method="POST", stdin=BytesIO(body))
response = self.publish(path, **publish_kw)
return response
......
......@@ -23,7 +23,7 @@
##############################################################################
from AccessControl import ClassSecurityInfo
from Products.ERP5Type.Core.Folder import Folder
from cStringIO import StringIO
from io import BytesIO
import msgpack
from warnings import warn
......@@ -55,7 +55,7 @@ class IngestionPolicyTool(Folder):
XXX: use a simple deterministic approach to detect type of data using
https://github.com/fluent/fluentd/blob/master/lib/fluent/plugin/in_forward.rb#L205
"""
data_file = StringIO(data)
data_file = BytesIO(data)
msgpack_list = msgpack.Unpacker(data_file)
  # we need a pure primitive list so we avoid zope security in restricted
  # script environment, but we lose laziness
......@@ -73,5 +73,5 @@ class IngestionPolicyTool(Folder):
    Lazy unpack data, usable in restricted environment
Setting use_list=False uses tuples instead of lists which is faster
"""
data_file = StringIO(data)
data_file = BytesIO(data)
return (x for x in msgpack.Unpacker(data_file, use_list=use_list))
\ No newline at end of file
import binascii
import numpy as np
import struct
from cStringIO import StringIO
from io import BytesIO
MAGIC_HEADER = b'\x92WEN\x00\x01'
io = StringIO()
io = BytesIO()
np.save(io, array)
io.seek(0)
npy_data = io.read()
......
import binascii
import struct
import numpy as np
from cStringIO import StringIO
from io import BytesIO
MAGIC_PREFIX = b'\x92WEN'
MAGIC_LEN = len(MAGIC_PREFIX) +2
......@@ -18,7 +18,7 @@ assert major == 0 and minor == 1
# verify crc32 checksum
checksum = struct.unpack('<i', data[MAGIC_LEN:HEADER_LEN])[0]
assert checksum == binascii.crc32(data[HEADER_LEN:])
io = StringIO()
io = BytesIO()
io.write(data[HEADER_LEN:])
io.seek(0)
array = np.load(io, allow_pickle=False)
......
......@@ -20,7 +20,7 @@
#
##############################################################################
from cStringIO import StringIO
from io import BytesIO
import base64
import binascii
from httplib import NO_CONTENT
......@@ -105,7 +105,7 @@ class Test(ERP5TypeTestCase):
env = {'CONTENT_TYPE': 'application/octet-stream'}
path = ingestion_policy.getPath() + '/ingest?reference=' + reference
publish_kw = dict(user='ERP5TypeTestCase', env=env,
request_method='POST', stdin=StringIO(body))
request_method='POST', stdin=BytesIO(body))
response = self.publish(path, **publish_kw)
self.assertEqual(NO_CONTENT, response.getStatus())
# at every ingestion if no specialised Data Ingestion exists it is created
......@@ -391,7 +391,7 @@ class Test(ERP5TypeTestCase):
path = ingestion_policy.getPath() + '/ingest?reference=' + reference
publish_kw = dict(user='ERP5TypeTestCase', env=env,
request_method='POST', stdin=StringIO(body))
request_method='POST', stdin=BytesIO(body))
response = self.publish(path, **publish_kw)
self.assertEqual(NO_CONTENT, response.getStatus())
......
......@@ -149,11 +149,11 @@ class ProgbarLogger(OriginalProgbarLogger):
seed = 7
np.random.seed(seed)
from cStringIO import StringIO
from io import BytesIO
import cPickle
def save(portal, value):
data_stream = portal.data_stream_module.wendelin_examples_keras_nn
data_stream.edit(file=StringIO(cPickle.dumps(value)))
data_stream.edit(file=BytesIO(cPickle.dumps(value)))
def load(portal):
data_stream = portal.data_stream_module.wendelin_examples_keras_nn
......@@ -168,7 +168,7 @@ def train(portal):
# 1. you can use keras.
# 2. you can save trained model.
# 3. you can load trained model.
# from cStringIO import StringIO
# from io import BytesIO
import tensorflow as tf # pylint:disable=import-error
sess = tf.Session()
from keras import backend as K # pylint:disable=import-error
......@@ -190,7 +190,7 @@ def train(portal):
model.add(Dense(1, init='uniform', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
dataset = np.loadtxt(StringIO(str(portal.portal_skins.erp5_wendelin_examples_keras['pima.csv'])), delimiter=',')
dataset = np.loadtxt(BytesIO(bytes(portal.portal_skins.erp5_wendelin_examples_keras['pima.csv'])), delimiter=',')
X = dataset[:, 0:8]
Y = dataset[:, 8]
......
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions # pylint:disable=import-error
from keras.preprocessing import image # pylint:disable=import-error
import numpy as np
from cStringIO import StringIO
from io import BytesIO
import PIL.Image
model = VGG16(weights='imagenet')
def predict(image_document):
img = PIL.Image.open(StringIO(image_document.getData()))
img = PIL.Image.open(BytesIO(bytes(image_document.getData())))
img = img.resize((224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment